repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
alxgu/ansible-modules-core | cloud/amazon/iam_policy.py | 16 | 13064 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module documentation consumed by `ansible-doc`. Fixed: skip_duplicates
# previously documented `default: "/"`, which does not match the actual
# argument_spec default of True ("yes").
DOCUMENTATION = '''
---
module: iam_policy
short_description: Manage IAM policies for users, groups, and roles
description:
     - Allows uploading or removing IAM policies for IAM users, groups or roles.
version_added: "2.0"
options:
  iam_type:
    description:
      - Type of IAM resource
    required: true
    default: null
    choices: [ "user", "group", "role"]
  iam_name:
    description:
      - Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
    required: true
  policy_name:
    description:
      - The name label for the policy to create or remove.
    required: true
  policy_document:
    description:
      - The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
    required: false
  policy_json:
    description:
      - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
    required: false
  state:
    description:
      - Whether to create or delete the IAM policy.
    required: true
    default: null
    choices: [ "present", "absent"]
  skip_duplicates:
    description:
      - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
    required: false
    default: "yes"
notes:
  - 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
extends_documentation_fragment:
    - aws
    - ec2
'''
EXAMPLES = '''
# Create a policy with the name of 'Admin' to the group 'administrators'
tasks:
- name: Assign a policy called Admin to the administrators group
iam_policy:
iam_type: group
iam_name: administrators
policy_name: Admin
state: present
policy_document: admin_policy.json
# Advanced example, create two new groups and add a READ-ONLY policy to both
# groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
with_items:
- Mario
- Luigi
register: new_groups
- name: Apply READ-ONLY policy to new groups that have been recently created
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"
policy_name: "READ-ONLY"
policy_document: readonlypolicy.json
state: present
with_items: new_groups.results
# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
with_items:
- user: s3_user
prefix: s3_user_prefix
'''
import json
import urllib
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
    '''Return a human-readable message extracted from a boto error object.

    Prefers ``error_message``, then ``message``; falls back to a generic
    "<Exception class>: <err>" rendering when neither attribute exists.
    '''
    for attr in ('error_message', 'message'):
        if hasattr(err, attr):
            return getattr(err, attr)
    return '%s: %s' % (Exception, err)
def user_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM user.

    Returns (changed, name, updated_policies) on success; exits the module
    via exit_json/fail_json on early-out or error paths.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            # urllib is needed here because boto returns URL-encoded policy
            # documents instead of plain JSON.
            if urllib.unquote(iam.get_user_policy(name, pol).
                              get_user_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_user_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_user_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed, msg="%s policy is already absent" % policy_name)
                else:
                    # Previously any other delete failure was swallowed
                    # silently and reported as an unchanged success; fail
                    # loudly instead (consistent with role_action).
                    module.fail_json(msg=error_msg)
        updated_policies = [cp for cp in iam.get_all_user_policies(name).
                            list_user_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def role_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM role.

    Returns (changed, name, updated_policies) on success; exits the module
    via exit_json/fail_json on early-out or error paths.
    """
    policy_match = False
    changed = False
    try:
        current_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as e:
        if e.error_code == "NoSuchEntity":
            # Role doesn't exist so it's safe to assume the policy doesn't either
            module.exit_json(changed=False, msg="No such role, policy will be skipped.")
        else:
            module.fail_json(msg=e.message)
    try:
        matching_policies = []
        for pol in current_policies:
            # boto returns URL-encoded policy documents, hence the unquote
            # before comparing against the caller-supplied JSON.
            if urllib.unquote(iam.get_role_policy(name, pol).
                              get_role_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_role_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_role_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    module.fail_json(msg=err.message)
        updated_policies = [cp for cp in iam.list_role_policies(name).
                            list_role_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies
def group_action(module, iam, name, policy_name, skip, pdoc, state):
    """Create or delete an inline policy on an IAM group.

    Returns (changed, name, updated_policies, msg); msg is non-empty when an
    existing policy already matched the supplied document.
    """
    policy_match = False
    changed = False
    msg=''
    try:
        current_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
        matching_policies = []
        for pol in current_policies:
            # boto returns URL-encoded policy documents, hence the unquote.
            if urllib.unquote(iam.get_group_policy(name, pol).
                              get_group_policy_result.policy_document) == pdoc:
                policy_match = True
                matching_policies.append(pol)
                msg=("The policy document you specified already exists "
                     "under the name %s." % pol)
        if state == 'present':
            # If policy document does not already exist (either it's changed
            # or the policy is not present) or if we're not skipping dupes then
            # make the put call. Note that the put call does a create or update.
            if not policy_match or (not skip and policy_name not in matching_policies):
                changed = True
                iam.put_group_policy(name, policy_name, pdoc)
        elif state == 'absent':
            try:
                iam.delete_group_policy(name, policy_name)
                changed = True
            except boto.exception.BotoServerError as err:
                error_msg = boto_exception(err)
                if 'cannot be found.' in error_msg:
                    changed = False
                    module.exit_json(changed=changed,
                                     msg="%s policy is already absent" % policy_name)
                else:
                    # Previously any other delete failure was swallowed
                    # silently and reported as an unchanged success; fail
                    # loudly instead (consistent with role_action).
                    module.fail_json(msg=error_msg)
        updated_policies = [cp for cp in iam.get_all_group_policies(name).
                            list_group_policies_result.
                            policy_names]
    except boto.exception.BotoServerError as err:
        error_msg = boto_exception(err)
        module.fail_json(changed=changed, msg=error_msg)
    return changed, name, updated_policies, msg
def main():
    """Module entry point: parse arguments, load the policy document and
    dispatch to the user/role/group action handler."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        iam_type=dict(
            default=None, required=True, choices=['user', 'group', 'role']),
        state=dict(
            default=None, required=True, choices=['present', 'absent']),
        iam_name=dict(default=None, required=False),
        policy_name=dict(default=None, required=True),
        policy_document=dict(default=None, required=False),
        policy_json=dict(type='json', default=None, required=False),
        skip_duplicates=dict(type='bool', default=True, required=False)
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
    )
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    # NOTE: 'state' used to be read twice -- once lowered, then immediately
    # clobbered by a raw read. Read each parameter exactly once.
    state = module.params.get('state').lower()
    iam_type = module.params.get('iam_type').lower()
    name = module.params.get('iam_name')
    policy_name = module.params.get('policy_name')
    skip = module.params.get('skip_duplicates')
    if module.params.get('policy_document') is not None and module.params.get('policy_json') is not None:
        module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')
    if module.params.get('policy_document') is not None:
        # Round-trip through json so the comparison against boto's decoded
        # documents is whitespace-insensitive. The with-block closes the
        # file; no explicit close() is needed.
        with open(module.params.get('policy_document'), 'r') as json_data:
            pdoc = json.dumps(json.load(json_data))
    elif module.params.get('policy_json') is not None:
        pdoc = module.params.get('policy_json')
        # if its a string, assume it is already JSON
        if not isinstance(pdoc, basestring):
            try:
                pdoc = json.dumps(pdoc)
            except Exception as e:
                module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
    else:
        pdoc = None
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    try:
        if region:
            iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))
    changed = False
    if iam_type == 'user':
        changed, user_name, current_policies = user_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, user_name=name, policies=current_policies)
    elif iam_type == 'role':
        changed, role_name, current_policies = role_action(module, iam, name,
                                                           policy_name, skip, pdoc,
                                                           state)
        module.exit_json(changed=changed, role_name=name, policies=current_policies)
    elif iam_type == 'group':
        changed, group_name, current_policies, msg = group_action(module, iam, name,
                                                                  policy_name, skip, pdoc,
                                                                  state)
        module.exit_json(changed=changed, group_name=name, policies=current_policies, msg=msg)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 |
gdimitris/ChessPuzzler | Virtual_Environment/lib/python2.7/site-packages/sqlalchemy/sql/util.py | 37 | 20383 | # sql/util.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""High level utilities which build upon other modules here.
"""
from .. import exc, util
from .base import _from_objects, ColumnSet
from . import operators, visitors
from itertools import chain
from collections import deque
from .elements import BindParameter, ColumnClause, ColumnElement, \
Null, UnaryExpression, literal_column, Label, _label_reference, \
_textual_label_reference
from .selectable import ScalarSelect, Join, FromClause, FromGrouping
from .schema import Column
join_condition = util.langhelpers.public_factory(
Join._join_condition,
".sql.util.join_condition")
# names that are still being imported from the outside
from .annotation import _shallow_annotate, _deep_annotate, _deep_deannotate
from .elements import _find_columns
from .ddl import sort_tables
def find_join_source(clauses, join_to):
    """Given a list of FROM clauses and a selectable,
    return the first index and element from the list of
    clauses which can be joined against the selectable. returns
    None, None if no match is found.
    e.g.::
        clause1 = table1.join(table2)
        clause2 = table4.join(table5)
        join_to = table2.join(table3)
        find_join_source([clause1, clause2], join_to) == clause1
    """
    # All the base FROM objects the target is ultimately derived from.
    selectables = list(_from_objects(join_to))
    for i, f in enumerate(clauses):
        for s in selectables:
            if f.is_derived_from(s):
                return i, f
    else:
        # for/else: only reached when no clause matched above.
        return None, None
def visit_binary_product(fn, expr):
    """Produce a traversal of the given expression, delivering
    column comparisons to the given function.
    The function is of the form::
        def my_fn(binary, left, right)
    For each binary expression located which has a
    comparison operator, the product of "left" and
    "right" will be delivered to that function,
    in terms of that binary.
    Hence an expression like::
        and_(
            (a + b) == q + func.sum(e + f),
            j == r
        )
    would have the traversal::
        a <eq> q
        a <eq> e
        a <eq> f
        b <eq> q
        b <eq> e
        b <eq> f
        j <eq> r
    That is, every combination of "left" and
    "right" that doesn't further contain
    a binary comparison is passed as pairs.
    """
    # stack[0] is the comparison binary currently being expanded.
    stack = []
    def visit(element):
        if isinstance(element, ScalarSelect):
            # we don't want to dig into correlated subqueries,
            # those are just column elements by themselves
            yield element
        elif element.__visit_name__ == 'binary' and \
                operators.is_comparison(element.operator):
            # Cross-product: pair every column yielded under the left side
            # with every column yielded under the right side.
            stack.insert(0, element)
            for l in visit(element.left):
                for r in visit(element.right):
                    fn(stack[0], l, r)
            stack.pop(0)
            for elem in element.get_children():
                visit(elem)
        else:
            if isinstance(element, ColumnClause):
                yield element
            for elem in element.get_children():
                for e in visit(elem):
                    yield e
    # Drain the generator; fn() is invoked for its side effects.
    list(visit(expr))
def find_tables(clause, check_columns=False,
                include_aliases=False, include_joins=False,
                include_selects=False, include_crud=False):
    """locate Table objects within the given expression.

    The include_* flags widen what is collected beyond plain tables;
    check_columns additionally collects the parent table of every column
    visited.
    """
    tables = []
    # Map of visit-name -> collector callable, assembled per the flags.
    _visitors = {}
    if include_selects:
        _visitors['select'] = _visitors['compound_select'] = tables.append
    if include_joins:
        _visitors['join'] = tables.append
    if include_aliases:
        _visitors['alias'] = tables.append
    if include_crud:
        _visitors['insert'] = _visitors['update'] = \
            _visitors['delete'] = lambda ent: tables.append(ent.table)
    if check_columns:
        def visit_column(column):
            tables.append(column.table)
        _visitors['column'] = visit_column
    _visitors['table'] = tables.append
    visitors.traverse(clause, {'column_collections': False}, _visitors)
    return tables
def unwrap_order_by(clause):
    """Break up an 'order by' expression into individual column-expressions,
    without DESC/ASC/NULLS FIRST/NULLS LAST"""
    cols = util.column_set()
    # Breadth-first descent through the clause's children.
    stack = deque([clause])
    while stack:
        t = stack.popleft()
        if isinstance(t, ColumnElement) and \
            (
                not isinstance(t, UnaryExpression) or
                not operators.is_ordering_modifier(t.modifier)
        ):
            if isinstance(t, _label_reference):
                t = t.element
            if isinstance(t, (_textual_label_reference)):
                # textual labels can't be resolved to a column here; skip.
                continue
            cols.add(t)
        else:
            for c in t.get_children():
                stack.append(c)
    return cols
def clause_is_present(clause, search):
    """Given a target clause and a second to search within, return True
    if the target is plainly present in the search without any
    subqueries or aliases involved.
    Basically descends through Joins.
    """
    for elem in surface_selectables(search):
        if clause == elem:  # use == here so that Annotated's compare
            return True
    else:
        # for/else: only reached when nothing matched above.
        return False
def surface_selectables(clause):
    # Generator: yield the clause itself, then descend only through Joins
    # and FromGroupings -- never into subqueries or aliases.
    stack = [clause]
    while stack:
        elem = stack.pop()
        yield elem
        if isinstance(elem, Join):
            stack.extend((elem.left, elem.right))
        elif isinstance(elem, FromGrouping):
            stack.append(elem.element)
def selectables_overlap(left, right):
    """Return True if left/right have some overlapping selectable"""
    right_elems = set(surface_selectables(right))
    return any(elem in right_elems for elem in surface_selectables(left))
def bind_values(clause):
    """Return an ordered list of "bound" values in the given clause.
    E.g.::
        >>> expr = and_(
        ...    table.c.foo==5, table.c.foo==7
        ... )
        >>> bind_values(expr)
        [5, 7]
    """
    v = []
    def visit_bindparam(bind):
        # effective_value resolves callables/defaults on the bindparam.
        v.append(bind.effective_value)
    visitors.traverse(clause, {}, {'bindparam': visit_bindparam})
    return v
def _quote_ddl_expr(element):
    # Render a Python value as a literal for DDL: strings get embedded
    # single quotes doubled and are wrapped in quotes; everything else
    # falls back to repr().
    if isinstance(element, util.string_types):
        element = element.replace("'", "''")
        return "'%s'" % element
    else:
        return repr(element)
class _repr_params(object):
    """A string view of bound parameters, truncating
    display to the given number of 'multi' parameter sets.
    """
    def __init__(self, params, batches):
        self.params = params
        self.batches = batches
    def __repr__(self):
        params, batches = self.params, self.batches
        is_long_multi = (
            isinstance(params, (list, tuple)) and
            len(params) > batches and
            isinstance(params[0], (list, dict, tuple))
        )
        if not is_long_multi:
            return repr(params)
        note = " ... displaying %i of %i total bound parameter sets ... " % (
            batches, len(params))
        head = repr(params[:batches - 2])[0:-1]
        tail = repr(params[-2:])[1:]
        return ' '.join((head, note, tail))
def adapt_criterion_to_null(crit, nulls):
    """given criterion containing bind params, convert selected elements
    to IS NULL.

    'nulls' is a collection of bindparam identifying keys whose
    comparisons should be rewritten as IS NULL tests.
    """
    def visit_binary(binary):
        if isinstance(binary.left, BindParameter) \
                and binary.left._identifying_key in nulls:
            # reverse order if the NULL is on the left side
            binary.left = binary.right
            binary.right = Null()
            binary.operator = operators.is_
            binary.negate = operators.isnot
        elif isinstance(binary.right, BindParameter) \
                and binary.right._identifying_key in nulls:
            binary.right = Null()
            binary.operator = operators.is_
            binary.negate = operators.isnot
    # cloned_traverse: mutate copies, leaving the original criterion intact.
    return visitors.cloned_traverse(crit, {}, {'binary': visit_binary})
def splice_joins(left, right, stop_on=None):
    # Graft the 'right' join chain onto 'left', adapting each onclause so
    # that columns of 'left' are referenced; descent stops at 'stop_on'.
    if left is None:
        return right
    stack = [(right, None)]
    adapter = ClauseAdapter(left)
    ret = None
    while stack:
        (right, prevright) = stack.pop()
        if isinstance(right, Join) and right is not stop_on:
            right = right._clone()
            right._reset_exported()
            right.onclause = adapter.traverse(right.onclause)
            stack.append((right.left, right))
        else:
            right = adapter.traverse(right)
        if prevright is not None:
            # re-link the adapted element into its parent join.
            prevright.left = right
        if ret is None:
            # first element processed is the outermost join -> return value.
            ret = right
    return ret
def reduce_columns(columns, *clauses, **kw):
    """given a list of columns, return a 'reduced' set based on natural
    equivalents.
    the set is reduced to the smallest list of columns which have no natural
    equivalent present in the list. A "natural equivalent" means that two
    columns will ultimately represent the same value because they are related
    by a foreign key.
    \*clauses is an optional list of join clauses which will be traversed
    to further identify columns that are "equivalent".
    \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
    whose tables are not yet configured, or columns that aren't yet present.
    This function is primarily used to determine the most minimal "primary
    key" from a selectable, by reducing the set of primary key columns present
    in the selectable to just those that are not repeated.
    """
    ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
    only_synonyms = kw.pop('only_synonyms', False)
    columns = util.ordered_column_set(columns)
    # Columns found to duplicate another column via FK lineage.
    omit = util.column_set()
    for col in columns:
        for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
            for c in columns:
                if c is col:
                    continue
                try:
                    fk_col = fk.column
                except exc.NoReferencedColumnError:
                    # TODO: add specific coverage here
                    # to test/sql/test_selectable ReduceTest
                    if ignore_nonexistent_tables:
                        continue
                    else:
                        raise
                except exc.NoReferencedTableError:
                    # TODO: add specific coverage here
                    # to test/sql/test_selectable ReduceTest
                    if ignore_nonexistent_tables:
                        continue
                    else:
                        raise
                if fk_col.shares_lineage(c) and \
                    (not only_synonyms or
                     c.name == col.name):
                    omit.add(col)
                    break
    if clauses:
        # Additionally treat columns joined by an == clause as equivalent.
        def visit_binary(binary):
            if binary.operator == operators.eq:
                cols = util.column_set(
                    chain(*[c.proxy_set for c in columns.difference(omit)]))
                if binary.left in cols and binary.right in cols:
                    for c in reversed(columns):
                        if c.shares_lineage(binary.right) and \
                            (not only_synonyms or
                             c.name == binary.left.name):
                            omit.add(c)
                            break
        for clause in clauses:
            if clause is not None:
                visitors.traverse(clause, {}, {'binary': visit_binary})
    return ColumnSet(columns.difference(omit))
def criterion_as_pairs(expression, consider_as_foreign_keys=None,
                       consider_as_referenced_keys=None, any_operator=False):
    """traverse an expression and locate binary criterion pairs.

    Returns a list of (referenced, foreign) column tuples; the two
    consider_as_* collections are mutually exclusive hints about which
    side of each comparison plays which role.
    """
    if consider_as_foreign_keys and consider_as_referenced_keys:
        raise exc.ArgumentError("Can only specify one of "
                                "'consider_as_foreign_keys' or "
                                "'consider_as_referenced_keys'")
    def col_is(a, b):
        # return a is b
        return a.compare(b)
    def visit_binary(binary):
        if not any_operator and binary.operator is not operators.eq:
            return
        if not isinstance(binary.left, ColumnElement) or \
                not isinstance(binary.right, ColumnElement):
            return
        if consider_as_foreign_keys:
            if binary.left in consider_as_foreign_keys and \
                    (col_is(binary.right, binary.left) or
                     binary.right not in consider_as_foreign_keys):
                pairs.append((binary.right, binary.left))
            elif binary.right in consider_as_foreign_keys and \
                    (col_is(binary.left, binary.right) or
                     binary.left not in consider_as_foreign_keys):
                pairs.append((binary.left, binary.right))
        elif consider_as_referenced_keys:
            if binary.left in consider_as_referenced_keys and \
                    (col_is(binary.right, binary.left) or
                     binary.right not in consider_as_referenced_keys):
                pairs.append((binary.left, binary.right))
            elif binary.right in consider_as_referenced_keys and \
                    (col_is(binary.left, binary.right) or
                     binary.left not in consider_as_referenced_keys):
                pairs.append((binary.right, binary.left))
        else:
            # No hints: fall back to actual FK relationships between Columns.
            if isinstance(binary.left, Column) and \
                    isinstance(binary.right, Column):
                if binary.left.references(binary.right):
                    pairs.append((binary.right, binary.left))
                elif binary.right.references(binary.left):
                    pairs.append((binary.left, binary.right))
    pairs = []
    visitors.traverse(expression, {}, {'binary': visit_binary})
    return pairs
class ClauseAdapter(visitors.ReplacingCloningVisitor):
    """Clones and modifies clauses based on column correspondence.
    E.g.::
      table1 = Table('sometable', metadata,
          Column('col1', Integer),
          Column('col2', Integer)
          )
      table2 = Table('someothertable', metadata,
          Column('col1', Integer),
          Column('col2', Integer)
          )
      condition = table1.c.col1 == table2.c.col1
    make an alias of table1::
      s = table1.alias('foo')
    calling ``ClauseAdapter(s).traverse(condition)`` converts
    condition to read::
      s.c.col1 == table2.c.col1
    """
    def __init__(self, selectable, equivalents=None,
                 include_fn=None, exclude_fn=None,
                 adapt_on_names=False, anonymize_labels=False):
        self.__traverse_options__ = {
            'stop_on': [selectable],
            'anonymize_labels': anonymize_labels}
        self.selectable = selectable
        self.include_fn = include_fn
        self.exclude_fn = exclude_fn
        self.equivalents = util.column_dict(equivalents or {})
        self.adapt_on_names = adapt_on_names
    def _corresponding_column(self, col, require_embedded,
                              _seen=util.EMPTY_SET):
        # Resolve 'col' against the adapter's selectable; falls back to the
        # 'equivalents' map (recursively, guarding cycles via _seen) and,
        # optionally, to a by-name lookup.
        newcol = self.selectable.corresponding_column(
            col,
            require_embedded=require_embedded)
        if newcol is None and col in self.equivalents and col not in _seen:
            for equiv in self.equivalents[col]:
                newcol = self._corresponding_column(
                    equiv, require_embedded=require_embedded,
                    _seen=_seen.union([col]))
                if newcol is not None:
                    return newcol
        if self.adapt_on_names and newcol is None:
            newcol = self.selectable.c.get(col.name)
        return newcol
    def replace(self, col):
        # Hook invoked by ReplacingCloningVisitor for each element; returning
        # None leaves the element unchanged.
        if isinstance(col, FromClause) and \
                self.selectable.is_derived_from(col):
            return self.selectable
        elif not isinstance(col, ColumnElement):
            return None
        elif self.include_fn and not self.include_fn(col):
            return None
        elif self.exclude_fn and self.exclude_fn(col):
            return None
        else:
            return self._corresponding_column(col, True)
class ColumnAdapter(ClauseAdapter):
    """Extends ClauseAdapter with extra utility functions.
    Key aspects of ColumnAdapter include:
    * Expressions that are adapted are stored in a persistent
      .columns collection; so that an expression E adapted into
      an expression E1, will return the same object E1 when adapted
      a second time.  This is important in particular for things like
      Label objects that are anonymized, so that the ColumnAdapter can
      be used to present a consistent "adapted" view of things.
    * Exclusion of items from the persistent collection based on
      include/exclude rules, but also independent of hash identity.
      This because "annotated" items all have the same hash identity as their
      parent.
    * "wrapping" capability is added, so that the replacement of an expression
      E can proceed through a series of adapters.  This differs from the
      visitor's "chaining" feature in that the resulting object is passed
      through all replacing functions unconditionally, rather than stopping
      at the first one that returns non-None.
    * An adapt_required option, used by eager loading to indicate that
      We don't trust a result row column that is not translated.
      This is to prevent a column from being interpreted as that
      of the child row in a self-referential scenario, see
      inheritance/test_basic.py->EagerTargetingTest.test_adapt_stringency
    """
    def __init__(self, selectable, equivalents=None,
                 chain_to=None, adapt_required=False,
                 include_fn=None, exclude_fn=None,
                 adapt_on_names=False,
                 allow_label_resolve=True,
                 anonymize_labels=False):
        ClauseAdapter.__init__(self, selectable, equivalents,
                               include_fn=include_fn, exclude_fn=exclude_fn,
                               adapt_on_names=adapt_on_names,
                               anonymize_labels=anonymize_labels)
        if chain_to:
            self.chain(chain_to)
        # memoizing map: original column -> adapted column (via _locate_col).
        self.columns = util.populate_column_dict(self._locate_col)
        if self.include_fn or self.exclude_fn:
            self.columns = self._IncludeExcludeMapping(self, self.columns)
        self.adapt_required = adapt_required
        self.allow_label_resolve = allow_label_resolve
        self._wrap = None
    class _IncludeExcludeMapping(object):
        # Wraps the memoized .columns dict so include/exclude rules are
        # applied per-key, independent of hash identity.
        def __init__(self, parent, columns):
            self.parent = parent
            self.columns = columns
        def __getitem__(self, key):
            if (
                self.parent.include_fn and not self.parent.include_fn(key)
            ) or (
                self.parent.exclude_fn and self.parent.exclude_fn(key)
            ):
                if self.parent._wrap:
                    return self.parent._wrap.columns[key]
                else:
                    return key
            return self.columns[key]
    def wrap(self, adapter):
        # Produce a shallow copy of this adapter that also consults
        # 'adapter' when locating columns (see _locate_col).
        ac = self.__class__.__new__(self.__class__)
        ac.__dict__.update(self.__dict__)
        ac._wrap = adapter
        ac.columns = util.populate_column_dict(ac._locate_col)
        if ac.include_fn or ac.exclude_fn:
            ac.columns = self._IncludeExcludeMapping(ac, ac.columns)
        return ac
    def traverse(self, obj):
        return self.columns[obj]
    adapt_clause = traverse
    adapt_list = ClauseAdapter.copy_and_process
    def _locate_col(self, col):
        c = ClauseAdapter.traverse(self, col)
        if self._wrap:
            # pass the result through the wrapped adapter unconditionally.
            c2 = self._wrap._locate_col(c)
            if c2 is not None:
                c = c2
        if self.adapt_required and c is col:
            # untranslated column and translation is mandatory -> no match.
            return None
        c._allow_label_resolve = self.allow_label_resolve
        return c
    def __getstate__(self):
        # the memoized .columns dict holds a bound-method factory and is
        # rebuilt on unpickle.
        d = self.__dict__.copy()
        del d['columns']
        return d
    def __setstate__(self, state):
        self.__dict__.update(state)
        self.columns = util.PopulateDict(self._locate_col)
| mit |
todaychi/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/openpyxl3/shared/units.py | 118 | 2029 | # file openpyxl/shared/units.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
import math
def pixels_to_EMU(value):
    """Convert a pixel length to EMU (9525 EMU per pixel)."""
    emu = value * 9525
    return int(round(emu))
def EMU_to_pixels(value):
    """Convert EMU back to pixels; zero/None maps to 0."""
    return round(value / 9525.) if value else 0
def EMU_to_cm(value):
    """Convert EMU to centimetres; zero/None maps to 0."""
    if not value:
        return 0
    # Inline of EMU_to_pixels(value): EMU -> pixels -> cm.
    pixels = round(value / 9525.)
    return pixels * 2.57 / 96
def pixels_to_points(value):
    """Convert a pixel length to points."""
    px_to_pt = 0.67777777
    return value * px_to_pt
def points_to_pixels(value):
    """Convert points to whole pixels, rounding up; zero/None maps to 0."""
    if not value:
        return 0
    pixels = value * 1.333333333
    return int(math.ceil(pixels))
def degrees_to_angle(value):
    """Convert degrees to 1/60000-degree angle units."""
    sixty_thousandths = value * 60000
    return int(round(sixty_thousandths))
def angle_to_degrees(value):
    """Convert 1/60000-degree angle units back to degrees; zero/None maps to 0."""
    return round(value / 60000.) if value else 0
def short_color(color):
    """ format a color to its short size

    Strings longer than 6 characters lose their first two characters
    (presumably an alpha prefix of an ARGB value -- behavior unchanged).
    """
    return color[2:] if len(color) > 6 else color
| apache-2.0 |
longman694/youtube-dl | youtube_dl/extractor/tv4.py | 28 | 4606 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_iso8601,
try_get,
determine_ext,
)
class TV4IE(InfoExtractor):
    # Extractor for tv4.se / tv4play.se; resolves HLS/HDS/WebVTT sources
    # via the tv4play asset and play.json APIs.
    IE_DESC = 'tv4.se and tv4play.se'
    _VALID_URL = r'''(?x)https?://(?:www\.)?
        (?:
            tv4\.se/(?:[^/]+)/klipp/(?:.*)-|
            tv4play\.se/
            (?:
                (?:program|barn)/(?:[^\?]+)\?video_id=|
                iframe/video/|
                film/|
                sport/|
            )
        )(?P<id>[0-9]+)'''
    _GEO_COUNTRIES = ['SE']
    _TESTS = [
        {
            'url': 'http://www.tv4.se/kalla-fakta/klipp/kalla-fakta-5-english-subtitles-2491650',
            'md5': 'cb837212f342d77cec06e6dad190e96d',
            'info_dict': {
                'id': '2491650',
                'ext': 'mp4',
                'title': 'Kalla Fakta 5 (english subtitles)',
                'thumbnail': r're:^https?://.*\.jpg$',
                'timestamp': int,
                'upload_date': '20131125',
            },
        },
        {
            'url': 'http://www.tv4play.se/iframe/video/3054113',
            'md5': 'cb837212f342d77cec06e6dad190e96d',
            'info_dict': {
                'id': '3054113',
                'ext': 'mp4',
                'title': 'Så här jobbar ficktjuvarna - se avslöjande bilder',
                'thumbnail': r're:^https?://.*\.jpg$',
                'description': 'Unika bilder avslöjar hur turisternas fickor vittjas mitt på Stockholms central. Två experter på ficktjuvarna avslöjar knepen du ska se upp för.',
                'timestamp': int,
                'upload_date': '20150130',
            },
        },
        {
            'url': 'http://www.tv4play.se/sport/3060959',
            'only_matching': True,
        },
        {
            'url': 'http://www.tv4play.se/film/2378136',
            'only_matching': True,
        },
        {
            'url': 'http://www.tv4play.se/barn/looney-tunes?video_id=3062412',
            'only_matching': True,
        },
    ]
    def _real_extract(self, url):
        video_id = self._match_id(url)
        # Basic metadata (title, description, duration, thumbnail).
        info = self._download_json(
            'http://www.tv4play.se/player/assets/%s.json' % video_id,
            video_id, 'Downloading video info JSON')
        title = info['title']
        subtitles = {}
        formats = []
        # http formats are linked with unresolvable host
        for kind in ('hls3', ''):
            data = self._download_json(
                'https://prima.tv4play.se/api/web/asset/%s/play.json' % video_id,
                video_id, 'Downloading sources JSON', query={
                    'protocol': kind,
                    'videoFormat': 'MP4+WEBVTT',
                })
            items = try_get(data, lambda x: x['playback']['items']['item'])
            if not items:
                continue
            # API returns a dict for a single item, a list for several.
            if isinstance(items, dict):
                items = [items]
            for item in items:
                manifest_url = item.get('url')
                if not isinstance(manifest_url, compat_str):
                    continue
                # Dispatch on manifest type: m3u8 -> HLS, f4m -> HDS (akamai),
                # webvtt -> subtitle track.
                ext = determine_ext(manifest_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', entry_protocol='m3u8_native',
                        m3u8_id=kind, fatal=False))
                elif ext == 'f4m':
                    formats.extend(self._extract_akamai_formats(
                        manifest_url, video_id, {
                            'hls': 'tv4play-i.akamaihd.net',
                        }))
                elif ext == 'webvtt':
                    subtitles = self._merge_subtitles(
                        subtitles, {
                            'sv': [{
                                'url': manifest_url,
                                'ext': 'vtt',
                            }]})
        if not formats and info.get('is_geo_restricted'):
            self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'description': info.get('description'),
            'timestamp': parse_iso8601(info.get('broadcast_date_time')),
            'duration': int_or_none(info.get('duration')),
            'thumbnail': info.get('image'),
            'is_live': info.get('is_live') is True,
        }
| unlicense |
cqw1/medical_db | dynamodb/setupDynamoDB.py | 1 | 3462 | # Copyright 2014. Amazon Web Services, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from boto.exception import JSONResponseError
from boto.dynamodb2.fields import KeysOnlyIndex
from boto.dynamodb2.fields import GlobalAllIndex
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.fields import RangeKey
from boto.dynamodb2.layer1 import DynamoDBConnection
from boto.dynamodb2.table import Table
import urllib2, json
def getDynamoDBConnection(config=None, endpoint=None, port=None, local=False, use_instance_metadata=False):
    """Build and return a boto DynamoDBConnection.

    With local=True, connects to a DynamoDB Local instance at
    endpoint:port over plain HTTP.  Otherwise the connection parameters
    are resolved in order of precedence: the [dynamodb] section of the
    provided config, the explicit endpoint argument, and finally (when
    use_instance_metadata is True) the region reported by the EC2
    instance-identity metadata document.
    """
    if local:
        # SECURITY NOTE: committing real AWS keys to source control is unsafe;
        # these should come from the environment or an IAM role.  The original
        # code also had the two values swapped: the 'AKIA...' string is an
        # access key ID (AKIA prefix), not a secret key.  DynamoDB Local does
        # not validate credentials, so correcting the assignment is safe.
        db = DynamoDBConnection(
            host=endpoint,
            port=port,
            aws_access_key_id='AKIAIZ2NKAVOD4UIJNVQ',
            aws_secret_access_key='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',
            is_secure=False)
        print("===============")
        print(db.list_tables())
        print("===============")
    else:
        params = {
            'is_secure': True
        }
        # Read from config file, if provided
        if config is not None:
            if config.has_option('dynamodb', 'region'):
                params['region'] = config.get('dynamodb', 'region')
            if config.has_option('dynamodb', 'endpoint'):
                params['host'] = config.get('dynamodb', 'endpoint')
            if config.has_option('dynamodb', 'aws_access_key_id'):
                params['aws_access_key_id'] = config.get('dynamodb', 'aws_access_key_id')
                params['aws_secret_access_key'] = config.get('dynamodb', 'aws_secret_access_key')
        # Use the endpoint specified on the command-line to trump the config file
        if endpoint is not None:
            params['host'] = endpoint
            if 'region' in params:
                del params['region']
        # Only auto-detect the DynamoDB endpoint if the endpoint was not
        # specified through other config
        if 'host' not in params and use_instance_metadata:
            response = urllib2.urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document').read()
            doc = json.loads(response)
            params['host'] = 'dynamodb.%s.amazonaws.com' % (doc['region'])
            if 'region' in params:
                del params['region']
        db = DynamoDBConnection(**params)
    return db
def createDevicesTable(db):
    """Create the "Devices" table (hash key "ManualName") and return it.

    If creation fails because the table already exists (boto signals this
    with a JSONResponseError), falls back to opening the existing table.
    Returns None when the table can neither be created nor opened.
    """
    # Bind the name up front: the original returned `devicesTable` from a
    # `finally` clause, which raised NameError whenever both the create and
    # the open attempt failed (the name was never assigned).  A plain return
    # at the end also avoids `return`-in-`finally` swallowing exceptions.
    devicesTable = None
    try:
        devicesTable = Table.create("Devices",
                                    schema=[HashKey("ManualName")],
                                    throughput={
                                        'read': 1,
                                        'write': 1
                                    },
                                    connection=db)
    except JSONResponseError:
        # Most likely the table already exists; try to open it instead.
        try:
            devicesTable = Table("Devices", connection=db)
        except Exception:
            print("Devices Table doesn't exist.")
    return devicesTable
#parse command line args for credentials and such
#for now just assume local is when args are empty
| apache-2.0 |
whn09/tensorflow | tensorflow/python/ops/nn_test.py | 29 | 34361 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for miscellaneous functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.ops.nn_impl import _compute_sampled_logits
from tensorflow.python.platform import test as test_lib
class ZeroFractionTest(test_lib.TestCase):
  """Checks nn_impl.zero_fraction against a NumPy reference."""

  def _ZeroFraction(self, x):
    """Reference implementation: fraction of entries in x equal to zero."""
    assert x.shape
    size = np.prod(x.shape)
    num_nonzero = np.count_nonzero(x.flatten())
    return 1.0 - num_nonzero / size

  def testZeroFraction(self):
    shape = [5, 17]
    inputs = np.random.randint(0, 2, size=shape).astype(np.float32)
    expected = self._ZeroFraction(inputs)
    with self.test_session():
      tensor = constant_op.constant(inputs)
      tensor.set_shape(shape)
      actual = nn_impl.zero_fraction(tensor).eval()
    self.assertAllClose(actual, expected, 1e-8)

  def testZeroFractionEmpty(self):
    # An empty input has no defined zero fraction; the op returns NaN.
    with self.test_session():
      result = nn_impl.zero_fraction(np.zeros(0)).eval()
    self.assertTrue(np.isnan(result))
class SoftmaxTest(test_lib.TestCase):
  """Value and gradient checks for nn_ops.softmax."""

  def _softmax(self, x):
    """NumPy reference softmax over the last axis of a 2-D array."""
    assert len(x.shape) == 2
    shifted = x - x.max(1)[:, np.newaxis]  # subtract row max for stability
    exps = np.exp(shifted)
    return exps / exps.sum(1)[:, np.newaxis]

  def testSoftmax(self):
    shape = [5, 10]
    logits = np.random.randn(*shape).astype(np.float32)
    expected = self._softmax(logits)
    with self.test_session():
      logits_t = constant_op.constant(logits)
      # Default axis and explicit last axis must agree for 2-D input.
      default_out = nn_ops.softmax(logits_t).eval()
      explicit_out = nn_ops.softmax(logits_t, 1).eval()
    tol = 1e-3
    self.assertAllClose(default_out, expected, tol)
    self.assertAllClose(explicit_out, expected, tol)

  def testGradient(self):
    shape = [5, 10]
    logits = np.random.randn(*shape).astype(np.float64)
    with self.test_session():
      logits_t = constant_op.constant(logits)
      softmax_t = nn_ops.softmax(logits_t)
      err = gradient_checker.compute_gradient_error(logits_t, shape,
                                                    softmax_t, shape)
    self.assertLess(err, 1e-8)
class LogPoissonLossTest(test_lib.TestCase):
  """Value and gradient checks for nn_impl.log_poisson_loss."""

  def _log_poisson_loss(self, x, z, compute_full_loss=False):
    """NumPy reference log-Poisson loss; optionally adds the Stirling term."""
    loss = np.exp(x) - z * x
    if compute_full_loss:
      # Stirling approximation of log(z!), applied only where z > 1.
      stirling = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
      loss += np.ma.masked_array(stirling, mask=(z <= 1)).filled(0.)
    return loss

  def testLogPoissonLoss(self):
    shape = [5, 10]
    log_rates = np.random.randn(*shape).astype(np.float32)
    targets = np.random.randint(0, 5, size=shape).astype(np.float32)
    expected = self._log_poisson_loss(log_rates, targets,
                                      compute_full_loss=False)
    expected_full = self._log_poisson_loss(log_rates, targets,
                                           compute_full_loss=True)
    with self.test_session():
      actual = nn_impl.log_poisson_loss(
          targets, log_rates, compute_full_loss=False).eval()
      actual_full = nn_impl.log_poisson_loss(
          targets, log_rates, compute_full_loss=True).eval()
    tol = 1e-3
    self.assertAllClose(actual, expected, tol)
    self.assertAllClose(actual_full, expected_full, tol)

  def testGradient(self):
    shape = [5, 10]
    log_rates = np.random.randn(*shape).astype(np.float64)
    targets = np.random.randint(0, 5, size=shape).astype(np.float64)
    with self.test_session():
      log_rates_t = constant_op.constant(log_rates)
      loss_t = nn_impl.log_poisson_loss(
          targets, log_rates_t, compute_full_loss=False)
      loss_full_t = nn_impl.log_poisson_loss(
          targets, log_rates_t, compute_full_loss=True)
      err = gradient_checker.compute_gradient_error(log_rates_t, shape,
                                                    loss_t, shape)
      err_full = gradient_checker.compute_gradient_error(log_rates_t, shape,
                                                         loss_full_t, shape)
    tol = 1e-6
    self.assertLess(err, tol)
    self.assertLess(err_full, tol)
class LogSoftmaxTest(test_lib.TestCase):
  """Value and gradient checks for nn_ops.log_softmax."""

  def _log_softmax(self, x):
    """NumPy reference log-softmax over the last axis of a 2-D array."""
    assert len(x.shape) == 2
    shifted = x - x.max(1)[:, np.newaxis]  # subtract row max for stability
    return shifted - np.log(np.sum(np.exp(shifted), 1, keepdims=True))

  def testLogSoftmax(self):
    shape = [5, 10]
    logits = np.random.randn(*shape).astype(np.float32)
    expected = self._log_softmax(logits)
    with self.test_session():
      actual = nn_ops.log_softmax(constant_op.constant(logits)).eval()
    self.assertAllClose(actual, expected, 1e-3)

  def testGradient(self):
    shape = [5, 10]
    logits = np.random.randn(*shape).astype(np.float64)
    with self.test_session():
      logits_t = constant_op.constant(logits)
      output_t = nn_ops.log_softmax(logits_t)
      err = gradient_checker.compute_gradient_error(logits_t, shape,
                                                    output_t, shape)
    self.assertLess(err, 1e-7)
class L2LossTest(test_lib.TestCase):
  """Value and gradient checks for nn_ops.l2_loss."""

  def testL2Loss(self):
    # l2_loss(x) = sum(x**2) / 2; for [1, 0, 3, 2] that is 14 / 2 = 7.
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.test_session():
        x = constant_op.constant(
            [1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x", dtype=dtype)
        value = nn_ops.l2_loss(x).eval()
      self.assertAllClose(7.0, value)

  def testGradient(self):
    x_shape = [20, 7, 3]
    np.random.seed(1)  # Make it reproducible.
    x_val = np.random.random_sample(x_shape).astype(np.float64)
    with self.test_session():
      x = constant_op.constant(x_val, name="x")
      output = nn_ops.l2_loss(x)
      err = gradient_checker.compute_gradient_error(x, x_shape, output, [1])
    print("L2Loss gradient err = %g " % err)
    self.assertLess(err, 1e-11)
class L2NormalizeTest(test_lib.TestCase):
  """Value and gradient checks for nn_impl.l2_normalize."""

  def _l2Normalize(self, x, dim):
    """NumPy reference: scale x to unit L2 norm along dim (int or list)."""
    if isinstance(dim, list):
      norm = np.linalg.norm(x, axis=tuple(dim))
      # Restore the reduced axes so the division broadcasts correctly.
      for d in dim:
        norm = np.expand_dims(norm, d)
      return x / norm
    norm = np.apply_along_axis(np.linalg.norm, dim, x)
    return x / np.expand_dims(norm, dim)

  def testL2Normalize(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float32)
    for axis in range(len(shape)):
      expected = self._l2Normalize(x_np, axis)
      with self.test_session():
        actual = nn_impl.l2_normalize(
            constant_op.constant(x_np, name="x"), axis).eval()
      self.assertAllClose(expected, actual)

  def testL2NormalizeDimArray(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float32)
    axes = [1, 2]
    expected = self._l2Normalize(x_np, axes)
    with self.test_session():
      actual = nn_impl.l2_normalize(
          constant_op.constant(x_np, name="x"), axes).eval()
    self.assertAllClose(expected, actual)

  def testL2NormalizeGradient(self):
    shape = [20, 7, 3]
    np.random.seed(1)
    x_np = np.random.random_sample(shape).astype(np.float64)
    for axis in range(len(shape)):
      with self.test_session():
        x_tf = constant_op.constant(x_np, name="x")
        y_tf = nn_impl.l2_normalize(x_tf, axis)
        err = gradient_checker.compute_gradient_error(x_tf, shape,
                                                      y_tf, shape)
      print("L2Normalize gradient err = %g " % err)
      self.assertLess(err, 1e-4)
class DropoutTest(test_lib.TestCase):
  """Statistical, shape, and validation checks for nn_ops.dropout."""

  def testDropout(self):
    """Checks the empirical keep rate and the {0, 1/keep_prob} value set."""
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.test_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob)
        final_count = 0
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        for _ in xrange(0, num_iter):
          value = dropout.eval()
          final_count += np.count_nonzero(value)
          # Verifies that there are only two values: 0 and 1/keep_prob.
          sorted_value = np.unique(np.sort(value))
          self.assertEqual(0, sorted_value[0])
          self.assertAllClose(1 / keep_prob, sorted_value[1])
        # Check that we are in the 15% error range
        expected_count = x_dim * y_dim * keep_prob * num_iter
        rel_error = math.fabs(final_count - expected_count) / expected_count
        print(rel_error)
        self.assertTrue(rel_error < 0.15)

  def testShapedDropout(self):
    """Same statistical check, with a broadcastable noise_shape of [x_dim, 1]."""
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability. This time with shaped
    # noise.
    x_dim = 40 * 30
    y_dim = 3
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.test_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        final_count = 0
        for _ in xrange(0, num_iter):
          value = dropout.eval()
          final_count += np.count_nonzero(value)
          # Verifies that there are only two values: 0 and 1/keep_prob.
          sorted_value = np.unique(np.sort(value))
          self.assertEqual(0, sorted_value[0])
          self.assertAllClose(1 / keep_prob, sorted_value[1])
        # Check that we are in the 15% error range
        expected_count = x_dim * y_dim * keep_prob * num_iter
        rel_error = math.fabs(final_count - expected_count) / expected_count
        print(rel_error)
        self.assertTrue(rel_error < 0.15)

  def testShapedDropoutCorrelation(self):
    """With noise_shape=[x_dim, 1], each row must be kept or dropped whole."""
    # Runs a shaped dropout and tests that the correlations are correct.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.test_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        for _ in xrange(0, num_iter):
          value = dropout.eval()
          # Verifies that each y column as only one type of activation.
          for i in xrange(x_dim):
            sorted_value = np.unique(np.sort(value[i, :]))
            self.assertEqual(sorted_value.size, 1)

  def testDropoutPlaceholderKeepProb(self):
    """Same statistical check, with keep_prob fed through a placeholder."""
    # Runs dropout with 0-1 tensor 10 times, sum the number of ones and validate
    # that it is producing approximately the right number of ones over a large
    # number of samples, based on the keep probability.
    x_dim = 40
    y_dim = 30
    num_iter = 10
    for keep_prob in [0.1, 0.5, 0.8]:
      with self.test_session():
        t = constant_op.constant(
            1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
        keep_prob_placeholder = array_ops.placeholder(dtypes.float32)
        dropout = nn_ops.dropout(t, keep_prob_placeholder)
        final_count = 0
        self.assertEqual([x_dim, y_dim], dropout.get_shape())
        for _ in xrange(0, num_iter):
          value = dropout.eval(feed_dict={keep_prob_placeholder: keep_prob})
          final_count += np.count_nonzero(value)
          # Verifies that there are only two values: 0 and 1/keep_prob.
          sorted_value = np.unique(np.sort(value))
          self.assertEqual(0, sorted_value[0])
          self.assertAllClose(1 / keep_prob, sorted_value[1])
        # Check that we are in the 15% error range
        expected_count = x_dim * y_dim * keep_prob * num_iter
        rel_error = math.fabs(final_count - expected_count) / expected_count
        print(rel_error)
        self.assertTrue(rel_error < 0.15)

  def testShapedDropoutUnknownShape(self):
    """A noise_shape fed via placeholder must preserve the static shape."""
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    x = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    dropout_x = nn_ops.dropout(
        x, keep_prob, noise_shape=array_ops.placeholder(dtypes.int32))
    self.assertEqual(x.get_shape(), dropout_x.get_shape())

  def testInvalidKeepProb(self):
    """keep_prob outside (0, 1] or of the wrong shape/dtype must raise."""
    x_dim = 40
    y_dim = 30
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, -1.0)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, 1.1)
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, [0.0, 1.0])
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float64))
    with self.assertRaises(ValueError):
      nn_ops.dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))

  def testShapedDropoutShapeError(self):
    """Incompatible noise_shape values must raise; broadcastable ones pass."""
    # Runs shaped dropout and verifies an error is thrown on misshapen noise.
    x_dim = 40
    y_dim = 30
    keep_prob = 0.5
    t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim + 3])
    with self.assertRaises(ValueError):
      _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim])
    # test that broadcasting proceeds
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, y_dim])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[x_dim, 1])
    _ = nn_ops.dropout(t, keep_prob, noise_shape=[1, 1])

  def testNoDropoutFast(self):
    """keep_prob == 1 must return the input tensor itself (identity fast path)."""
    x = array_ops.zeros((5,))
    for p in 1, constant_op.constant(1.0):
      y = nn_ops.dropout(x, keep_prob=p)
      self.assertTrue(x is y)
class ComputeSampledLogitsTest(test_lib.TestCase):
  """Compares _compute_sampled_logits (and the NCE / sampled-softmax losses
  built on it) against a NumPy reference, with and without sharded weights."""

  def setUp(self):
    # Small fixed problem dimensions shared by every test in this class.
    self._num_classes = 5
    self._dim = 10
    self._batch_size = 3
    self._num_shards = 3

  def _GenerateTestInputs(self):
    """Builds deterministic weights/biases/activations, plus sharded copies
    produced through a fixed-size variable partitioner."""
    np.random.seed(0)
    weights = np.random.randn(self._num_classes, self._dim).astype(np.float32)
    biases = np.random.randn(self._num_classes).astype(np.float32)
    hidden_acts = np.random.randn(self._batch_size,
                                  self._dim).astype(np.float32)
    with ops.Graph().as_default() as g:
      sharded_weights = variable_scope.get_variable(
          "w",
          partitioner=partitioned_variables.fixed_size_partitioner(
              self._num_shards),
          initializer=constant_op.constant(weights))
      sharded_biases = variable_scope.get_variable(
          "b",
          partitioner=partitioned_variables.fixed_size_partitioner(
              self._num_shards),
          initializer=constant_op.constant(biases))
      with self.test_session(graph=g) as sess:
        variables.global_variables_initializer().run()
        sharded_weights_v, sharded_biases_v = sess.run(
            [list(sharded_weights), list(sharded_biases)])
    return weights, biases, hidden_acts, sharded_weights_v, sharded_biases_v

  def _ComputeSampledLogitsNP(self,
                              true_w,
                              true_b,
                              sampled_w,
                              sampled_b,
                              hidden_acts,
                              num_true=1,
                              true_expected=None,
                              sampled_expected=None):
    """NumPy reference for sampled logits.

    Returns (logits, labels) where logits concatenates the true-class and
    sampled-class logits along axis 1; when expected counts are given the
    corresponding log-expectation is subtracted (the subtract_log_q path).
    """
    batch_size, dim = hidden_acts.shape
    true_logits = np.sum(hidden_acts.reshape(
        (batch_size, 1, dim)) * true_w.reshape((batch_size, num_true, dim)),
                         axis=2)
    true_b = true_b.reshape((batch_size, num_true))
    true_logits += true_b
    sampled_logits = np.dot(hidden_acts, sampled_w.T) + sampled_b
    if true_expected is not None:
      true_logits -= np.log(true_expected)
    if sampled_expected is not None:
      sampled_logits -= np.log(sampled_expected[np.newaxis, :])
    out_logits = np.concatenate([true_logits, sampled_logits], axis=1)
    # Labels: each true class gets weight 1/num_true, sampled classes get 0.
    out_labels = np.hstack((np.ones_like(true_logits) / num_true,
                            np.zeros_like(sampled_logits)))
    return out_logits, out_labels

  def _ComputeSampledLogitsTF(self,
                              weights,
                              biases,
                              hidden_acts,
                              labels,
                              num_sampled,
                              num_classes,
                              num_true,
                              sampled_vals,
                              subtract_log_q,
                              remove_accidental_hits,
                              name="sampled_loss_TF"):
    """Runs _compute_sampled_logits; accepts plain or sharded (list) params."""
    # Should be called from within a `with test_session():` block
    if isinstance(weights, list):
      weights_tf = [constant_op.constant(shard) for shard in weights]
    else:
      weights_tf = constant_op.constant(weights)
    if isinstance(biases, list):
      biases_tf = [constant_op.constant(shard) for shard in biases]
    else:
      biases_tf = constant_op.constant(biases)
    hidden_acts_tf = constant_op.constant(
        hidden_acts, shape=(self._batch_size, self._dim))
    labels_tf = constant_op.constant(
        labels, dtype=dtypes.int64, shape=(self._batch_size, num_true))
    pred_logits_tf, pred_labels_tf = _compute_sampled_logits(
        weights_tf,
        biases_tf,
        labels_tf,
        hidden_acts_tf,
        num_sampled,
        num_classes,
        num_true,
        sampled_vals,
        subtract_log_q=subtract_log_q,
        remove_accidental_hits=remove_accidental_hits,
        name=name,
        partition_strategy="div")
    return pred_logits_tf, pred_labels_tf

  def testComputeSampledLogitsShapes(self):
    # We just check that the shapes of the returned values are correct.
    weights, biases, hidden_acts, _, _ = self._GenerateTestInputs()
    sampled = [1, 0, 2, 3]
    num_sampled = len(sampled)
    true_exp = sampled_exp = [1., 1., 1., 1.]
    test_sampled_vals = (sampled, true_exp, sampled_exp)
    sampled_w, sampled_b = weights[sampled], biases[sampled]
    with self.test_session() as sess:
      for num_true_test in range(1, 5):
        labels = np.random.randint(
            low=0,
            high=self._num_classes,
            size=self._batch_size * num_true_test)
        true_w, true_b = weights[labels], biases[labels]
        logits_np, labels_np = self._ComputeSampledLogitsNP(
            true_w,
            true_b,
            sampled_w,
            sampled_b,
            hidden_acts,
            num_true=num_true_test)
        logits_tf, labels_tf = self._ComputeSampledLogitsTF(
            weights,
            biases,
            hidden_acts,
            labels,
            num_sampled,
            self._num_classes,
            num_true=num_true_test,
            sampled_vals=test_sampled_vals,
            remove_accidental_hits=True,
            subtract_log_q=False)
        logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
        self.assertEqual(logits_np.shape, logits_tf_val.shape)
        self.assertEqual(labels_np.shape, labels_tf_val.shape)

  def testComputeSampledLogitsValues(self):
    # Here we check the actual numerics.
    weights, biases, hidden_acts, sharded_weights, sharded_biases = (
        self._GenerateTestInputs())
    eps = 1e-3
    sampled = [1, 0, 2, 3]
    num_sampled = len(sampled)
    true_exp = np.empty([self._batch_size, 1], dtype=np.float32)
    true_exp.fill(0.5)
    sampled_exp = np.empty([num_sampled], dtype=np.float32)
    sampled_exp.fill(0.5)
    sampled_w, sampled_b = weights[sampled], biases[sampled]
    test_sampled_vals = (sampled, true_exp, sampled_exp)
    with self.test_session() as sess:
      for num_true_test in range(1, 5):
        # Generate test data for this run
        labels = np.random.randint(
            low=0,
            high=self._num_classes,
            size=self._batch_size * num_true_test)
        true_w, true_b = weights[labels], biases[labels]
        # Test 1: Without accidental hit removal or subtract_log_q
        logits_np, labels_np = self._ComputeSampledLogitsNP(
            true_w,
            true_b,
            sampled_w,
            sampled_b,
            hidden_acts,
            num_true=num_true_test)
        logits_tf, labels_tf = self._ComputeSampledLogitsTF(
            weights,
            biases,
            hidden_acts,
            labels,
            num_sampled,
            self._num_classes,
            num_true=num_true_test,
            sampled_vals=test_sampled_vals,
            subtract_log_q=False,
            remove_accidental_hits=False,
            name="sampled_loss_test1_num_true%d" % num_true_test)
        logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
        self.assertAllClose(logits_np, logits_tf_val, eps)
        self.assertAllClose(labels_np, labels_tf_val, eps)
        # Test 2: With accidental hit removal, no subtract_log_q
        logits_tf, labels_tf = self._ComputeSampledLogitsTF(
            weights,
            biases,
            hidden_acts,
            labels,
            num_sampled,
            self._num_classes,
            num_true=num_true_test,
            sampled_vals=test_sampled_vals,
            subtract_log_q=False,
            remove_accidental_hits=True,
            name="sampled_loss_test2_num_true%d" % num_true_test)
        # Test that the exponentiated logits of accidental hits are near 0.
        # First we need to find the hits in this random test run:
        labels_reshape = labels.reshape((self._batch_size, num_true_test))
        logits_tf_np = logits_tf.eval()
        for row in xrange(self._batch_size):
          row_labels = labels_reshape[row, :]
          for col in xrange(num_sampled):
            if sampled[col] in row_labels:
              # We need to add the num_true_test offset into logits_*
              self.assertNear(
                  np.exp(logits_tf_np[row, col + num_true_test]), 0., eps)
        # Test 3: With subtract_log_q, no accidental hit removal
        logits_np, labels_np = self._ComputeSampledLogitsNP(
            true_w,
            true_b,
            sampled_w,
            sampled_b,
            hidden_acts,
            num_true=num_true_test,
            true_expected=true_exp,
            sampled_expected=sampled_exp)
        logits_tf, labels_tf = self._ComputeSampledLogitsTF(
            weights,
            biases,
            hidden_acts,
            labels,
            num_sampled,
            self._num_classes,
            num_true=num_true_test,
            sampled_vals=test_sampled_vals,
            subtract_log_q=True,
            remove_accidental_hits=False,
            name="sampled_loss_test3_num_true%d" % num_true_test)
        logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
        self.assertAllClose(logits_np, logits_tf_val, eps)
        self.assertAllClose(labels_np, labels_tf_val, eps)
        # Test 4: Test 1, with sharded weights and sharded biases.
        logits_np, labels_np = self._ComputeSampledLogitsNP(
            true_w,
            true_b,
            sampled_w,
            sampled_b,
            hidden_acts,
            num_true=num_true_test)
        logits_tf, labels_tf = self._ComputeSampledLogitsTF(
            sharded_weights,
            sharded_biases,
            hidden_acts,
            labels,
            num_sampled,
            self._num_classes,
            num_true=num_true_test,
            sampled_vals=test_sampled_vals,
            subtract_log_q=False,
            remove_accidental_hits=False,
            name="sampled_loss_test1_num_true%d" % num_true_test)
        logits_tf_val, labels_tf_val = sess.run([logits_tf, labels_tf])
        self.assertAllClose(logits_np, logits_tf_val, eps)
        self.assertAllClose(labels_np, labels_tf_val, eps)

  def testNCELoss(self):
    # A simple test to verify the numerics.

    def _SigmoidCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      pred = 1. / (1. + np.exp(-logits))
      eps = 0.0001
      pred = np.minimum(np.maximum(pred, eps), 1 - eps)
      return -targets * np.log(pred) - (1. - targets) * np.log(1. - pred)

    weights, biases, hidden_acts, sharded_weights, sharded_biases = (
        self._GenerateTestInputs())
    labels = [0, 1, 2]
    true_w, true_b = weights[labels], biases[labels]
    sampled = [1, 0, 2, 3]
    num_sampled = len(sampled)
    true_exp = np.empty([self._batch_size, 1], dtype=np.float32)
    true_exp.fill(0.5)
    sampled_exp = np.empty([num_sampled], dtype=np.float32)
    sampled_exp.fill(0.5)
    sampled_w, sampled_b = weights[sampled], biases[sampled]
    test_sampled_vals = (sampled, true_exp, sampled_exp)
    with self.test_session():
      logits_np, labels_np = self._ComputeSampledLogitsNP(
          true_w,
          true_b,
          sampled_w,
          sampled_b,
          hidden_acts,
          true_expected=true_exp,
          sampled_expected=sampled_exp)
      nce_loss_np = np.sum(
          _SigmoidCrossEntropyWithLogits(logits_np, labels_np), 1)
      labels_tf = constant_op.constant(labels, shape=(self._batch_size, 1))
      weights_tf = constant_op.constant(weights)
      biases_tf = constant_op.constant(biases)
      inputs_tf = constant_op.constant(hidden_acts)
      nce_loss_tf = nn_impl.nce_loss(
          weights_tf,
          biases_tf,
          labels_tf,
          inputs_tf,
          num_sampled=num_sampled,
          num_classes=self._num_classes,
          num_true=1,
          sampled_values=test_sampled_vals,
          partition_strategy="div")
      self.assertAllClose(nce_loss_np, nce_loss_tf.eval(), 1e-4)
      # Test with sharded weights and sharded biases.
      nce_loss_tf = nn_impl.nce_loss(
          sharded_weights,
          sharded_biases,
          labels_tf,
          inputs_tf,
          num_sampled=num_sampled,
          num_classes=self._num_classes,
          num_true=1,
          sampled_values=test_sampled_vals,
          partition_strategy="div")
      self.assertAllClose(nce_loss_np, nce_loss_tf.eval(), 1e-4)

  def testSampledSoftmaxLoss(self):
    # A simple test to verify the numerics.

    def _SoftmaxCrossEntropyWithLogits(logits, targets):
      # logits, targets: float arrays of the same shape.
      assert logits.shape == targets.shape
      stable_exp_logits = np.exp(logits - np.amax(
          logits, axis=1, keepdims=True))
      pred = stable_exp_logits / np.sum(stable_exp_logits, 1, keepdims=True)
      return -np.sum(targets * np.log(pred + 1.0e-20), axis=1)

    weights, biases, hidden_acts, sharded_weights, sharded_biases = (
        self._GenerateTestInputs())
    labels = [0, 1, 2]
    true_w, true_b = weights[labels], biases[labels]
    sampled = [1, 0, 2, 3]
    num_sampled = len(sampled)
    true_exp = np.full([self._batch_size, 1], fill_value=0.5, dtype=np.float32)
    sampled_exp = np.full([num_sampled], fill_value=0.5, dtype=np.float32)
    sampled_w, sampled_b = weights[sampled], biases[sampled]
    test_sampled_vals = (sampled, true_exp, sampled_exp)
    with self.test_session():
      logits_np, labels_np = self._ComputeSampledLogitsNP(
          true_w,
          true_b,
          sampled_w,
          sampled_b,
          hidden_acts,
          true_expected=true_exp,
          sampled_expected=sampled_exp)
      sampled_softmax_loss_np = _SoftmaxCrossEntropyWithLogits(logits_np,
                                                               labels_np)
      labels_tf = constant_op.constant(labels, shape=(self._batch_size, 1))
      weights_tf = constant_op.constant(weights)
      biases_tf = constant_op.constant(biases)
      inputs_tf = constant_op.constant(hidden_acts)
      sampled_softmax_loss_tf = nn_impl.sampled_softmax_loss(
          weights=weights_tf,
          biases=biases_tf,
          labels=labels_tf,
          inputs=inputs_tf,
          num_sampled=num_sampled,
          num_classes=self._num_classes,
          num_true=1,
          sampled_values=test_sampled_vals,
          remove_accidental_hits=False,
          partition_strategy="div")
      self.assertAllClose(sampled_softmax_loss_np,
                          sampled_softmax_loss_tf.eval(), 1e-4)
      # Test with sharded weights and sharded biases.
      sampled_softmax_loss_tf = nn_impl.sampled_softmax_loss(
          weights=sharded_weights,
          biases=sharded_biases,
          labels=labels_tf,
          inputs=inputs_tf,
          num_sampled=num_sampled,
          num_classes=self._num_classes,
          num_true=1,
          sampled_values=test_sampled_vals,
          remove_accidental_hits=False,
          partition_strategy="div")
      self.assertAllClose(sampled_softmax_loss_np,
                          sampled_softmax_loss_tf.eval(), 1e-4)
class CReluTest(test_lib.TestCase):
  """Checks nn_ops.crelu against a NumPy reference."""

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.randn(3, 4).astype(np.float32)
    # CRelu concatenates relu(x) and relu(-x) along the last axis.
    expected = np.concatenate(
        [inputs * (inputs > 0), -inputs * (inputs < 0)], axis=1)
    with self.test_session():
      actual = nn_ops.crelu(constant_op.constant(inputs)).eval()
    self.assertAllClose(expected, actual, 1e-4)
class ReluTest(test_lib.TestCase):
  """Checks nn_ops.relu against NumPy, including NaN propagation."""

  def test(self):
    np.random.seed(1)  # Make it reproducible.
    inputs = np.random.randn(3, 4).astype(np.float32)
    expected = np.maximum(inputs, 0.0)
    with self.test_session():
      actual = nn_ops.relu(constant_op.constant(inputs)).eval()
    self.assertAllEqual(expected, actual)

  def testNaNs(self):
    # Test that relu(nan) = nan for various sizes.
    for size in range(18):
      nans = np.zeros(size) + np.nan
      with self.test_session():
        out = nn_ops.relu(constant_op.constant(nans)).eval()
      self.assertTrue(np.isnan(out).all())
class MomentsTest(test_lib.TestCase):
  """Checks nn_impl.moments against np.mean/np.var over various axes."""

  def doOutputTest(self, input_shape, moments_axes, tol=1e-4):
    """Compares moments() with NumPy over a grid of mean/stddev/keep_dims."""
    for mu in [0.0, 1.0, 1e3]:
      for sigma in [1.0, 0.1]:
        for keep_dims in [True, False]:
          input_values = np.random.rand(*input_shape) * sigma + mu
          expected_mean = np.mean(input_values, axis=moments_axes,
                                  keepdims=keep_dims)
          expected_var = np.var(input_values, axis=moments_axes,
                                keepdims=keep_dims)
          # A fresh graph per configuration keeps the runs independent.
          with ops.Graph().as_default() as g:
            with self.test_session(graph=g) as sess:
              inputs = constant_op.constant(input_values,
                                            shape=input_shape,
                                            dtype=dtypes.float32)
              mean, variance = nn_impl.moments(inputs,
                                               moments_axes,
                                               keep_dims=keep_dims)
              [mean, variance] = sess.run([mean, variance])
              # Make sure that there are no NaNs
              self.assertFalse(np.isnan(mean).any())
              self.assertFalse(np.isnan(variance).any())
              self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
              self.assertAllClose(variance, expected_var, rtol=tol, atol=tol)

  def testOutput2DInput0(self):
    self.doOutputTest((10, 300), (0,))

  def testOutput2DInput1(self):
    self.doOutputTest((10, 300), (1,))

  def testOutput2DInput01(self):
    self.doOutputTest((10, 300), (0, 1))

  def testOutput4DInput0(self):
    self.doOutputTest((10, 10, 10, 30), (0,))

  def testOutput4DInput1(self):
    self.doOutputTest((10, 10, 10, 30), (1,))

  def testOutput4DInput3(self):
    self.doOutputTest((10, 10, 10, 30), (3,))

  def testOutput4DInput012(self):
    self.doOutputTest((10, 10, 10, 30), (0, 1, 2))

  def testOutput4DInput123(self):
    self.doOutputTest((10, 10, 10, 30), (1, 2, 3))

  def testUnstableOutputShiftNone(self):
    """With shift=0 and a large mean, the variance estimate is known-unstable;
    this test asserts the instability (variance far from NumPy's)."""
    input_shape = (10, 300)
    moments_axes = (0, 1)
    mu, sigma = 1e3, 0.1
    tol = 1e-3
    input_values = np.random.rand(*input_shape) * sigma + mu
    expected_mean = np.mean(input_values, axis=moments_axes)
    expected_var = np.var(input_values, axis=moments_axes)
    with self.test_session() as sess:
      inputs = constant_op.constant(input_values, shape=input_shape,
                                    dtype=dtypes.float32)
      mean, variance = nn_impl.moments(inputs, moments_axes, shift=0.0)
      [mean, variance] = sess.run([mean, variance])
      # Make sure that there are no NaNs
      self.assertFalse(np.isnan(mean).any())
      self.assertFalse(np.isnan(variance).any())
      self.assertAllClose(mean, expected_mean, rtol=tol, atol=tol)
      # The variance is unstable
      self.assertGreater(np.abs(variance - expected_var), 0.1)
if __name__ == "__main__":
  # Run all TestCase classes in this module through the TensorFlow test runner.
  test_lib.main()
| apache-2.0 |
hopkinskong/android_kernel_htc_leo | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
# Map of pid -> comm name; pid 0 is the per-cpu idle task.
threads = {0: "idle"}


def thread_name(pid):
    """Return a human-readable "comm:pid" label for the given pid."""
    comm = threads[pid]
    return "%s:%d" % (comm, pid)
class RunqueueEventUnknown:
    """Placeholder event for runqueue transitions we cannot classify."""

    def __repr__(self):
        return "unknown"

    @staticmethod
    def color():
        # No dedicated color: the GUI falls back to its default.
        return None
class RunqueueEventSleep:
    """Event: a task blocked and left the runqueue."""

    def __init__(self, sleeper):
        self.sleeper = sleeper  # pid of the task that went to sleep

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)

    @staticmethod
    def color():
        # Blue marks sleep events in the GUI.
        return (0, 0, 0xff)
class RunqueueEventWakeup:
    """Event: a task was woken and joined the runqueue."""

    def __init__(self, wakee):
        self.wakee = wakee  # pid of the task that was woken up

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)

    @staticmethod
    def color():
        # Yellow marks wakeup events in the GUI.
        return (0xff, 0xff, 0)
class RunqueueEventFork:
    """Event: a newly forked task joined the runqueue."""

    def __init__(self, child):
        self.child = child  # pid of the newly forked task

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)

    @staticmethod
    def color():
        # Green marks fork events in the GUI.
        return (0, 0xff, 0)
class RunqueueMigrateIn:
    """Event: a task migrated onto this CPU's runqueue."""

    def __init__(self, new):
        self.new = new  # pid of the task that migrated in

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)

    @staticmethod
    def color():
        # Cyan-ish marks migrate-in events in the GUI.
        return (0, 0xf0, 0xff)
class RunqueueMigrateOut:
    """Runqueue event: a task migrated away from this cpu."""

    def __init__(self, old):
        # pid of the task that left
        self.old = old

    @staticmethod
    def color():
        return (0xff, 0, 0xff)

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Immutable snapshot of the set of tasks on one cpu runqueue.

    Each scheduler event produces a new snapshot; ``event`` records what
    caused the transition from the previous one. Pid 0 (idle) is always a
    member, which is why ``load`` is ``len(tasks) - 1``.
    """
    def __init__(self, tasks=(0,), event=RunqueueEventUnknown()):
        self.tasks = tuple(tasks)
        self.event = event

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot after a context switch prev -> next."""
        event = RunqueueEventUnknown()

        # Idle-preemption / reschedule: prev stays runnable and both tasks
        # are already on the queue, so nothing changed; reuse this snapshot.
        if taskState(prev_state) == "R" and next in self.tasks \
            and prev in self.tasks:
            return self

        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)

        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)

        if next not in next_tasks:
            next_tasks.append(next)

        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return the snapshot after task ``old`` migrated to another cpu."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # Task already present: only record the event on this snapshot.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return the snapshot after task ``new`` migrated onto this cpu."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return the snapshot after task ``new`` woke up on this cpu."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return the snapshot after a freshly forked task ``new`` appeared."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # BUG FIX: the original appended self.origin_tostring(), a method
        # that does not exist anywhere and raised AttributeError on every
        # repr; describe the event that produced this snapshot instead.
        ret = self.tasks.__repr__()
        ret += repr(self.event)
        return ret
class TimeSlice:
    """Per-cpu runqueue state over one [start, end] interval of the trace."""

    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is None:
            # Bootstrap slice: empty runqueues, nothing running anywhere.
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0
        else:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load consistent with the per-cpu change.
        self.total_load += new_rq.load() - old_rq.load()

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a context switch on ``cpu`` and register this slice."""
        before = self.prev.rqs[cpu]
        after = before.sched_switch(prev, prev_state, next)
        if before is after:
            return
        self.rqs[cpu] = after
        self.__update_total_load(before, after)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Apply a migration of task ``new`` between two cpus."""
        if old_cpu == new_cpu:
            return
        source = self.prev.rqs[old_cpu]
        out_rq = source.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(source, out_rq)

        target = self.prev.rqs[new_cpu]
        in_rq = target.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(target, in_rq)

        ts_list.append(self)
        if source is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Apply a wakeup (or fork wakeup) of ``pid`` on ``cpu``."""
        before = self.prev.rqs[cpu]
        after = before.wake_up_new(pid) if fork else before.wake_up(pid)
        if after is before:
            return
        self.rqs[cpu] = after
        self.__update_total_load(before, after)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        """Close this slice at time ``t`` and open its successor."""
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus lookup and painting helpers."""

    def __init__(self, arg=None):
        # BUG FIX: the original used a mutable default argument (arg=[]),
        # so every default-constructed instance shared one list.
        self.data = [] if arg is None else arg

    def get_time_slice(self, ts):
        """Return the slice starting at timestamp ``ts``.

        Derived from the last known slice, or from an empty bootstrap slice
        when the list is still empty."""
        if len(self.data) == 0:
            ts_slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            ts_slice = self.data[-1].next(ts)
        return ts_slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice with start <= ts <= end.

        Returns -1 when no slice covers ``ts``."""
        # BUG FIX: the original indexed data[0] even on an empty list.
        if not self.data:
            return -1
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # BUG FIX: use floor division; under Python 3 the original '/'
            # produced a float and broke indexing.
            i = (end + start) // 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i
            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        """Remember the GUI root window used for summary/paint callbacks."""
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a textual summary of the slice under the mouse cursor."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        # Timestamps are integers; keep integer division for py2/py3 parity.
        raw += "Timestamp : %d.%06d\n" % (ts.start // (10 ** 9), (ts.start % (10 ** 9)) // 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) // (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one cpu row of one slice, colored by its share of the load."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Paint every slice intersecting the [start, end] time window."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        # range() instead of the Python-2-only xrange().
        for i in range(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first start, last end) time span, or (0, 0) if empty."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest cpu number seen in the last slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Receives raw scheduler events and folds them into a TimeSliceList."""

    def __init__(self):
        # Last pid seen running on each cpu; -1 means "not known yet".
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
             next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
        we logged. Otherwise we may have missed traces """
        on_cpu_task = self.current_tsk[headers.cpu]
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            # BUG FIX: this was a Python-2-only print statement; the
            # parenthesized form prints the same text and also works
            # under Python 3.
            print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" %
                  (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid))

        # Keep the pid -> comm map up to date for thread_name().
        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm

        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a cpu-to-cpu migration of ``pid``."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a successful wakeup (or fork wakeup) of ``pid``."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """Called by perf once before the first event: create the global parser."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """Called by perf after the last event: show the result in the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")  # keep a reference while the loop runs
    app.MainLoop()
# perf generates one handler per tracepoint present in the trace
# (perf script -g python). The events below are not needed for the
# migration overview, so their handlers deliberately do nothing.
def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, runtime, vruntime):
    pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass
def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, delay):
    pass
def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        parent_comm, parent_pid, child_comm, child_pid):
    pass
def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
# The four handlers below carry the events this tool actually visualizes:
# they rebuild an EventHeaders object and forward to the global parser.
def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, orig_cpu,
        dest_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
             next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    # Last argument 1 marks this wakeup as a fork of a new task.
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio, success,
        target_cpu):
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
            common_pid, common_comm)
    # Last argument 0: an ordinary wakeup of an existing task.
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining generated handlers for events the visualization ignores.
def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid, prio):
    pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        ret):
    pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        comm, pid):
    pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    # Catch-all for any tracepoint without a dedicated handler.
    pass
| gpl-2.0 |
amanharitsh123/zulip | zerver/management/commands/set_default_streams.py | 1 | 1982 |
from argparse import ArgumentParser, RawTextHelpFormatter
from typing import Any, Dict, Text
from zerver.lib.actions import set_default_streams
from zerver.lib.management import ZulipBaseCommand
import sys
class Command(ZulipBaseCommand):
    help = """Set default streams for a realm
Users created under this realm will start out with these streams. This
command is not additive: if you re-run it on a realm with a different
set of default streams, those will be the new complete set of default
streams.
For example:
./manage.py set_default_streams --realm=foo --streams=foo,bar,baz
./manage.py set_default_streams --realm=foo --streams="foo,bar,baz with space"
./manage.py set_default_streams --realm=foo --streams=
"""

    # Fix support for multi-line usage
    def create_parser(self, *args, **kwargs):
        # type: (*Any, **Any) -> ArgumentParser
        """Use RawTextHelpFormatter so the multi-line help text above keeps
        its formatting instead of being re-wrapped by argparse."""
        parser = super(Command, self).create_parser(*args, **kwargs)
        parser.formatter_class = RawTextHelpFormatter
        return parser

    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        """Register the --streams option plus the shared realm arguments."""
        parser.add_argument('-s', '--streams',
                            dest='streams',
                            type=str,
                            help='A comma-separated list of stream names.')
        self.add_realm_args(parser, True)

    def handle(self, **options):
        # type: (**str) -> None
        """Replace the realm's default stream set with the given streams."""
        # BUG FIX: get_realm() was called twice; a single lookup suffices.
        realm = self.get_realm(options)
        if options["streams"] is None:
            # BUG FIX: the original backslash-continued string literal
            # embedded the continuation line's leading whitespace into the
            # user-visible message; implicit concatenation avoids that.
            print("Please provide a default set of streams (which can be "
                  "empty, with `--streams=`).", file=sys.stderr)
            exit(1)
        assert realm is not None  # Should be ensured by parser
        # Empty --streams= yields [''], which strips to the empty name and
        # therefore clears the default set, matching the documented usage.
        stream_dict = {
            stream.strip(): {"description": stream.strip(), "invite_only": False}
            for stream in options["streams"].split(",")
        }  # type: Dict[Text, Dict[Text, Any]]
        set_default_streams(realm, stream_dict)
| apache-2.0 |
samuroi/SamuROI | samuroi/samuroidata.py | 1 | 15848 | import numpy
import skimage
import skimage.filters
import skimage.morphology
from cached_property import cached_property
from .maskset import MaskSet
from .util.event import Event
class SamuROIData(object):
"""
This is the main data structure of SamuROI.
It combines various aspects of rois, overlays and the video data that is to be analysed.
The most important data attributes of this class are (see respective documentation for further info):
- :py:attr:`samuroi.SamuROIData.masks`
- :py:attr:`samuroi.SamuROIData.data`
- :py:attr:`samuroi.SamuROIData.threshold`
- :py:attr:`samuroi.SamuROIData.overlay`
- :py:attr:`samuroi.SamuROIData.postprocessor`
Whenever some of there attributes are changed via their property setter functions (e.g. `samudata.threshold = 5`)
those setters will emit a signal via some event object (see :py:class:`samuroi.util.event.Event`).
E.g. the events for the above attributes are:
- :py:attr:`samuroi.maskset.MaskSet.added` and :py:attr:`samuroi.maskset.MaskSet.removed`
- :py:attr:`samuroi.SamuROIData.data_changed`
- :py:attr:`samuroi.SamuROIData.update_threshold`
- :py:attr:`samuroi.SamuROIData.overlay_changed`
- :py:attr:`samuroi.SamuROIData.postprocessor_changed`
If one wants to get notified about any of those changes, one can simply connect to the events:
.. code-block:: python
def my_callback():
print "I got triggered :-D"
samudata.masks.added.append(my_callback)
In this manner GUI updates and other custom tasks can be completely separated from the data structure.
"""
    def __init__(self, data, morphology=None):
        """
        This function will set up the underlying data structure. If no morphology is provided, the morphology array will
        be generated as `numpy.max(data,axis=-1)`, i.e. a maximum projection over data along the time axis.
        :param data: 3D numpy array holding the video (height x width x frames).
        :param morphology: This can either be a 2D numpy array with the same shape as the video, or None.
        """
        # A postprocessor must exist before any trace is requested; start
        # with the identity transform.
        self.postprocessor = self.no_postprocessor
        # call the property setter which will initialize the mean data and threshold value
        self.data = data
        if morphology is None:
            # Default morphology: maximum projection along the time axis.
            self.morphology = numpy.max(data, axis=-1)
        else:
            # Setting the morphology also picks an initial threshold value.
            self.morphology = morphology
        # todo: the active frame is merely a utility to synchronize widgets. maybe it should go to the gui...
        self.active_frame = 0
    # The members below are lazily created on first access; cached_property
    # guarantees exactly one MaskSet/Event instance per document.
    @cached_property
    def masks(self):
        """
        A joined set of all masks of type :py:class:`samuroi.maskset.MaskSet`.
        Use `masks.remove(some_mask)` and `masks.add(some_mask)` to manipulate the set.
        Insertions and removements will trigger events that can be connected to.
        """
        return MaskSet()
    @cached_property
    def data_changed(self):
        """This is a signal which should be triggered whenever the underlying 3D numpy data has changed."""
        return Event()
    @cached_property
    def overlay_changed(self):
        """This is a signal which should be triggered whenever the 2D overlay mask has changed."""
        return Event()
    @cached_property
    def postprocessor_changed(self):
        """This is a signal which should be triggered whenever the postprocessor has changed."""
        return Event()
    @cached_property
    def active_frame_changed(self):
        """This is a signal which should be triggered whenever the active frame has changed."""
        return Event()
    @cached_property
    def threshold_changed(self):
        """This signal will be triggered when the threshold is changed."""
        return Event()
    @cached_property
    def morphology_changed(self):
        """This signal will be triggered when the morphology image changed."""
        return Event()
    @property
    def active_frame(self):
        """
        The number of the selected frame of the dataset.
        :getter: Get the active frame number.
        :setter: Change to some other frame. This will trigger the :py:attr:`samuroi.SamuROIData.active_frame_changed` event.
        :type: int in range `[0,n_frames(`
        """
        return self.__active_frame
    @active_frame.setter
    def active_frame(self, f):
        # data is height x width x frames, so shape[2] is the frame count.
        if not 0 <= f < self.data.shape[2]:
            raise Exception("Frame needs to be in range [0,{}[".format(self.data.shape[2]))
        self.__active_frame = int(f)
        self.active_frame_changed()
@property
def pixelmasks(self):
"""
:return: A generator object that allows iteration over all pixel masks in the document.
"""
from .masks.pixel import PixelMask
if PixelMask not in self.masks.types():
return
for i in self.masks[PixelMask]:
yield i
@property
def branchmasks(self):
"""
:return: A generator object that allows iteration over all branch masks in the document.
"""
from .masks.branch import BranchMask
if BranchMask not in self.masks.types():
return
for i in self.masks[BranchMask]:
yield i
@property
def circlemasks(self):
"""
:return: A generator object that allows iteration over all circle masks in the document.
"""
from .masks.circle import CircleMask
if CircleMask not in self.masks.types():
return
for i in self.masks[CircleMask]:
yield i
@property
def polymasks(self):
"""
:return: A generator object that allows iteration over all polygon masks in the document.
"""
from .masks.polygon import PolygonMask
if PolygonMask not in self.masks.types():
return
for i in self.masks[PolygonMask]:
yield i
@property
def segmentationmasks(self):
"""
:return: A generator object that allows iteration over all segmentation masks in the document.
"""
from .masks.segmentation import Segmentation
if Segmentation not in self.masks.types():
return
for i in self.masks[Segmentation]:
yield i
    @property
    def data(self):
        """
        The main video data onto which all masks get applied.
        :getter: Get the present video data.
        :setter: Change to some other video data. Changing the data will trigger the :py:attr:`samuroi.SamuROIData.data_changed` event.
        :type: 3d numpy array dtype should be float or int
        """
        return self.__data
    @data.setter
    def data(self, d):
        self.__data = d
        self.data_changed()
    @property
    def morphology(self):
        """
        An image which describes the static morphology. A good choice is to use the maximum projection over the non
        normalized data.
        :getter: obtain the morphology image.
        :setter: set the morphology image, will trigger the :py:attr:`samuroi.SamuROIData.morphology_changed` event.
        :type: 2D numpy array with same image shape as data.
        """
        return self.__morphology
    @morphology.setter
    def morphology(self, morphology):
        # The morphology must match a single video frame (height x width).
        if (morphology.shape != self.data.shape[0:2]):
            raise Exception("Invalid morphology shape.")
        self.__morphology = morphology
        # choose some appropriate new threshold value
        # (setting the threshold also recomputes the overlay)
        self.threshold = numpy.percentile(self.morphology.flatten(), q=90)
        self.morphology_changed()
    @property
    def overlay(self):
        """
        The overlay is a binary mask with the same shape as the video data such that it can be applied to every frame
        of the video data. One can automatically set an overlay via :py:attr:`samuroi.SamuROIData.threshold`, or provide a custom overlay.
        :getter: Get the present overlay
        :setter: Set the overlay to given binary mask. This will trigger overlay_changed.
        :type: numpy.ndarray(dtype=bool,shape=self.data.shape[0:2])
        """
        return self.__overlay
    @overlay.setter
    def overlay(self, m):
        # Validate shape and dtype before accepting: listeners rely on a
        # boolean mask matching a single video frame.
        if m.shape != self.data.shape[0:2]:
            raise Exception("Mask and data shape need to match.")
        if m.dtype != bool:
            raise Exception("Mask dtype needs to be boolean")
        self.__overlay = m
        self.overlay_changed()
    @property
    def threshold(self):
        """
        The threshold value controls the overlay mask.
        Higher threshold values will exclude larger areas.
        Lower threshold values are less restrictive. The threshold value will be initialized to the 90percentile of the
        mean data.
        :getter: Get the present threshold value
        :setter: Set the threshold value. This will trigger a recalculation of :py:attr:`samuroi.SamuROIData.overlay` which in
        turn will trigger overlay_changed.
        :type: float
        """
        return self.__threshold
    @threshold.setter
    def threshold(self, t):
        self.__threshold = t
        # Notify listeners of the new value before recomputing the overlay.
        self.threshold_changed()
        # Watershed segmentation of the morphology image: pixels below t
        # seed "background" (label 1), pixels above 1.1*t seed "foreground"
        # (label 2); the watershed on the sobel elevation map assigns the
        # remaining pixels to one of the two labels.
        # NOTE(review): skimage.morphology.watershed was moved to
        # skimage.segmentation.watershed in newer scikit-image releases --
        # confirm the pinned version still provides this alias.
        elevation_map = skimage.filters.sobel(self.morphology)
        markers = numpy.zeros_like(self.morphology)
        markers[self.morphology < self.threshold] = 1
        markers[self.morphology > self.threshold * 1.1] = 2
        segmentation = skimage.morphology.watershed(elevation_map, markers)
        self.overlay = segmentation == 2
    @property
    def no_postprocessor(self):
        """
        :return: A default postprocessor which does nothing.
        """
        # A fresh identity function is created on each access.
        def identity(x):
            return x
        return identity
    @property
    def postprocessor(self):
        """
        A postprocessor is a function which can be applied on traces.
        It takes a 1D numpy array as argument and returns a transformed array with the same shape.
        Defaults to :py:attr:`samuroi.SamuROIData.no_postprocessor`.
        :getter: get the function object.
        :setter: set the function object. Will trigger postprocessor_changed event.
        :type: callable object (1D numpy array -> 1D numpy array)
        """
        return self.__postprocessor
    @postprocessor.setter
    def postprocessor(self, pp):
        self.__postprocessor = pp
        self.postprocessor_changed()
    def save_hdf5(self, filename, mask=True, pixels=True, branches=True, circles=True, polygons=True, data=False,
                  traces=True, segmentations=True):
        """
        The structure of the hdf5 file will be as follows:
        - overlay (dataset, optional, binary mask defined by threshold value, threshold is stored as attribute)
        - data (dataset, optional, the full 3D dataset from which the traces were generated)
        - branches/circles... (groups holding different kinds of datasets for masks)
        - traces (group that holds a hierarchy for the traces.)
        :param filename: filename to use, suffix ".h5" will be added if missing.
        :param mask: flag whether mask should be stored in file.
        :param pixels: flag whether pixel masks should be stored in file.
        :param branches: flag whether branch masks should be stored in file.
        :param circles: flag whether circle masks should be stored in file.
        :param polygons: flag whether polygon masks should be stored in file.
        :param data: flag whether data should be stored in file.
        :param traces: flag whether traces should be stored in file.
        :param segmentations: flag whether segmentation masks should be stored in file.
        :return:
        """
        import h5py
        f = h5py.File(filename, mode='w')
        # Start from a clean file even if it existed before.
        f.clear()
        if mask:
            f.create_dataset('overlay', data=self.overlay)
            # Store the threshold alongside so loading can restore it.
            f['overlay'].attrs['threshold'] = self.threshold
        if data:
            f.create_dataset('data', data=self.data)
        if pixels:
            for m in self.pixelmasks:
                m.to_hdf5(f)
        if polygons:
            for m in self.polymasks:
                m.to_hdf5(f)
        if circles:
            for m in self.circlemasks:
                m.to_hdf5(f)
        if branches:
            for m in self.branchmasks:
                m.to_hdf5(f)
        if segmentations:
            for m in self.segmentationmasks:
                m.to_hdf5(f)
        if traces:
            f.create_group('traces')
            for m in self.masks:
                # Apply the mask to the video and run the postprocessor.
                trace = self.postprocessor(m(self.data, self.overlay))
                if hasattr(m, "children"):
                    # Masks with children get a group with the trace inside.
                    if 'traces/' + m.name not in f:
                        f.create_group('traces/' + m.name)
                    f.create_dataset('traces/' + m.name + '/trace', data=trace)
                else:
                    f.create_dataset('traces/' + m.name, data=trace)
            for m in self.branchmasks:
                if len(m.children) > 0:
                    f.create_dataset('traces/' + m.name + '/linescan', data=m.linescan(self.data, self.overlay))
        # write stuff to disc
        f.close()
    def load_swc(self, swc):
        """
        Load the content from the given swc object.
        Branches with only a single coordinate will be loaded as circles.
        Branches with more than one coordinate as "tubes".
        :param swc: A object of type :py:class:`samuroi.plugins.swc.SWCFile`.
        """
        # get all parts from the swc file that have at least one segment
        from .masks.circle import CircleMask
        from .masks.branch import BranchMask
        for b in swc.branches:
            if len(b) > 1:
                mask = BranchMask(data=b)
            else:
                # Single-point branch: represent it as a circle at that point.
                mask = CircleMask(center=b[['x', 'y']][0], radius=b['radius'][0])
            self.masks.add(mask)
def load_hdf5(self, filename, mask=True, pixels=True, branches=True, circles=True, polygons=True, data=True, segmentations=True):
"""
Load data that from hd5 file.
:param filename: The filename/path to read from (include extension)
:param mask: flag whether to read the mask if it is stored in file.
:param pixels: flag whether to read the pixel masks if some are stored in file.
:param branches: flag whether to read the branch masks if some are stored in file.
:param circles: flag whether to read the circle masks if some are stored in file.
:param polygons: flag whether to read the polygon masks if some are stored in file.
:param data: flag whether to read the data if it is stored in file.
:param segmentations: flag whether to read the segmentations if it is stored in file.
"""
from .masks.pixel import PixelMask
from .masks.branch import BranchMask
from .masks.circle import CircleMask
from .masks.polygon import PolygonMask
from .masks.segmentation import Segmentation
import h5py
with h5py.File(filename, mode='r') as f:
if mask:
if 'overlay' not in f:
raise Exception("Overlay data not stored in given hd5 file.")
self.threshold = f['overlay'].attrs['threshold']
if (self.overlay != f['overlay']).any():
print("Warning: overlay threshold does not match with stored binary mask!")
self.overlay = f['overlay'].value
if data:
if 'data' not in f:
raise Exception("Data not stored in given hd5 file.")
self.data = f['data'].value
if pixels:
for m in PixelMask.from_hdf5(f):
self.masks.add(m)
if polygons:
for m in PolygonMask.from_hdf5(f):
self.masks.add(m)
if circles:
for m in CircleMask.from_hdf5(f):
self.masks.add(m)
if branches:
for m in BranchMask.from_hdf5(f):
self.masks.add(m)
if segmentations:
for m in Segmentation.from_hdf5(f):
self.masks.add(m)
| mit |
mesheven/pyOCD | pyocd/coresight/fpb.py | 1 | 5318 | """
mbed CMSIS-DAP debugger
Copyright (c) 2015-2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
from .component import CoreSightComponent
from ..debug.breakpoints.provider import (Breakpoint, BreakpointProvider)
import logging
class HardwareBreakpoint(Breakpoint):
    """One FPB comparator slot used as a hardware breakpoint."""

    def __init__(self, comp_register_addr, provider):
        super(HardwareBreakpoint, self).__init__(provider)
        # Address of the FP_COMPx register that backs this breakpoint.
        self.comp_register_addr = comp_register_addr
        self.type = Target.BREAKPOINT_HW
class FPB(BreakpointProvider, CoreSightComponent):
    """Flash Patch and Breakpoint unit: provides hardware breakpoints."""

    # FPB register addresses and FP_CTRL fields.
    FP_CTRL = 0xE0002000
    FP_CTRL_KEY = 1 << 1  # KEY bit; written as 1 on every FP_CTRL write below
    FP_CTRL_REV_MASK = 0xf0000000
    FP_CTRL_REV_SHIFT = 28
    FP_COMP0 = 0xE0002008

    @classmethod
    def factory(cls, ap, cmpid, address):
        """Create an FPB instance and attach it to the AP's core."""
        fpb = cls(ap, cmpid, address)
        assert ap.core
        ap.core.connect(fpb)
        return fpb

    def __init__(self, ap, cmpid=None, addr=None):
        CoreSightComponent.__init__(self, ap, cmpid, addr)
        BreakpointProvider.__init__(self)
        assert self.address == FPB.FP_CTRL, "Unexpected FPB base address 0x%08x" % self.address
        self.hw_breakpoints = []        # one HardwareBreakpoint per comparator slot
        self.nb_code = 0                # number of instruction address comparators
        self.nb_lit = 0                 # number of literal comparators
        self.num_hw_breakpoint_used = 0
        self.enabled = False
        self.fpb_rev = 1

    @property
    def revision(self):
        # FPB architecture revision (1 or 2); valid after init().
        return self.fpb_rev

    ## @brief Inits the FPB.
    #
    # Reads the number of hardware breakpoints available on the core and disable the FPB
    # (Flash Patch and Breakpoint Unit), which will be enabled when the first breakpoint is set.
    def init(self):
        # setup FPB (breakpoint)
        fpcr = self.ap.read32(FPB.FP_CTRL)
        # REV field in FP_CTRL[31:28]: 0 -> FPBv1, 1 -> FPBv2.
        self.fpb_rev = 1 + ((fpcr & FPB.FP_CTRL_REV_MASK) >> FPB.FP_CTRL_REV_SHIFT)
        if self.fpb_rev not in (1, 2):
            logging.warning("Unknown FPB version %d", self.fpb_rev)
        # NUM_CODE is split across two FP_CTRL fields: the high nibble comes
        # from bits [14:12], the low nibble from bits [7:4].
        self.nb_code = ((fpcr >> 8) & 0x70) | ((fpcr >> 4) & 0xF)
        # NOTE(review): ARMv7-M defines NUM_LIT as FP_CTRL[11:8]; this shift
        # (>> 7) looks off by one -- confirm against the architecture manual.
        self.nb_lit = (fpcr >> 7) & 0xf
        logging.info("%d hardware breakpoints, %d literal comparators", self.nb_code, self.nb_lit)
        for i in range(self.nb_code):
            self.hw_breakpoints.append(HardwareBreakpoint(FPB.FP_COMP0 + 4*i, self))

        # disable FPB (will be enabled on first bp set)
        self.disable()
        for bp in self.hw_breakpoints:
            self.ap.write_memory(bp.comp_register_addr, 0)

    def bp_type(self):
        # This provider implements hardware breakpoints only.
        return Target.BREAKPOINT_HW

    def enable(self):
        self.ap.write_memory(FPB.FP_CTRL, FPB.FP_CTRL_KEY | 1)
        self.enabled = True
        logging.debug('fpb has been enabled')
        return

    def disable(self):
        self.ap.write_memory(FPB.FP_CTRL, FPB.FP_CTRL_KEY | 0)
        self.enabled = False
        logging.debug('fpb has been disabled')
        return

    def available_breakpoints(self):
        # Number of comparator slots not currently in use.
        return len(self.hw_breakpoints) - self.num_hw_breakpoint_used

    ## @brief Test whether an address is supported by the FPB.
    #
    # For FPBv1, hardware breakpoints are only supported in the range 0x00000000 - 0x1fffffff.
    # This was fixed for FPBv2, which supports hardware breakpoints at any address.
    def can_support_address(self, addr):
        return (self.fpb_rev == 2) or (addr < 0x20000000)

    ## @brief Set a hardware breakpoint at a specific location in flash.
    #
    # Returns the HardwareBreakpoint on success, or None when the address is
    # out of range or all comparator slots are occupied.
    def set_breakpoint(self, addr):
        if not self.enabled:
            self.enable()
        if not self.can_support_address(addr):
            logging.error('Breakpoint out of range 0x%X', addr)
            return None
        if self.available_breakpoints() == 0:
            logging.error('No more available breakpoint!!, dropped bp at 0x%X', addr)
            return None
        for bp in self.hw_breakpoints:
            if not bp.enabled:
                bp.enabled = True
                comp = 0
                if self.fpb_rev == 1:
                    # FPBv1 comparator encoding: bits [31:30] select which
                    # halfword of the word matches (01 = lower, 10 = upper),
                    # bits [28:2] hold the word address, bit 0 enables it.
                    bp_match = (1 << 30)
                    if addr & 0x2:
                        bp_match = (2 << 30)
                    comp = addr & 0x1ffffffc | bp_match | 1
                elif self.fpb_rev == 2:
                    # FPBv2 comparator: halfword-aligned address plus enable bit.
                    comp = (addr & 0xfffffffe) | 1
                self.ap.write32(bp.comp_register_addr, comp)
                logging.debug("BP: wrote 0x%08x to comp @ 0x%08x", comp, bp.comp_register_addr)
                bp.addr = addr
                self.num_hw_breakpoint_used += 1
                return bp
        return None

    ## @brief Remove a hardware breakpoint at a specific location in flash.
    #
    # Matches by address; does nothing when no enabled breakpoint matches.
    def remove_breakpoint(self, bp):
        for hwbp in self.hw_breakpoints:
            if hwbp.enabled and hwbp.addr == bp.addr:
                hwbp.enabled = False
                self.ap.write_memory(hwbp.comp_register_addr, 0)
                self.num_hw_breakpoint_used -= 1
                return
| apache-2.0 |
JackDanger/sentry | src/sentry/south_migrations/0205_auto__add_field_organizationmember_role.py | 11 | 38391 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the 'role' column (varchar(32), default 'member')
        to the sentry_organizationmember table."""
        # Adding field 'OrganizationMember.role'
        db.add_column('sentry_organizationmember', 'role',
                      self.gf('django.db.models.fields.CharField')(default='member', max_length=32),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'OrganizationMember.role'
db.delete_column('sentry_organizationmember', 'role')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'),)"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
    complete_apps = ['sentry']
import os
from django.conf import settings
from django.core.cache import get_cache
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.db.backends.sqlite3.creation import DatabaseCreation
class SpatiaLiteCreation(DatabaseCreation):
    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        This method is overloaded to load up the SpatiaLite initialization
        SQL prior to calling the `syncdb` command.
        """
        if verbosity >= 1:
            print "Creating test database '%s'..." % self.connection.alias
        test_database_name = self._create_test_db(verbosity, autoclobber)
        # Re-point this connection at the freshly created test database.
        self.connection.close()
        self.connection.settings_dict["NAME"] = test_database_name
        # Confirm the feature set of the test database
        self.connection.features.confirm()
        # Need to load the SpatiaLite initialization SQL before running `syncdb`.
        self.load_spatialite_sql()
        call_command('syncdb', verbosity=verbosity, interactive=False, database=self.connection.alias)
        # syncdb does not manage cache tables; recreate any database-backed
        # cache tables that this database alias is allowed to hold.
        for cache_alias in settings.CACHES:
            cache = get_cache(cache_alias)
            if isinstance(cache, BaseDatabaseCache):
                from django.db import router
                if router.allow_syncdb(self.connection.alias, cache.cache_model_class):
                    call_command('createcachetable', cache._table, database=self.connection.alias)
        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database.
        cursor = self.connection.cursor()
        return test_database_name
    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField
        output = super(SpatiaLiteCreation, self).sql_indexes_for_field(model, f, style)
        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            # Register the geometry column in SpatiaLite's metadata via
            # AddGeometryColumn(table, column, srid, type, dims, not-null).
            output.append(style.SQL_KEYWORD('SELECT ') +
                          style.SQL_TABLE('AddGeometryColumn') + '(' +
                          style.SQL_TABLE(gqn(db_table)) + ', ' +
                          style.SQL_FIELD(gqn(f.column)) + ', ' +
                          style.SQL_FIELD(str(f.srid)) + ', ' +
                          style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                          style.SQL_KEYWORD(str(f.dim)) + ', ' +
                          style.SQL_KEYWORD(str(int(not f.null))) +
                          ');')
            if f.spatial_index:
                # Build a spatial index on the geometry column.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('CreateSpatialIndex') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ');')
        return output
def load_spatialite_sql(self):
"""
This routine loads up the SpatiaLite SQL file.
"""
# Getting the location of the SpatiaLite SQL file, and confirming
# it exists.
spatialite_sql = self.spatialite_init_file()
if not os.path.isfile(spatialite_sql):
raise ImproperlyConfigured('Could not find the required SpatiaLite initialization '
'SQL file (necessary for testing): %s' % spatialite_sql)
# Opening up the SpatiaLite SQL initialization file and executing
# as a script.
sql_fh = open(spatialite_sql, 'r')
try:
cur = self.connection._cursor()
cur.executescript(sql_fh.read())
finally:
sql_fh.close()
def spatialite_init_file(self):
# SPATIALITE_SQL may be placed in settings to tell GeoDjango
# to use a specific path to the SpatiaLite initilization SQL.
return getattr(settings, 'SPATIALITE_SQL',
'init_spatialite-%s.%s.sql' %
self.connection.ops.spatial_version[:2])
| gpl-3.0 |
erichegt/askbot-devel | askbot/migrations/0116_auto__add_field_groupprofile_logo_url__add_unique_emailfeedsetting_sub.py | 17 | 26339 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Applies the migration: adds the GroupProfile.logo_url column and a
        unique (subscriber, feed_type) constraint on EmailFeedSetting."""
        # Adding field 'GroupProfile.logo_url'
        db.add_column('askbot_groupprofile', 'logo_url',
                      self.gf('django.db.models.fields.URLField')(max_length=200, null=True),
                      keep_default=False)
        # Adding unique constraint on 'EmailFeedSetting', fields ['subscriber', 'feed_type']
        db.create_unique('askbot_emailfeedsetting', ['subscriber_id', 'feed_type'])
    def backwards(self, orm):
        """Reverts the migration in reverse order of forwards(): drops the
        unique constraint first, then the logo_url column."""
        # Removing unique constraint on 'EmailFeedSetting', fields ['subscriber', 'feed_type']
        db.delete_unique('askbot_emailfeedsetting', ['subscriber_id', 'feed_type'])
        # Deleting field 'GroupProfile.logo_url'
        db.delete_column('askbot_groupprofile', 'logo_url')
models = {
'askbot.activity': {
'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True'}),
'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'recipients': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'incoming_activity'", 'symmetrical': 'False', 'through': "orm['askbot.ActivityAuditStatus']", 'to': "orm['auth.User']"}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.activityauditstatus': {
'Meta': {'unique_together': "(('user', 'activity'),)", 'object_name': 'ActivityAuditStatus'},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Activity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.anonymousanswer': {
'Meta': {'object_name': 'AnonymousAnswer'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_answers'", 'to': "orm['askbot.Post']"}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.anonymousquestion': {
'Meta': {'object_name': 'AnonymousQuestion'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'askbot.award': {
'Meta': {'object_name': 'Award', 'db_table': "u'award'"},
'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.BadgeData']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"})
},
'askbot.badgedata': {
'Meta': {'ordering': "('slug',)", 'object_name': 'BadgeData'},
'awarded_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'awarded_to': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'badges'", 'symmetrical': 'False', 'through': "orm['askbot.Award']", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'askbot.emailfeedsetting': {
'Meta': {'unique_together': "(('subscriber', 'feed_type'),)", 'object_name': 'EmailFeedSetting'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reported_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notification_subscriptions'", 'to': "orm['auth.User']"})
},
'askbot.favoritequestion': {
'Meta': {'object_name': 'FavoriteQuestion', 'db_table': "u'favorite_question'"},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Thread']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_favorite_questions'", 'to': "orm['auth.User']"})
},
'askbot.groupmembership': {
'Meta': {'object_name': 'GroupMembership'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_memberships'", 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_memberships'", 'to': "orm['auth.User']"})
},
'askbot.groupprofile': {
'Meta': {'object_name': 'GroupProfile'},
'group_tag': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'group_profile'", 'unique': 'True', 'to': "orm['askbot.Tag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'askbot.markedtag': {
'Meta': {'object_name': 'MarkedTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_selections'", 'to': "orm['askbot.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tag_selections'", 'to': "orm['auth.User']"})
},
'askbot.post': {
'Meta': {'object_name': 'Post'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['auth.User']"}),
'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'html': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_posts'", 'null': 'True', 'to': "orm['auth.User']"}),
'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'old_answer_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_comment_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'old_question_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'post_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}),
'text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'posts'", 'null': 'True', 'blank': 'True', 'to': "orm['askbot.Thread']"}),
'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'vote_up_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'askbot.postrevision': {
'Meta': {'ordering': "('-revision',)", 'unique_together': "(('post', 'revision'),)", 'object_name': 'PostRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'postrevisions'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'revisions'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'revised_at': ('django.db.models.fields.DateTimeField', [], {}),
'revision': ('django.db.models.fields.PositiveIntegerField', [], {}),
'revision_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'tagnames': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '125', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
},
'askbot.questionview': {
'Meta': {'object_name': 'QuestionView'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'viewed'", 'to': "orm['askbot.Post']"}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_views'", 'to': "orm['auth.User']"})
},
'askbot.replyaddress': {
'Meta': {'object_name': 'ReplyAddress'},
'address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'allowed_from_email': ('django.db.models.fields.EmailField', [], {'max_length': '150'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reply_addresses'", 'to': "orm['askbot.Post']"}),
'response_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edit_addresses'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.repute': {
'Meta': {'object_name': 'Repute', 'db_table': "u'repute'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['askbot.Post']", 'null': 'True', 'blank': 'True'}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'reputation_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'reputed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'askbot.tag': {
'Meta': {'ordering': "('-used_count', 'name')", 'object_name': 'Tag', 'db_table': "u'tag'"},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_tags'", 'to': "orm['auth.User']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_tags'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'tag_wiki': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'described_tag'", 'unique': 'True', 'null': 'True', 'to': "orm['askbot.Post']"}),
'used_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.thread': {
'Meta': {'object_name': 'Thread'},
'accepted_answer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['askbot.Post']"}),
'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'answer_accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'answer_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'close_reason': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'closed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'closed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'favorited_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'unused_favorite_threads'", 'symmetrical': 'False', 'through': "orm['askbot.FavoriteQuestion']", 'to': "orm['auth.User']"}),
'favourite_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'followed_by': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followed_threads'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_activity_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unused_last_active_in_threads'", 'to': "orm['auth.User']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['askbot.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'askbot.vote': {
'Meta': {'unique_together': "(('user', 'voted_post'),)", 'object_name': 'Vote', 'db_table': "u'vote'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {}),
'voted_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'voted_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'votes'", 'to': "orm['askbot.Post']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['askbot'] | gpl-3.0 |
slyphon/pants | src/python/pants/backend/jvm/tasks/ivy_resolve.py | 1 | 7791 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import time
from textwrap import dedent
from pants.backend.jvm.ivy_utils import IvyUtils
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.binaries import binary_util
from pants.invalidation.cache_manager import VersionedTargetSet
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_property
from pants.util.strutil import safe_shlex_split
class IvyResolve(IvyTaskMixin, NailgunTask):
  @classmethod
  def register_options(cls, register):
    """Registers ivy resolve options and the xalan tool used for html reports."""
    super(IvyResolve, cls).register_options(register)
    register('--override', action='append',
             fingerprint=True,
             help='Specifies a jar dependency override in the form: '
                  '[org]#[name]=(revision|url) '
                  'Multiple overrides can be specified using repeated invocations of this flag. '
                  'For example, to specify 2 overrides: '
                  '--override=com.foo#bar=0.1.2 '
                  '--override=com.baz#spam=file:///tmp/spam.jar ')
    register('--report', action='store_true', default=False,
             help='Generate an ivy resolve html report')
    register('--open', action='store_true', default=False,
             help='Attempt to open the generated ivy resolve report '
                  'in a browser (implies --report)')
    register('--outdir', help='Emit ivy report outputs in to this directory.')
    register('--args', action='append',
             fingerprint=True,
             help='Pass these extra args to ivy.')
    register('--confs', action='append', default=['default'],
             help='Pass a configuration to ivy in addition to the default ones.')
    register('--mutable-pattern',
             fingerprint=True,
             help='If specified, all artifact revisions matching this pattern will be treated as '
                  'mutable unless a matching artifact explicitly marks mutable as False.')
    # xalan converts ivy's xml reports into the html emitted by --report.
    cls.register_jvm_tool(register,
                          'xalan',
                          classpath=[
                            JarDependency(org='xalan', name='xalan', rev='2.7.1'),
                          ])
@classmethod
def product_types(cls):
return ['compile_classpath']
  @classmethod
  def prepare(cls, options, round_manager):
    """Declares the upstream products this task consumes."""
    super(IvyResolve, cls).prepare(options, round_manager)
    # NOTE(review): requiring the 'java' and 'scala' products presumably
    # schedules this task after the tasks that produce them - confirm
    # against the round manager's scheduling semantics.
    round_manager.require_data('java')
    round_manager.require_data('scala')
def __init__(self, *args, **kwargs):
super(IvyResolve, self).__init__(*args, **kwargs)
self._outdir = self.get_options().outdir or os.path.join(self.workdir, 'reports')
self._open = self.get_options().open
self._report = self._open or self.get_options().report
self._args = []
for arg in self.get_options().args:
self._args.extend(safe_shlex_split(arg))
@memoized_property
def confs(self):
# TODO(John Sirois): This supports `IdeGen` and `Resolve` signalling their resolve confs needs.
# Fix those tasks to do their own resolves.
# See: https://github.com/pantsbuild/pants/issues/2177
confs = set(self.get_options().confs)
for conf in ('default', 'sources', 'javadoc'):
if self.context.products.isrequired('jar_map_{conf}'.format(conf=conf)):
confs.add(conf)
return confs
  def execute(self):
    """Resolves the specified confs for the configured targets and returns an iterator over
    tuples of (conf, jar path).
    """
    executor = self.create_java_executor()
    targets = self.context.targets()
    # Shared product mapping targets to their resolved classpath entries;
    # created here if no upstream task has initialized it yet.
    compile_classpath = self.context.products.get_data('compile_classpath',
                                                       init_func=ClasspathProducts)
    resolve_hash_name = self.resolve(executor=executor,
                                     targets=targets,
                                     classpath_products=compile_classpath,
                                     confs=self.confs,
                                     extra_args=self._args)
    if self._report:
      self._generate_ivy_report(resolve_hash_name)
def check_artifact_cache_for(self, invalidation_check):
# Ivy resolution is an output dependent on the entire target set, and is not divisible
# by target. So we can only cache it keyed by the entire target set.
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
return [global_vts]
  def _generate_ivy_report(self, resolve_hash_name):
    """Transforms ivy's xml reports into html pages (one per conf) under self._outdir.
    Missing xml reports get an empty placeholder so every conf produces a page;
    if --open was given, the first generated page is opened in a browser.
    """
    def make_empty_report(report, organisation, module, conf):
      # Writes a minimal, dependency-free ivy report so xalan has valid input.
      no_deps_xml_template = dedent("""<?xml version="1.0" encoding="UTF-8"?>
        <?xml-stylesheet type="text/xsl" href="ivy-report.xsl"?>
        <ivy-report version="1.0">
          <info
            organisation="{organisation}"
            module="{module}"
            revision="latest.integration"
            conf="{conf}"
            confs="{conf}"
            date="{timestamp}"/>
        </ivy-report>
        """).format(
        organisation=organisation,
        module=module,
        conf=conf,
        timestamp=time.strftime('%Y%m%d%H%M%S'),
      )
      with open(report, 'w') as report_handle:
        print(no_deps_xml_template, file=report_handle)
    tool_classpath = self.tool_classpath('xalan')
    report = None
    org = IvyUtils.INTERNAL_ORG_NAME
    name = resolve_hash_name
    xsl = os.path.join(self.ivy_cache_dir, 'ivy-report.xsl')
    # Xalan needs this dir to exist - ensure that, but do no more - we have no clue where this
    # points.
    safe_mkdir(self._outdir, clean=False)
    for conf in self.confs:
      xml_path = self._get_report_path(conf, resolve_hash_name)
      if not os.path.exists(xml_path):
        # Make it clear that this is not the original report from Ivy by changing its name.
        xml_path = xml_path[:-4] + "-empty.xml"
        make_empty_report(xml_path, org, name, conf)
      out = os.path.join(self._outdir,
                         '{org}-{name}-{conf}.html'.format(org=org, name=name, conf=conf))
      args = ['-IN', xml_path, '-XSL', xsl, '-OUT', out]
      # The ivy-report.xsl generates tab links to files with extension 'xml' by default, we
      # override that to point to the html files we generate.
      args.extend(['-param', 'extension', 'html'])
      if 0 != self.runjava(classpath=tool_classpath, main='org.apache.xalan.xslt.Process',
                           args=args, workunit_name='report'):
        raise self.Error('Failed to create html report from xml ivy report.')
      # The ivy-report.xsl is already smart enough to generate an html page with tab links to all
      # confs for a given report coordinate (org, name). We need only display 1 of the generated
      # htmls and the user can then navigate to the others via the tab links.
      if report is None:
        report = out
    # Refresh the stylesheet the generated pages reference.
    css = os.path.join(self._outdir, 'ivy-report.css')
    if os.path.exists(css):
      os.unlink(css)
    shutil.copy(os.path.join(self.ivy_cache_dir, 'ivy-report.css'), self._outdir)
    if self._open and report:
      binary_util.ui_open(report)
def _get_report_path(self, conf, resolve_hash_name):
    """Return the path to Ivy's xml resolve report for `conf`.

    Wraps low-level IvyUtils report errors in this task's own Error type so
    callers only need to handle one exception family.
    """
    try:
        return IvyUtils.xml_report_path(self.ivy_cache_dir, resolve_hash_name, conf)
    except IvyUtils.IvyResolveReportError as cause:
        raise self.Error('Failed to generate ivy report: {}'.format(cause))
| apache-2.0 |
StrellaGroup/erpnext | erpnext/config/getting_started.py | 5 | 5251 | from __future__ import unicode_literals
import frappe
from frappe import _
active_domains = frappe.get_active_domains()
def get_data():
return [
{
"label": _("Accounting"),
"items": [
{
"type": "doctype",
"name": "Item",
"onboard": 1,
},
{
"type": "doctype",
"name": "Customer",
"description": _("Customer database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Supplier",
"description": _("Supplier database."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Company",
"description": _("Company (not Customer or Supplier) master."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Account",
"icon": "fa fa-sitemap",
"label": _("Chart of Accounts"),
"route": "#Tree/Account",
"description": _("Tree of financial accounts."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Opening Invoice Creation Tool",
"description": _("Create Opening Sales and Purchase Invoices"),
"onboard": 1,
},
]
},
{
"label": _("Data Import and Settings"),
"items": [
{
"type": "doctype",
"name": "Data Import",
"label": _("Import Data"),
"icon": "octicon octicon-cloud-upload",
"description": _("Import Data from CSV / Excel files."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Chart of Accounts Importer",
"labe": _("Chart Of Accounts Importer"),
"description": _("Import Chart Of Accounts from CSV / Excel files"),
"onboard": 1
},
{
"type": "doctype",
"name": "Letter Head",
"description": _("Letter Heads for print templates."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Email Account",
"description": _("Add / Manage Email Accounts."),
"onboard": 1,
},
]
},
{
"label": _("Stock"),
"items": [
{
"type": "doctype",
"name": "Warehouse",
"onboard": 1,
},
{
"type": "doctype",
"name": "Brand",
"onboard": 1,
},
{
"type": "doctype",
"name": "UOM",
"label": _("Unit of Measure") + " (UOM)",
"onboard": 1,
},
{
"type": "doctype",
"name": "Stock Reconciliation",
"onboard": 1,
},
]
},
{
"label": _("CRM"),
"items": [
{
"type": "doctype",
"name": "Lead",
"description": _("Database of potential customers."),
"onboard": 1,
},
{
"type": "doctype",
"label": _("Customer Group"),
"name": "Customer Group",
"icon": "fa fa-sitemap",
"link": "Tree/Customer Group",
"description": _("Manage Customer Group Tree."),
"onboard": 1,
},
{
"type": "doctype",
"label": _("Territory"),
"name": "Territory",
"icon": "fa fa-sitemap",
"link": "Tree/Territory",
"description": _("Manage Territory Tree."),
"onboard": 1,
},
]
},
{
"label": _("Human Resources"),
"items": [
{
"type": "doctype",
"name": "Employee",
"onboard": 1,
},
{
"type": "doctype",
"name": "Employee Attendance Tool",
"hide_count": True,
"onboard": 1,
"dependencies": ["Employee"]
},
{
"type": "doctype",
"name": "Salary Structure",
"onboard": 1,
},
]
},
{
"label": _("Education"),
"condition": "Education" in active_domains,
"items": [
{
"type": "doctype",
"name": "Student",
"onboard": 1,
},
{
"type": "doctype",
"name": "Course",
"onboard": 1,
},
{
"type": "doctype",
"name": "Instructor",
"onboard": 1,
},
{
"type": "doctype",
"name": "Room",
"onboard": 1,
},
]
},
{
"label": _("Healthcare"),
"condition": "Healthcare" in active_domains,
"items": [
{
"type": "doctype",
"name": "Patient",
"label": _("Patient"),
"onboard": 1,
},
{
"type": "doctype",
"name": "Physician",
"label": _("Physician"),
"onboard": 1,
},
{
"type": "doctype",
"name": "Diagnosis",
"label": _("Diagnosis"),
"onboard": 1,
}
]
},
{
"label": _("Agriculture"),
"condition": "Agriculture" in active_domains,
"items": [
{
"type": "doctype",
"name": "Crop",
"onboard": 1,
},
{
"type": "doctype",
"name": "Crop Cycle",
"onboard": 1,
},
{
"type": "doctype",
"name": "Location",
"onboard": 1,
},
{
"type": "doctype",
"name": "Fertilizer",
"onboard": 1,
}
]
},
{
"label": _("Non Profit"),
"condition": "Non Profit" in active_domains,
"items": [
{
"type": "doctype",
"name": "Member",
"description": _("Member information."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Volunteer",
"description": _("Volunteer information."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Chapter",
"description": _("Chapter information."),
"onboard": 1,
},
{
"type": "doctype",
"name": "Donor",
"description": _("Donor information."),
"onboard": 1,
},
]
}
] | gpl-3.0 |
bregman-arie/ansible | lib/ansible/modules/network/eos/eos_l2_interface.py | 25 | 10611 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
# Module documentation consumed by ansible-doc.  Bug fix: the `state`
# option previously advertised an 'unconfigured' choice that main()'s
# argument_spec does not accept; it is removed here so docs match behavior.
DOCUMENTATION = """
---
module: eos_l2_interface
version_added: "2.5"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage L2 interfaces on Arista EOS network devices.
description:
  - This module provides declarative management of L2 interfaces
    on Arista EOS network devices.
notes:
  - Tested against EOS 4.15
options:
  name:
    description:
      - Name of the interface
    required: true
    aliases: ['interface']
  mode:
    description:
      - Mode in which interface needs to be configured.
    choices: ['access', 'trunk']
  access_vlan:
    description:
      - Configure given VLAN in access port.
        If C(mode=access), used as the access VLAN ID.
  native_vlan:
    description:
      - Native VLAN to be configured in trunk port.
        If C(mode=trunk), used as the trunk native VLAN ID.
  trunk_allowed_vlans:
    description:
      - List of allowed VLANs in a given trunk port.
        If C(mode=trunk), these are the ONLY VLANs that will be
        configured on the trunk, i.e. C(2-10,15).
    aliases: ['trunk_vlans']
  aggregate:
    description:
      - List of Layer-2 interface definitions.
  state:
    description:
      - Manage the state of the Layer-2 Interface configuration.
    default: present
    choices: ['present', 'absent']
extends_documentation_fragment: eos
"""
EXAMPLES = """
- name: Ensure Ethernet1 does not have any switchport
eos_l2_interface:
name: Ethernet1
state: absent
- name: Ensure Ethernet1 is configured for access vlan 20
eos_l2_interface:
name: Ethernet1
mode: access
access_vlan: 20
- name: Ensure Ethernet1 is a trunk port and ensure 2-50 are being tagged (doesn't mean others aren't also being tagged)
eos_l2_interface:
name: Ethernet1
mode: trunk
native_vlan: 10
trunk_allowed_vlans: 2-50
- name: Set switchports on aggregate
eos_l2_interface:
aggregate:
- { name: ethernet1, mode: access, access_vlan: 20}
- { name: ethernet2, mode: trunk, native_vlan: 10}
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always.
type: list
sample:
- interface ethernet1
- switchport access vlan 20
"""
import re
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import NetworkConfig
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.eos.eos import get_config, load_config, run_commands
from ansible.module_utils.network.eos.eos import eos_argument_spec
def parse_config_argument(configobj, name, arg=None):
    """Return the value that follows `arg` in interface `name`'s config
    section, or None when the line is absent."""
    section = configobj['interface %s' % name]
    body = '\n'.join(section.children)
    found = re.search(r'%s (.+)$' % arg, body, re.M)
    return found.group(1).strip() if found else None
def search_obj_in_list(name, lst):
    """Return the first dict in `lst` whose 'name' equals `name`, else None."""
    return next((candidate for candidate in lst if candidate['name'] == name), None)
def map_obj_to_commands(updates, module):
    """Diff desired ('want') against device ('have') state and return the
    EOS config commands needed to converge each interface.

    `updates` is a (want, have) tuple of lists of interface dicts; `module`
    is used only to fail when a wanted interface does not exist on device.
    """
    commands = list()
    want, have = updates

    for w in want:
        name = w['name']
        state = w['state']
        mode = w['mode']
        access_vlan = w['access_vlan']
        native_vlan = w['native_vlan']
        trunk_allowed_vlans = w['trunk_allowed_vlans']

        interface = 'interface ' + name
        commands.append(interface)

        obj_in_have = search_obj_in_list(name, have)
        if not obj_in_have:
            module.fail_json(msg='invalid interface {0}'.format(name))

        if state == 'absent':
            # Tear down whatever switchport configuration is present.
            if obj_in_have['mode'] == 'access':
                commands.append('no switchport access vlan {0}'.format(obj_in_have['access_vlan']))
            if obj_in_have['mode'] == 'trunk':
                commands.append('no switchport mode trunk')
            if obj_in_have['native_vlan']:
                commands.append('no switchport trunk native vlan {0}'.format(obj_in_have['native_vlan']))
            if obj_in_have['trunk_allowed_vlans']:
                commands.append('no switchport trunk allowed vlan {0}'.format(obj_in_have['trunk_allowed_vlans']))
            if obj_in_have['state'] == 'present':
                commands.append('no switchport')
        else:
            if obj_in_have['state'] == 'absent':
                # Interface is routed today: enable switching and apply the
                # full requested layer-2 configuration from scratch.
                commands.append('switchport')
                commands.append('switchport mode {0}'.format(mode))
                if access_vlan:
                    commands.append('switchport access vlan {0}'.format(access_vlan))
                if native_vlan:
                    commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                if trunk_allowed_vlans:
                    commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))
            else:
                if mode != obj_in_have['mode']:
                    if obj_in_have['mode'] == 'access':
                        # access -> trunk transition.
                        commands.append('no switchport access vlan {0}'.format(obj_in_have['access_vlan']))
                        if native_vlan:
                            commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                        if trunk_allowed_vlans:
                            commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))
                    else:
                        # trunk -> access transition.  Bug fix: these
                        # negations previously read 'not switchport ...';
                        # the EOS negation keyword is 'no', so the commands
                        # were rejected by the device.
                        if obj_in_have['native_vlan']:
                            commands.append('no switchport trunk native vlan {0}'.format(obj_in_have['native_vlan']))
                        if obj_in_have['trunk_allowed_vlans']:
                            commands.append('no switchport trunk allowed vlan {0}'.format(obj_in_have['trunk_allowed_vlans']))
                        commands.append('switchport access vlan {0}'.format(access_vlan))
                else:
                    # Same mode: only push values that actually changed.
                    if mode == 'access':
                        if access_vlan != obj_in_have['access_vlan']:
                            commands.append('switchport access vlan {0}'.format(access_vlan))
                    else:
                        if native_vlan != obj_in_have['native_vlan'] and native_vlan:
                            commands.append('switchport trunk native vlan {0}'.format(native_vlan))
                        if trunk_allowed_vlans != obj_in_have['trunk_allowed_vlans'] and trunk_allowed_vlans:
                            commands.append('switchport trunk allowed vlan {0}'.format(trunk_allowed_vlans))

        # Drop the bare 'interface X' line when no changes followed it.
        if commands[-1] == interface:
            commands.pop(-1)

    return commands
def map_config_to_obj(module):
    """Query the device and build the 'have' list: one dict per interface
    describing its current switchport state."""
    config = get_config(module, flags=['| section interface'])
    configobj = NetworkConfig(indent=3, contents=config)

    interface_names = re.findall(r'^interface (\S+)', config, re.M)
    if not interface_names:
        return []

    have = []
    for intf in set(interface_names):
        # 'Switchport: Enabled/Disabled' tells us whether the port is L2.
        switchport_cfg = run_commands(
            module, 'sh int {0} switchport | include Switchport'.format(intf))[0].split(':')[1].strip()

        obj = {
            'name': intf.lower(),
            'state': 'present' if switchport_cfg == 'Enabled' else 'absent',
            'access_vlan': parse_config_argument(configobj, intf, 'switchport access vlan'),
            'native_vlan': parse_config_argument(configobj, intf, 'switchport trunk native vlan'),
            'trunk_allowed_vlans': parse_config_argument(configobj, intf, 'switchport trunk allowed vlan'),
        }
        # An explicit access VLAN implies access mode; otherwise trunk.
        obj['mode'] = 'access' if obj['access_vlan'] else 'trunk'
        have.append(obj)

    return have
def map_params_to_obj(module):
    """Build the 'want' list from module parameters.

    With `aggregate`, per-item gaps are filled in place from the common
    top-level parameters; otherwise a single entry is built directly.
    """
    params = module.params
    aggregate = params.get('aggregate')

    if not aggregate:
        return [{
            'name': params['name'].lower(),
            'mode': params['mode'],
            'access_vlan': params['access_vlan'],
            'native_vlan': params['native_vlan'],
            'trunk_allowed_vlans': params['trunk_allowed_vlans'],
            'state': params['state'],
        }]

    objs = []
    for item in aggregate:
        # Inherit unset values from the top-level parameters (mutates the
        # aggregate entry in place, matching historical behavior).
        for key in item:
            if item.get(key) is None:
                item[key] = params[key]
        item['name'] = item['name'].lower()
        objs.append(item.copy())
    return objs
def main():
    """ main entry point for module execution
    """
    # Per-interface options; also reused for entries inside `aggregate`.
    element_spec = dict(
        name=dict(type='str', aliases=['interface']),
        mode=dict(choices=['access', 'trunk']),
        access_vlan=dict(type='str'),
        native_vlan=dict(type='str'),
        trunk_allowed_vlans=dict(type='str', aliases=['trunk_vlans']),
        state=dict(default='present',
                   choices=['present', 'absent'])
    )

    aggregate_spec = deepcopy(element_spec)
    aggregate_spec['name'] = dict(required=True)

    # remove default in aggregate spec, to handle common arguments
    remove_default_spec(aggregate_spec)

    argument_spec = dict(
        aggregate=dict(type='list', elements='dict', options=aggregate_spec),
    )

    argument_spec.update(element_spec)
    argument_spec.update(eos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['access_vlan', 'native_vlan'],
                                               ['access_vlan', 'trunk_allowed_vlans']],
                           supports_check_mode=True)

    warnings = list()

    result = {'changed': False}

    if warnings:
        result['warnings'] = warnings

    # Desired state from parameters vs. current state from the device.
    want = map_params_to_obj(module)
    have = map_config_to_obj(module)

    commands = map_obj_to_commands((want, have), module)
    result['commands'] = commands

    if commands:
        # In check mode, build the config session but do not commit it.
        commit = not module.check_mode
        response = load_config(module, commands, commit=commit)
        if response.get('diff') and module._diff:
            result['diff'] = {'prepared': response.get('diff')}
        result['session_name'] = response.get('session')
        result['changed'] = True

    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
hackerberry/ooni-probe | ooni/bridget/utils/nodes.py | 2 | 5611 | #!/usr/bin/env python
# -*- coding: UTF-8
"""
nodes
*****
This contains all the code related to Nodes
both network and code execution.
:copyright: (c) 2012 by Arturo Filastò, Isis Lovecruft
:license: see LICENSE for more details.
"""
import os
from binascii import hexlify
try:
import paramiko
except:
print "Error: module paramiko is not installed."
from pprint import pprint
import sys
import socks
import xmlrpclib
class Node(object):
    """Base container for a remote endpoint: an address/port pair."""

    def __init__(self, address, port):
        self.address = address
        self.port = port
class LocalNode(object):
    """Placeholder for tests executed on the local machine (no-op)."""

    def __init__(self):
        pass
"""
[]: node = NetworkNode("192.168.0.112", 5555, "SOCKS5")
[]: node_socket = node.wrap_socket()
"""
class NetworkNode(Node):
    """A remote network relay (SOCKS4/SOCKS5/HTTP proxy).

    Example::

        node = NetworkNode("192.168.0.112", 5555, "SOCKS5")
        node_socket = node.wrap_socket()
    """

    def __init__(self, address, port, node_type="SOCKS5", auth_creds=None):
        self.node = Node(address, port)
        # XXX support for multiple types
        # node type (SOCKS proxy, HTTP proxy, GRE tunnel, ...)
        self.node_type = node_type
        # type-specific authentication credentials: (username, password)
        self.auth_creds = auth_creds

    def _get_socksipy_socket(self, proxy_type, auth_creds=None):
        """Return a socksipy socket configured for this node's proxy.

        Bug fix: auth_creds is now optional (callers never passed it and
        would previously raise TypeError); self.auth_creds is the default.
        """
        import socks
        s = socks.socksocket()
        creds = auth_creds if auth_creds is not None else self.auth_creds
        # creds[0] -> username, creds[1] -> password
        s.setproxy(proxy_type, self.node.address, self.node.port,
                   creds[0], creds[1])
        return s

    def _get_socket_wrapper(self):
        """Map self.node_type onto the matching proxy-wrapped socket."""
        if self.node_type.startswith("SOCKS"):  # SOCKS proxies
            # Bug fix: these comparisons were inverted (!=), which selected
            # PROXY_TYPE_SOCKS4 for "SOCKS5" nodes and vice versa.
            if self.node_type == "SOCKS5":
                proxy_type = socks.PROXY_TYPE_SOCKS5
            elif self.node_type == "SOCKS4":
                proxy_type = socks.PROXY_TYPE_SOCKS4
            else:
                print("We don't know this proxy type.")
                sys.exit(1)
            return self._get_socksipy_socket(proxy_type)
        elif self.node_type == "HTTP":  # HTTP proxies
            # Bug fix: PROXY_TYPE_HTTP must be qualified with the socks
            # module; it is not a local name.
            return self._get_socksipy_socket(socks.PROXY_TYPE_HTTP)
        else:  # Unknown proxies
            print("We don't know this proxy type.")
            sys.exit(1)

    def wrap_socket(self):
        """Public entry point: return a socket wrapped for this node."""
        return self._get_socket_wrapper()
class CodeExecNode(Node):
    """A remote node on which code can be executed (e.g. over SSH)."""

    def __init__(self, address, port, node_type, auth_creds):
        self.node = Node(address,port)
        # node type (SSH proxy, etc.)
        self.node_type = node_type
        # type-specific authentication credentials
        self.auth_creds = auth_creds

    def add_unit(self):
        # Abstract hook for subclasses; intentionally a no-op here.
        pass

    def get_status(self):
        # Abstract hook for subclasses; intentionally a no-op here.
        pass
class PlanetLab(CodeExecNode):
    """Code-execution node backed by the PlanetLab research network.

    Wraps the PLCAPI XML-RPC interface for authentication, node search and
    slice management, plus paramiko-based SSH access to slice machines.
    """

    def __init__(self, address, auth_creds, ooni):
        # NOTE(review): CodeExecNode.__init__ is never called, so self.node
        # and self.node_type are not set on PlanetLab instances.
        self.auth_creds = auth_creds

        self.config = ooni.utils.config
        self.logger = ooni.logger
        self.name = "PlanetLab"

    def _api_auth(self):
        # Build a PLCAPI password-auth structure and verify it server-side.
        api_server = xmlrpclib.ServerProxy('https://www.planet-lab.org/PLCAPI/')
        auth = {}
        ## should be changed to separate node.conf file
        auth['Username'] = self.config.main.pl_username
        auth['AuthString'] = self.config.main.pl_password
        auth['AuthMethod'] = "password"
        authorized = api_server.AuthCheck(auth)

        if authorized:
            print 'We are authorized!'
            return auth
        else:
            # NOTE(review): implicitly returns None on failure and callers
            # do not check for that -- confirm intended error handling.
            print 'Authorization failed. Please check your settings for pl_username and pl_password in the ooni-probe.conf file.'

    def _search_for_nodes(self, node_filter=None):
        # NOTE(review): the node_filter argument is immediately overwritten
        # below, return_fields is unused, and boot_state_filter is
        # undefined -- this call will raise NameError as written.
        api_server = xmlrpclib.ServerProxy('https://www.planet-lab.org/PLCAPI/', allow_none=True)
        node_filter = {'hostname': '*.cert.org.cn'}
        return_fields = ['hostname', 'site_id']
        all_nodes = api_server.GetNodes(self.api_auth(), node_filter, boot_state_filter)
        pprint(all_nodes)
        return all_nodes

    def _add_nodes_to_slice(self):
        # NOTE(review): self.search_for_nodes / self.api_auth do not exist
        # under those names (the methods are _search_for_nodes /
        # _api_auth) -- confirm intended names.
        api_server = xmlrpclib.ServerProxy('https://www.planet-lab.org/PLCAPI/', allow_none=True)
        all_nodes = self.search_for_nodes()
        for node in all_nodes:
            api_server.AddNode(self.api_auth(), node['site_id'], all_nodes)
            print 'Adding nodes %s' % node['hostname']

    def _auth_login(slicename, machinename):
        """Attempt to authenticate to the given PL node, slicename and
        machinename, using any of the private keys in ~/.ssh/ """
        # NOTE(review): missing `self` parameter -- calling this as a bound
        # method would pass the instance as `slicename`.
        agent = paramiko.Agent()
        agent_keys = agent.get_keys()
        if len(agent_keys) == 0:
            return

        for key in agent_keys:
            print 'Trying ssh-agent key %s' % hexlify(key.get_fingerprint()),
            try:
                paramiko.transport.auth_publickey(machinename, slicename)
                print 'Public key authentication to PlanetLab node %s successful.' % machinename,
                return
            except paramiko.SSHException:
                print 'Public key authentication to PlanetLab node %s failed.' % machinename,

    def _get_command():
        # NOTE(review): stub; missing `self` parameter.
        pass

    def ssh_and_run_(slicename, machinename, command):
        """Attempt to make a standard OpenSSH client to PL node, and run
        commands from a .conf file."""
        ## needs a way to specify 'ssh -l <slicename> <machinename>'
        ## with public key authentication.
        # NOTE(review): missing `self`; the `command` argument is clobbered
        # by PlanetLab.get_command(), which is not defined (the stub is
        # named _get_command) -- confirm intent.
        command = PlanetLab.get_command()
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.connect(machinename)
        stdin, stdout, stderr = client.exec_command(command)

    def send_files_to_node(directory, files):
        """Attempt to rsync a tree to the PL node."""
        # NOTE(review): unimplemented stub; missing `self`.
        pass

    def add_unit():
        # NOTE(review): overrides CodeExecNode.add_unit but drops `self`.
        pass

    def get_status():
        # NOTE(review): overrides CodeExecNode.get_status but drops `self`.
        pass
| bsd-2-clause |
NicholasMerrill/avwxsummarizer | avwxsummarizer/indicator_light_script.py | 2 | 2776 | import sys
from avwx.models import Metar, MetarSet, CloudLayer
from colorama import Fore, Back, Style
class Indicator(object):
    """A colored status light (red/yellow/green) with an optional reason.

    str() renders the light as a colorama-styled terminal string.
    """

    # The only colors a light may take.
    COLORS = ('red', 'yellow', 'green')

    def __init__(self, color, reason=None):
        # Validate early so a bad color fails at construction, not render.
        # Bug fix: raise the specific ValueError instead of bare Exception
        # (still caught by any existing `except Exception` handlers).
        if color not in self.COLORS:
            raise ValueError("Invalid color: %s" % color)
        self.color = color
        self.reason = reason

    def __str__(self):
        if self.color == 'red':
            term_str = Fore.WHITE + Back.RED + Style.BRIGHT
        elif self.color == 'yellow':
            term_str = Fore.BLACK + Back.YELLOW
        elif self.color == 'green':
            term_str = Fore.BLACK + Back.GREEN
        else:
            # Unreachable after __init__ validation; kept defensively.
            raise ValueError("Unexpected color: %s" % self.color)
        return term_str + " %s LIGHT%s " % (self.color.upper(), (' (%s)' % self.reason) if self.reason else '') \
            + Fore.RESET + Back.RESET + Style.RESET_ALL
def get_bad_indicators(metar):
    # Evaluate a METAR against go/no-go thresholds; return the list of
    # red/yellow Indicator objects that apply (empty list means all clear).
    indicators = []
    if metar.flight_category not in ['VFR']:
        indicators.append(Indicator('red', "Flight Category: %s" % metar.flight_category))
    if metar.visibility < 8:
        indicators.append(Indicator('red', "Visibility: %.2f sm" % metar.visibility))

    # Ceiling: lowest layer at/above scattered coverage.
    cloud_base = metar.get_ceiling_cloud_layer(CloudLayer.SCATTERED_PCT)
    if cloud_base is not None:
        if cloud_base.height <= 3500:
            indicators.append(Indicator('red', "Cloud Base: %s AGL" % cloud_base))
        elif cloud_base.height <= 6000:
            # NOTE(review): this branch is also 'red'; given the red/yellow
            # tiering used for wind below, 'yellow' may have been intended
            # for the 3500-6000 ft band -- confirm.
            indicators.append(Indicator('red', "Cloud Base: %s AGL" % cloud_base))

    # Sustained wind: >20 kts is a no-go, >10 kts a caution.
    if metar.wind.speed > 20:
        indicators.append(Indicator('red', "High Winds: %s kts" % metar.wind.speed))
    elif metar.wind.speed > 10:
        indicators.append(Indicator('yellow', "Wind: %s kts" % metar.wind.speed))

    # Gusts: judged by the spread over the sustained speed.
    if metar.wind.gust is not None:
        gust_diff = metar.wind.gust - metar.wind.speed
        if gust_diff >= 10:
            indicators.append(Indicator('red', "High Wind Gusts: %s kts" % metar.wind.gust))
        elif gust_diff >= 5:
            indicators.append(Indicator('yellow', "Wind Gusting: %s kts" % metar.wind.gust))

    # Cold-weather caution (Celsius threshold; Fahrenheit shown for info).
    if metar.temp <= -4:
        indicators.append(Indicator('yellow', "It's chilly: %.0fC (%.0fF)" % (metar.temp, (metar.temp * 9 / 5 + 32))))
    return indicators
# --- Command-line entry: print indicator lights for one airport's METAR ---
if len(sys.argv) != 2:
    print "Must enter a single airport identifier as an argument."
    sys.exit(1)

# Fetch the latest METARs for the requested station identifier.
metar_set = MetarSet("%s" % sys.argv[1])
metar_set.refresh()
#metar = Metar('KAPA', fake='mock.xml')
metar = metar_set.get_latest()
if metar is None:
    print "No METARs found"
    sys.exit(1)

bad_indicators = get_bad_indicators(metar)
print metar.raw_text
if len(bad_indicators) == 0:
    # Nothing concerning: show a single green light.
    print Indicator('green')
else:
    for indicator in bad_indicators:
        print indicator
| mit |
craisins/yosbot | plugins/dictionary.py | 21 | 3013 | import re
from util import hook, http
@hook.command('u')
@hook.command
def urban(inp):
    '''.u/.urban <phrase> -- looks up <phrase> on urbandictionary.com'''
    url = 'http://www.urbandictionary.com/iphone/search/define'
    page = http.get_json(url, term=inp, headers={'Referer': 'http://m.urbandictionary.com'})

    # Bug fix: check for a miss before touching the definition list --
    # on 'no_results' responses the 'list' key may be empty or missing.
    if page['result_type'] == 'no_results':
        return 'not found.'

    defs = page['list']
    out = defs[0]['word'] + ': ' + defs[0]['definition'].replace('\r\n', ' ')

    # Trim overly long replies at a word boundary.
    if len(out) > 400:
        out = out[:out.rfind(' ', 0, 400)] + '...'

    return out
# define plugin by GhettoWizard & Scaevolus
@hook.command('dictionary')
@hook.command
def define(inp):
    ".define/.dictionary <word> -- fetches definition of <word>"

    url = 'http://ninjawords.com/'

    h = http.get_html(url + http.quote_plus(inp))

    # Scraped fragments: headword articles, definitions and usage examples.
    definition = h.xpath('//dd[@class="article"] | '
                         '//div[@class="definition"] |'
                         '//div[@class="example"]')

    if not definition:
        return 'No results for ' + inp

    def format_output(show_examples):
        # Render the fragments into one reply line; examples can be dropped
        # on a second pass to shorten the output.
        result = '%s: ' % h.xpath('//dt[@class="title-word"]/a/text()')[0]

        # Spelling correction banner, if the site corrected the query.
        correction = h.xpath('//span[@class="correct-word"]/text()')
        if correction:
            result = 'definition for "%s": ' % correction[0]

        # Group fragments: each 'article' starts a new sense list; examples
        # are appended to the last definition; everything else is a
        # definition appended to the current article.
        sections = []
        for section in definition:
            if section.attrib['class'] == 'article':
                sections += [[section.text_content() + ': ']]
            elif section.attrib['class'] == 'example':
                if show_examples:
                    sections[-1][-1] += ' ' + section.text_content()
            else:
                sections[-1] += [section.text_content()]

        for article in sections:
            result += article[0]
            if len(article) > 2:
                # Multiple senses under one article: number them.
                result += ' '.join('%d. %s' % (n + 1, section)
                                   for n, section in enumerate(article[1:]))
            else:
                result += article[1] + ' '

        synonyms = h.xpath('//dd[@class="synonyms"]')
        if synonyms:
            result += synonyms[0].text_content()

        # Collapse whitespace and strip stray degree signs.
        result = re.sub(r'\s+', ' ', result)
        result = re.sub('\xb0', '', result)
        return result

    result = format_output(True)
    if len(result) > 450:
        # Too long with examples; re-render without them.
        result = format_output(False)

    if len(result) > 450:
        # Still too long: hard-trim at a word boundary and tidy the tail.
        result = result[:result.rfind(' ', 0, 450)]
        result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...'

    return result
@hook.command('e')
@hook.command
def etymology(inp):
    ".e/.etymology <word> -- Retrieves the etymology of chosen word"

    base_url = 'http://www.etymonline.com/index.php'
    page = http.get_html(base_url, term=inp)

    matches = page.xpath('//dl')
    if not matches:
        return 'No etymology found for ' + inp

    # Flatten the first entry's text and normalize whitespace.
    text = ' '.join(matches[0].text_content().split())
    if len(text) > 400:
        text = text[:text.rfind(' ', 0, 400)] + ' ...'
    return text
| unlicense |
ksmit799/Toontown-Source | toontown/effects/DustCloud.py | 1 | 2862 | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.showbase import PythonUtil
from toontown.battle.BattleProps import globalPropPool
from direct.directnotify import DirectNotifyGlobal
SFX = PythonUtil.Enum('poof, magic')
SFXPATHS = {SFX.poof: 'phase_4/audio/sfx/firework_distance_02.mp3',
SFX.magic: 'phase_4/audio/sfx/SZ_DD_treasure.mp3'}
class DustCloud(NodePath):
    """A billboarded dust-cloud flipbook effect with optional poof sound."""

    # Class-wide counter and shared sound cache for all instances.
    dustCloudCount = 0
    sounds = {}
    notify = DirectNotifyGlobal.directNotify.newCategory('DustCloud')

    def __init__(self, parent = hidden, fBillboard = 1, wantSound = 0):
        NodePath.__init__(self)
        self.assign(globalPropPool.getProp('suit_explosion_dust'))
        if fBillboard:
            self.setBillboardAxis()
        self.reparentTo(parent)
        # The flipbook frames live in a SequenceNode; freeze it until play().
        self.seqNode = self.find('**/+SequenceNode').node()
        self.seqNode.setFrameRate(0)
        self.wantSound = wantSound
        # Lazily load the shared sound effects once for all instances.
        if self.wantSound and not DustCloud.sounds:
            DustCloud.sounds[SFX.poof] = loader.loadSfx(SFXPATHS[SFX.poof])
        self.track = None
        self.trackId = DustCloud.dustCloudCount
        DustCloud.dustCloudCount += 1
        self.setBin('fixed', 100, 1)
        self.hide()

    def createTrack(self, rate = 24):
        """Build the show/flip/hide interval track at the given frame rate."""

        def getSoundFuncIfAble(soundId):
            # Return the sound's play function, or a no-op when sound is
            # disabled or the effect was not loaded.
            sound = DustCloud.sounds.get(soundId)
            if self.wantSound and sound:
                return sound.play
            else:
                def dummy():
                    pass

                return dummy

        # Duration of one full flip through the sequence at `rate` fps.
        tflipDuration = self.seqNode.getNumChildren() / float(rate)
        self.track = Sequence(Func(self.show), Func(self.messaging), Func(self.seqNode.play, 0, self.seqNode.getNumFrames() - 1), Func(self.seqNode.setFrameRate, rate), Func(getSoundFuncIfAble(SFX.poof)), Wait(tflipDuration), Func(self._resetTrack), name='dustCloud-track-%d' % self.trackId)

    def _resetTrack(self):
        # Freeze the flipbook and hide until the next play()/loop().
        self.seqNode.setFrameRate(0)
        self.hide()

    def messaging(self):
        self.notify.debug('CREATING TRACK ID: %s' % self.trackId)

    def isPlaying(self):
        """Return whether the effect's interval track is currently running."""
        # Cleanup: use `is None` instead of `== None` and drop the
        # unreachable trailing `return` the original carried.
        if self.track is None:
            return False
        if self.track.isPlaying():
            return True
        else:
            return False

    def play(self, rate = 24):
        """Play the effect once at `rate` fps, restarting if active."""
        self.stop()
        self.createTrack(rate)
        self.track.start()

    def loop(self, rate = 24):
        """Loop the effect at `rate` fps, restarting if active."""
        self.stop()
        self.createTrack(rate)
        self.track.loop()

    def stop(self):
        """Finish and reset the current track, if any."""
        if self.track:
            self.track.finish()
            self.track.clearToInitial()

    def destroy(self):
        """Tear down the track and remove this node from the scene graph."""
        self.notify.debug('DESTROYING TRACK ID: %s' % self.trackId)
        if self.track:
            self._resetTrack()
            self.track.clearToInitial()
        del self.track
        del self.seqNode
        self.removeNode()
| mit |
Lyleo/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/python3/docutils/statemachine.py | 2 | 57570 | # $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A finite state machine specialized for regular-expression-based text filters,
this module defines the following classes:
- `StateMachine`, a state machine
- `State`, a state superclass
- `StateMachineWS`, a whitespace-sensitive version of `StateMachine`
- `StateWS`, a state superclass for use with `StateMachineWS`
- `SearchStateMachine`, uses `re.search()` instead of `re.match()`
- `SearchStateMachineWS`, uses `re.search()` instead of `re.match()`
- `ViewList`, extends standard Python lists.
- `StringList`, string-specific ViewList.
Exception classes:
- `StateMachineError`
- `UnknownStateError`
- `DuplicateStateError`
- `UnknownTransitionError`
- `DuplicateTransitionError`
- `TransitionPatternNotFound`
- `TransitionMethodNotFound`
- `UnexpectedIndentationError`
- `TransitionCorrection`: Raised to switch to another transition.
- `StateCorrection`: Raised to switch to another state & transition.
Functions:
- `string2lines()`: split a multi-line string into a list of one-line strings
How To Use This Module
======================
(See the individual classes, methods, and attributes for details.)
1. Import it: ``import statemachine`` or ``from statemachine import ...``.
You will also need to ``import re``.
2. Derive a subclass of `State` (or `StateWS`) for each state in your state
machine::
class MyState(statemachine.State):
Within the state's class definition:
a) Include a pattern for each transition, in `State.patterns`::
patterns = {'atransition': r'pattern', ...}
b) Include a list of initial transitions to be set up automatically, in
`State.initial_transitions`::
initial_transitions = ['atransition', ...]
c) Define a method for each transition, with the same name as the
transition pattern::
def atransition(self, match, context, next_state):
# do something
result = [...] # a list
return context, next_state, result
# context, next_state may be altered
Transition methods may raise an `EOFError` to cut processing short.
d) You may wish to override the `State.bof()` and/or `State.eof()` implicit
transition methods, which handle the beginning- and end-of-file.
e) In order to handle nested processing, you may wish to override the
attributes `State.nested_sm` and/or `State.nested_sm_kwargs`.
If you are using `StateWS` as a base class, in order to handle nested
indented blocks, you may wish to:
- override the attributes `StateWS.indent_sm`,
`StateWS.indent_sm_kwargs`, `StateWS.known_indent_sm`, and/or
`StateWS.known_indent_sm_kwargs`;
- override the `StateWS.blank()` method; and/or
- override or extend the `StateWS.indent()`, `StateWS.known_indent()`,
and/or `StateWS.firstknown_indent()` methods.
3. Create a state machine object::
sm = StateMachine(state_classes=[MyState, ...],
initial_state='MyState')
4. Obtain the input text, which needs to be converted into a tab-free list of
one-line strings. For example, to read text from a file called
'inputfile'::
input_string = open('inputfile').read()
input_lines = statemachine.string2lines(input_string)
5. Run the state machine on the input text and collect the results, a list::
results = sm.run(input_lines)
6. Remove any lingering circular references::
sm.unlink()
"""
__docformat__ = 'restructuredtext'
import sys
import re
import types
import unicodedata
from docutils import utils
from docutils.utils.error_reporting import ErrorOutput
class StateMachine:
"""
A finite state machine for text filters using regular expressions.
The input is provided in the form of a list of one-line strings (no
newlines). States are subclasses of the `State` class. Transitions consist
of regular expression patterns and transition methods, and are defined in
each state.
The state machine is started with the `run()` method, which returns the
results of processing in a list.
"""
def __init__(self, state_classes, initial_state, debug=False):
    """
    Initialize a `StateMachine` object; add state objects.

    Parameters:

    - `state_classes`: a list of `State` (sub)classes.
    - `initial_state`: a string, the class name of the initial state.
    - `debug`: a boolean; produce verbose output if true (nonzero).
    """

    self.input_lines = None
    """`StringList` of input lines (without newlines).
    Filled by `self.run()`."""

    self.input_offset = 0
    """Offset of `self.input_lines` from the beginning of the file."""

    self.line = None
    """Current input line."""

    self.line_offset = -1
    """Current input line offset from beginning of `self.input_lines`."""

    self.debug = debug
    """Debugging mode on/off."""

    self.initial_state = initial_state
    """The name of the initial state (key to `self.states`)."""

    self.current_state = initial_state
    """The name of the current state (key to `self.states`)."""

    self.states = {}
    """Mapping of {state_name: State_object}."""

    # Instantiate and register every supplied state class up front.
    self.add_states(state_classes)

    self.observers = []
    """List of bound methods or functions to call whenever the current
    line changes.  Observers are called with one argument, ``self``.
    Cleared at the end of `run()`."""

    self._stderr = ErrorOutput()
    """Wrapper around sys.stderr catching en-/decoding errors"""
def unlink(self):
    """Remove circular references to objects no longer required."""
    # Snapshot the values so state.unlink() may mutate the mapping safely.
    for state_object in list(self.states.values()):
        state_object.unlink()
    self.states = None
def run(self, input_lines, input_offset=0, context=None,
input_source=None, initial_state=None):
"""
Run the state machine on `input_lines`. Return results (a list).
Reset `self.line_offset` and `self.current_state`. Run the
beginning-of-file transition. Input one line at a time and check for a
matching transition. If a match is found, call the transition method
and possibly change the state. Store the context returned by the
transition method to be passed on to the next transition matched.
Accumulate the results returned by the transition methods in a list.
Run the end-of-file transition. Finally, return the accumulated
results.
Parameters:
- `input_lines`: a list of strings without newlines, or `StringList`.
- `input_offset`: the line offset of `input_lines` from the beginning
of the file.
- `context`: application-specific storage.
- `input_source`: name or path of source of `input_lines`.
- `initial_state`: name of initial state.
"""
self.runtime_init()
if isinstance(input_lines, StringList):
self.input_lines = input_lines
else:
self.input_lines = StringList(input_lines, source=input_source)
self.input_offset = input_offset
self.line_offset = -1
self.current_state = initial_state or self.initial_state
if self.debug:
print((
'\nStateMachine.run: input_lines (line_offset=%s):\n| %s'
% (self.line_offset, '\n| '.join(self.input_lines))), file=self._stderr)
transitions = None
results = []
state = self.get_state()
try:
if self.debug:
print('\nStateMachine.run: bof transition', file=self._stderr)
context, result = state.bof(context)
results.extend(result)
while True:
try:
try:
self.next_line()
if self.debug:
source, offset = self.input_lines.info(
self.line_offset)
print((
'\nStateMachine.run: line (source=%r, '
'offset=%r):\n| %s'
% (source, offset, self.line)), file=self._stderr)
context, next_state, result = self.check_line(
context, state, transitions)
except EOFError:
if self.debug:
print((
'\nStateMachine.run: %s.eof transition'
% state.__class__.__name__), file=self._stderr)
result = state.eof(context)
results.extend(result)
break
else:
results.extend(result)
except TransitionCorrection as exception:
self.previous_line() # back up for another try
transitions = (exception.args[0],)
if self.debug:
print((
'\nStateMachine.run: TransitionCorrection to '
'state "%s", transition %s.'
% (state.__class__.__name__, transitions[0])), file=self._stderr)
continue
except StateCorrection as exception:
self.previous_line() # back up for another try
next_state = exception.args[0]
if len(exception.args) == 1:
transitions = None
else:
transitions = (exception.args[1],)
if self.debug:
print((
'\nStateMachine.run: StateCorrection to state '
'"%s", transition %s.'
% (next_state, transitions[0])), file=self._stderr)
else:
transitions = None
state = self.get_state(next_state)
except:
if self.debug:
self.error()
raise
self.observers = []
return results
def get_state(self, next_state=None):
"""
Return current state object; set it first if `next_state` given.
Parameter `next_state`: a string, the name of the next state.
Exception: `UnknownStateError` raised if `next_state` unknown.
"""
if next_state:
if self.debug and next_state != self.current_state:
print((
'\nStateMachine.get_state: Changing state from '
'"%s" to "%s" (input line %s).'
% (self.current_state, next_state,
self.abs_line_number())), file=self._stderr)
self.current_state = next_state
try:
return self.states[self.current_state]
except KeyError:
raise UnknownStateError(self.current_state)
def next_line(self, n=1):
"""Load `self.line` with the `n`'th next line and return it."""
try:
try:
self.line_offset += n
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def is_next_line_blank(self):
"""Return 1 if the next line is blank or non-existant."""
try:
return not self.input_lines[self.line_offset + 1].strip()
except IndexError:
return 1
def at_eof(self):
"""Return 1 if the input is at or past end-of-file."""
return self.line_offset >= len(self.input_lines) - 1
def at_bof(self):
"""Return 1 if the input is at or before beginning-of-file."""
return self.line_offset <= 0
def previous_line(self, n=1):
"""Load `self.line` with the `n`'th previous line and return it."""
self.line_offset -= n
if self.line_offset < 0:
self.line = None
else:
self.line = self.input_lines[self.line_offset]
self.notify_observers()
return self.line
def goto_line(self, line_offset):
"""Jump to absolute line offset `line_offset`, load and return it."""
try:
try:
self.line_offset = line_offset - self.input_offset
self.line = self.input_lines[self.line_offset]
except IndexError:
self.line = None
raise EOFError
return self.line
finally:
self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self):
"""Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset
def abs_line_number(self):
"""Return line number of current line (counting from 1)."""
return self.line_offset + self.input_offset + 1
def get_source_and_line(self, lineno=None):
"""Return (source, line) tuple for current or given line number.
Looks up the source and line number in the `self.input_lines`
StringList instance to count for included source files.
If the optional argument `lineno` is given, convert it from an
absolute line number to the corresponding (source, line) pair.
"""
if lineno is None:
offset = self.line_offset
else:
offset = lineno - self.input_offset - 1
try:
src, srcoffset = self.input_lines.info(offset)
srcline = srcoffset + 1
except (TypeError):
# line is None if index is "Just past the end"
src, srcline = self.get_source_and_line(offset + self.input_offset)
return src, srcline + 1
except (IndexError): # `offset` is off the list
src, srcline = None, None
# raise AssertionError('cannot find line %d in %s lines' %
# (offset, len(self.input_lines)))
# # list(self.input_lines.lines())))
# assert offset == srcoffset, str(self.input_lines)
# print "get_source_and_line(%s):" % lineno,
# print offset + 1, '->', src, srcline
# print self.input_lines
return (src, srcline)
def insert_input(self, input_lines, source):
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding after '+source,
offset=len(input_lines))
self.input_lines.insert(self.line_offset + 1, '',
source='internal padding before '+source,
offset=-1)
self.input_lines.insert(self.line_offset + 2,
StringList(input_lines, source))
def get_text_block(self, flush_left=False):
"""
Return a contiguous block of text.
If `flush_left` is true, raise `UnexpectedIndentationError` if an
indented line is encountered before the text block ends (with a blank
line).
"""
try:
block = self.input_lines.get_text_block(self.line_offset,
flush_left)
self.next_line(len(block) - 1)
return block
except UnexpectedIndentationError as err:
block = err.args[0]
self.next_line(len(block) - 1) # advance to last line of block
raise
def check_line(self, context, state, transitions=None):
"""
Examine one line of input for a transition match & execute its method.
Parameters:
- `context`: application-dependent storage.
- `state`: a `State` object, the current state.
- `transitions`: an optional ordered list of transition names to try,
instead of ``state.transition_order``.
Return the values returned by the transition method:
- context: possibly modified from the parameter `context`;
- next state name (`State` subclass name);
- the result output of the transition, a list.
When there is no match, ``state.no_match()`` is called and its return
value is returned.
"""
if transitions is None:
transitions = state.transition_order
state_correction = None
if self.debug:
print((
'\nStateMachine.check_line: state="%s", transitions=%r.'
% (state.__class__.__name__, transitions)), file=self._stderr)
for name in transitions:
pattern, method, next_state = state.transitions[name]
match = pattern.match(self.line)
if match:
if self.debug:
print((
'\nStateMachine.check_line: Matched transition '
'"%s" in state "%s".'
% (name, state.__class__.__name__)), file=self._stderr)
return method(match, context, next_state)
else:
if self.debug:
print((
'\nStateMachine.check_line: No match in state "%s".'
% state.__class__.__name__), file=self._stderr)
return state.no_match(context, transitions)
def add_state(self, state_class):
"""
Initialize & add a `state_class` (`State` subclass) object.
Exception: `DuplicateStateError` raised if `state_class` was already
added.
"""
statename = state_class.__name__
if statename in self.states:
raise DuplicateStateError(statename)
self.states[statename] = state_class(self, self.debug)
def add_states(self, state_classes):
"""
Add `state_classes` (a list of `State` subclasses).
"""
for state_class in state_classes:
self.add_state(state_class)
def runtime_init(self):
"""
Initialize `self.states`.
"""
for state in list(self.states.values()):
state.runtime_init()
def error(self):
"""Report error details."""
type, value, module, line, function = _exception_data()
print('%s: %s' % (type, value), file=self._stderr)
print('input line %s' % (self.abs_line_number()), file=self._stderr)
print(('module %s, line %s, function %s' %
(module, line, function)), file=self._stderr)
def attach_observer(self, observer):
"""
The `observer` parameter is a function or bound method which takes two
arguments, the source and offset of the current line.
"""
self.observers.append(observer)
def detach_observer(self, observer):
self.observers.remove(observer)
def notify_observers(self):
for observer in self.observers:
try:
info = self.input_lines.info(self.line_offset)
except IndexError:
info = (None, None)
observer(*info)
class State:
    """
    State superclass. Contains a list of transitions, and transition methods.
    Transition methods all have the same signature. They take 3 parameters:
    - An `re` match object. ``match.string`` contains the matched input line,
      ``match.start()`` gives the start index of the match, and
      ``match.end()`` gives the end index.
    - A context object, whose meaning is application-defined (initial value
      ``None``). It can be used to store any information required by the state
      machine, and the retured context is passed on to the next transition
      method unchanged.
    - The name of the next state, a string, taken from the transitions list;
      normally it is returned unchanged, but it may be altered by the
      transition method if necessary.
    Transition methods all return a 3-tuple:
    - A context object, as (potentially) modified by the transition method.
    - The next state name (a return value of ``None`` means no state change).
    - The processing result, a list, which is accumulated by the state
      machine.
    Transition methods may raise an `EOFError` to cut processing short.
    There are two implicit transitions, and corresponding transition methods
    are defined: `bof()` handles the beginning-of-file, and `eof()` handles
    the end-of-file. These methods have non-standard signatures and return
    values. `bof()` returns the initial context and results, and may be used
    to return a header string, or do any other processing needed. `eof()`
    should handle any remaining context and wrap things up; it returns the
    final processing result.
    Typical applications need only subclass `State` (or a subclass), set the
    `patterns` and `initial_transitions` class attributes, and provide
    corresponding transition methods. The default object initialization will
    take care of constructing the list of transitions.
    """
    patterns = None
    """
    {Name: pattern} mapping, used by `make_transition()`. Each pattern may
    be a string or a compiled `re` pattern. Override in subclasses.
    """
    initial_transitions = None
    """
    A list of transitions to initialize when a `State` is instantiated.
    Each entry is either a transition name string, or a (transition name, next
    state name) pair. See `make_transitions()`. Override in subclasses.
    """
    nested_sm = None
    """
    The `StateMachine` class for handling nested processing.
    If left as ``None``, `nested_sm` defaults to the class of the state's
    controlling state machine. Override it in subclasses to avoid the default.
    """
    nested_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `nested_sm` constructor.
    Two keys must have entries in the dictionary:
    - Key 'state_classes' must be set to a list of `State` classes.
    - Key 'initial_state' must be set to the name of the initial state class.
    If `nested_sm_kwargs` is left as ``None``, 'state_classes' defaults to the
    class of the current state, and 'initial_state' defaults to the name of
    the class of the current state. Override in subclasses to avoid the
    defaults.
    """
    def __init__(self, state_machine, debug=False):
        """
        Initialize a `State` object; make & add initial transitions.
        Parameters:
        - `statemachine`: the controlling `StateMachine` object.
        - `debug`: a boolean; produce verbose output if true.
        """
        self.transition_order = []
        """A list of transition names in search order."""
        self.transitions = {}
        """
        A mapping of transition names to 3-tuples containing
        (compiled_pattern, transition_method, next_state_name). Initialized as
        an instance attribute dynamically (instead of as a class attribute)
        because it may make forward references to patterns and methods in this
        or other classes.
        """
        self.add_initial_transitions()
        self.state_machine = state_machine
        """A reference to the controlling `StateMachine` object."""
        self.debug = debug
        """Debugging mode on/off."""
        if self.nested_sm is None:
            self.nested_sm = self.state_machine.__class__
        if self.nested_sm_kwargs is None:
            self.nested_sm_kwargs = {'state_classes': [self.__class__],
                                     'initial_state': self.__class__.__name__}
    def runtime_init(self):
        """
        Initialize this `State` before running the state machine; called from
        `self.state_machine.run()`.
        """
        pass
    def unlink(self):
        """Remove circular references to objects no longer required."""
        self.state_machine = None
    def add_initial_transitions(self):
        """Make and add transitions listed in `self.initial_transitions`."""
        if self.initial_transitions:
            names, transitions = self.make_transitions(
                self.initial_transitions)
            self.add_transitions(names, transitions)
    def add_transitions(self, names, transitions):
        """
        Add a list of transitions to the start of the transition list.
        Parameters:
        - `names`: a list of transition names.
        - `transitions`: a mapping of names to transition tuples.
        Exceptions: `DuplicateTransitionError`, `UnknownTransitionError`.
        """
        for name in names:
            if name in self.transitions:
                raise DuplicateTransitionError(name)
            if name not in transitions:
                raise UnknownTransitionError(name)
        self.transition_order[:0] = names
        self.transitions.update(transitions)
    def add_transition(self, name, transition):
        """
        Add a transition to the start of the transition list.
        Parameter `transition`: a ready-made transition 3-tuple.
        Exception: `DuplicateTransitionError`.
        """
        if name in self.transitions:
            raise DuplicateTransitionError(name)
        self.transition_order[:0] = [name]
        self.transitions[name] = transition
    def remove_transition(self, name):
        """
        Remove a transition by `name`.
        Exception: `UnknownTransitionError`.
        """
        try:
            del self.transitions[name]
            self.transition_order.remove(name)
        except (KeyError, ValueError):
            # KeyError: `name` not in `self.transitions`; ValueError: `name`
            # not in `self.transition_order`.  A bare ``except`` here would
            # mask unrelated errors as `UnknownTransitionError`.
            raise UnknownTransitionError(name)
    def make_transition(self, name, next_state=None):
        """
        Make & return a transition tuple based on `name`.
        This is a convenience function to simplify transition creation.
        Parameters:
        - `name`: a string, the name of the transition pattern & method. This
          `State` object must have a method called '`name`', and a dictionary
          `self.patterns` containing a key '`name`'.
        - `next_state`: a string, the name of the next `State` object for this
          transition. A value of ``None`` (or absent) implies no state change
          (i.e., continue with the same state).
        Exceptions: `TransitionPatternNotFound`, `TransitionMethodNotFound`.
        """
        if next_state is None:
            next_state = self.__class__.__name__
        try:
            pattern = self.patterns[name]
            if not hasattr(pattern, 'match'):
                # Plain string pattern: compile it lazily, once.
                pattern = re.compile(pattern)
        except KeyError:
            raise TransitionPatternNotFound(
                '%s.patterns[%r]' % (self.__class__.__name__, name))
        try:
            method = getattr(self, name)
        except AttributeError:
            raise TransitionMethodNotFound(
                '%s.%s' % (self.__class__.__name__, name))
        return (pattern, method, next_state)
    def make_transitions(self, name_list):
        """
        Return a list of transition names and a transition mapping.
        Parameter `name_list`: a list, where each entry is either a transition
        name string, or a 1- or 2-tuple (transition name, optional next state
        name).
        """
        names = []
        transitions = {}
        for namestate in name_list:
            # `isinstance` replaces the old ``type(namestate) is type('')``
            # identity test, which would miss `str` subclasses.
            if isinstance(namestate, str):
                transitions[namestate] = self.make_transition(namestate)
                names.append(namestate)
            else:
                transitions[namestate[0]] = self.make_transition(*namestate)
                names.append(namestate[0])
        return names, transitions
    def no_match(self, context, transitions):
        """
        Called when there is no match from `StateMachine.check_line()`.
        Return the same values returned by transition methods:
        - context: unchanged;
        - next state name: ``None``;
        - empty result list.
        Override in subclasses to catch this event.
        """
        return context, None, []
    def bof(self, context):
        """
        Handle beginning-of-file. Return unchanged `context`, empty result.
        Override in subclasses.
        Parameter `context`: application-defined storage.
        """
        return context, []
    def eof(self, context):
        """
        Handle end-of-file. Return empty result.
        Override in subclasses.
        Parameter `context`: application-defined storage.
        """
        return []
    def nop(self, match, context, next_state):
        """
        A "do nothing" transition method.
        Return unchanged `context` & `next_state`, empty result. Useful for
        simple state changes (actionless transitions).
        """
        return context, next_state, []
class StateMachineWS(StateMachine):
    """
    `StateMachine` subclass specialized for whitespace recognition.
    There are three methods provided for extracting indented text blocks:
    - `get_indented()`: use when the indent is unknown.
    - `get_known_indented()`: use when the indent is known for all lines.
    - `get_first_known_indented()`: use when only the first line's indent is
      known.
    """
    def _trim_leading_blanks(self, indented, offset):
        """
        Discard leading blank lines from `indented` (modified in place) and
        return the adjusted offset of its first remaining line.
        Shared by the three ``get_*indented()`` methods, which previously
        duplicated this loop verbatim.
        """
        while indented and not indented[0].strip():
            indented.trim_start()
            offset += 1
        return offset
    def get_indented(self, until_blank=False, strip_indent=True):
        """
        Return a block of indented lines of text, and info.
        Extract an indented block where the indent is unknown for all lines.
        :Parameters:
            - `until_blank`: Stop collecting at the first blank line if true.
            - `strip_indent`: Strip common leading indent if true (default).
        :Return:
            - the indented block (a list of lines of text),
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent)
        if indented:
            self.next_line(len(indented) - 1) # advance to last indented line
        offset = self._trim_leading_blanks(indented, offset)
        return indented, indent, offset, blank_finish
    def get_known_indented(self, indent, until_blank=False, strip_indent=True):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for all lines.
        Starting with the current line, extract the entire text block with at
        least `indent` indentation (which must be whitespace, except for the
        first line).
        :Parameters:
            - `indent`: The number of indent columns/characters.
            - `until_blank`: Stop collecting at the first blank line if true.
            - `strip_indent`: Strip `indent` characters of indentation if true
              (default).
        :Return:
            - the indented block,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              block_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        offset = self._trim_leading_blanks(indented, offset)
        return indented, offset, blank_finish
    def get_first_known_indented(self, indent, until_blank=False,
                                 strip_indent=True, strip_top=True):
        """
        Return an indented block and info.
        Extract an indented block where the indent is known for the first line
        and unknown for all other lines.
        :Parameters:
            - `indent`: The first line's indent (# of columns/characters).
            - `until_blank`: Stop collecting at the first blank line if true
              (1).
            - `strip_indent`: Strip `indent` characters of indentation if true
              (1, default).
            - `strip_top`: Strip blank lines from the beginning of the block.
        :Return:
            - the indented block,
            - its indent,
            - its first line offset from BOF, and
            - whether or not it finished with a blank line.
        """
        offset = self.abs_line_offset()
        indented, indent, blank_finish = self.input_lines.get_indented(
              self.line_offset, until_blank, strip_indent,
              first_indent=indent)
        self.next_line(len(indented) - 1) # advance to last indented line
        if strip_top:
            offset = self._trim_leading_blanks(indented, offset)
        return indented, indent, offset, blank_finish
class StateWS(State):
    """
    State superclass specialized for whitespace (blank lines & indents).
    Use this class with `StateMachineWS`. The transitions 'blank' (for blank
    lines) and 'indent' (for indented text blocks) are added automatically,
    before any other transitions. The transition method `blank()` handles
    blank lines and `indent()` handles nested indented blocks. Indented
    blocks trigger a new state machine to be created by `indent()` and run.
    The class of the state machine to be created is in `indent_sm`, and the
    constructor keyword arguments are in the dictionary `indent_sm_kwargs`.
    The methods `known_indent()` and `firstknown_indent()` are provided for
    indented blocks where the indent (all lines' and first line's only,
    respectively) is known to the transition method, along with the attributes
    `known_indent_sm` and `known_indent_sm_kwargs`. Neither transition method
    is triggered automatically.
    """
    indent_sm = None
    """
    The `StateMachine` class handling indented text blocks.
    If left as ``None``, `indent_sm` defaults to the value of
    `State.nested_sm`. Override it in subclasses to avoid the default.
    """
    indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `indent_sm` constructor.
    If left as ``None``, `indent_sm_kwargs` defaults to the value of
    `State.nested_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    known_indent_sm = None
    """
    The `StateMachine` class handling known-indented text blocks.
    If left as ``None``, `known_indent_sm` defaults to the value of
    `indent_sm`. Override it in subclasses to avoid the default.
    """
    known_indent_sm_kwargs = None
    """
    Keyword arguments dictionary, passed to the `known_indent_sm` constructor.
    If left as ``None``, `known_indent_sm_kwargs` defaults to the value of
    `indent_sm_kwargs`. Override it in subclasses to avoid the default.
    """
    ws_patterns = {'blank': ' *$',
                   'indent': ' +'}
    """Patterns for default whitespace transitions. May be overridden in
    subclasses."""
    ws_initial_transitions = ('blank', 'indent')
    """Default initial whitespace transitions, added before those listed in
    `State.initial_transitions`. May be overridden in subclasses."""
    def __init__(self, state_machine, debug=False):
        """
        Set up a `StateWS` object; extends `State.__init__()`.
        Fill in any unset indent state-machine attributes from their
        fallbacks.
        """
        State.__init__(self, state_machine, debug)
        # Each attribute defaults to the one named second.  The order
        # matters: `known_indent_sm` may fall back to the freshly-resolved
        # `indent_sm` (and likewise for the kwargs dictionaries).
        for attribute, fallback in (
                ('indent_sm', 'nested_sm'),
                ('indent_sm_kwargs', 'nested_sm_kwargs'),
                ('known_indent_sm', 'indent_sm'),
                ('known_indent_sm_kwargs', 'indent_sm_kwargs')):
            if getattr(self, attribute) is None:
                setattr(self, attribute, getattr(self, fallback))
    def add_initial_transitions(self):
        """
        Prepend whitespace-specific transitions to those defined in the
        subclass. Extends `State.add_initial_transitions()`.
        """
        State.add_initial_transitions(self)
        if self.patterns is None:
            self.patterns = dict(self.ws_patterns)
        else:
            self.patterns.update(self.ws_patterns)
        ws_names, ws_transitions = self.make_transitions(
            self.ws_initial_transitions)
        self.add_transitions(ws_names, ws_transitions)
    def blank(self, match, context, next_state):
        """Handle blank lines. Does nothing. Override in subclasses."""
        return self.nop(match, context, next_state)
    def indent(self, match, context, next_state):
        """
        Handle an indented text block. Extend or override in subclasses.
        Recursively run the registered state machine for indented blocks
        (`self.indent_sm`).
        """
        block, block_indent, first_line_offset, blank_finish = \
            self.state_machine.get_indented()
        machine = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
        nested_results = machine.run(block, input_offset=first_line_offset)
        return context, next_state, nested_results
    def known_indent(self, match, context, next_state):
        """
        Handle a known-indent text block. Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        block, first_line_offset, blank_finish = \
            self.state_machine.get_known_indented(match.end())
        machine = self.known_indent_sm(debug=self.debug,
                                       **self.known_indent_sm_kwargs)
        nested_results = machine.run(block, input_offset=first_line_offset)
        return context, next_state, nested_results
    def first_known_indent(self, match, context, next_state):
        """
        Handle an indented text block (first line's indent known).
        Extend or override in subclasses.
        Recursively run the registered state machine for known-indent indented
        blocks (`self.known_indent_sm`). The indent is the length of the
        match, ``match.end()``.
        """
        block, first_line_offset, blank_finish = \
            self.state_machine.get_first_known_indented(match.end())
        machine = self.known_indent_sm(debug=self.debug,
                                       **self.known_indent_sm_kwargs)
        nested_results = machine.run(block, input_offset=first_line_offset)
        return context, next_state, nested_results
class _SearchOverride:
    """
    Mix-in class to override `StateMachine` regular expression behavior.
    Changes regular expression matching, from the default `re.match()`
    (succeeds only if the pattern matches at the start of `self.line`) to
    `re.search()` (succeeds if the pattern matches anywhere in `self.line`).
    When subclassing a `StateMachine`, list this class **first** in the
    inheritance list of the class definition.
    """
    def match(self, pattern):
        """
        Return the result of a regular expression search.
        Overrides `StateMachine.match()`.
        Parameter `pattern`: `re` compiled regular expression.
        """
        current_line = self.line
        return pattern.search(current_line)
class SearchStateMachine(_SearchOverride, StateMachine):
    """`StateMachine` which uses `re.search()` instead of `re.match()`."""
class SearchStateMachineWS(_SearchOverride, StateMachineWS):
    """`StateMachineWS` which uses `re.search()` instead of `re.match()`."""
class ViewList:
"""
List with extended functionality: slices of ViewList objects are child
lists, linked to their parents. Changes made to a child list also affect
the parent list. A child list is effectively a "view" (in the SQL sense)
of the parent list. Changes to parent lists, however, do *not* affect
active child lists. If a parent list is changed, any active child lists
should be recreated.
The start and end of the slice can be trimmed using the `trim_start()` and
`trim_end()` methods, without affecting the parent list. The link between
child and parent lists can be broken by calling `disconnect()` on the
child list.
Also, ViewList objects keep track of the source & offset of each item.
This information is accessible via the `source()`, `offset()`, and
`info()` methods.
"""
def __init__(self, initlist=None, source=None, items=None,
parent=None, parent_offset=None):
self.data = []
"""The actual list of data, flattened from various sources."""
self.items = []
"""A list of (source, offset) pairs, same length as `self.data`: the
source of each line and the offset of each line from the beginning of
its source."""
self.parent = parent
"""The parent list."""
self.parent_offset = parent_offset
"""Offset of this list from the beginning of the parent list."""
if isinstance(initlist, ViewList):
self.data = initlist.data[:]
self.items = initlist.items[:]
elif initlist is not None:
self.data = list(initlist)
if items:
self.items = items
else:
self.items = [(source, i) for i in range(len(initlist))]
assert len(self.data) == len(self.items), 'data mismatch'
def __str__(self):
return str(self.data)
def __repr__(self):
return '%s(%s, items=%s)' % (self.__class__.__name__,
self.data, self.items)
def __lt__(self, other): return self.data < self.__cast(other)
def __le__(self, other): return self.data <= self.__cast(other)
def __eq__(self, other): return self.data == self.__cast(other)
def __ne__(self, other): return self.data != self.__cast(other)
def __gt__(self, other): return self.data > self.__cast(other)
def __ge__(self, other): return self.data >= self.__cast(other)
def __cmp__(self, other): return cmp(self.data, self.__cast(other))
def __cast(self, other):
if isinstance(other, ViewList):
return other.data
else:
return other
def __contains__(self, item): return item in self.data
def __len__(self): return len(self.data)
# The __getitem__()/__setitem__() methods check whether the index
# is a slice first, since indexing a native list with a slice object
# just works.
def __getitem__(self, i):
if isinstance(i, slice):
assert i.step in (None, 1), 'cannot handle slice with stride'
return self.__class__(self.data[i.start:i.stop],
items=self.items[i.start:i.stop],
parent=self, parent_offset=i.start or 0)
else:
return self.data[i]
def __setitem__(self, i, item):
if isinstance(i, slice):
assert i.step in (None, 1), 'cannot handle slice with stride'
if not isinstance(item, ViewList):
raise TypeError('assigning non-ViewList to ViewList slice')
self.data[i.start:i.stop] = item.data
self.items[i.start:i.stop] = item.items
assert len(self.data) == len(self.items), 'data mismatch'
if self.parent:
self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset] = item
else:
self.data[i] = item
if self.parent:
self.parent[i + self.parent_offset] = item
def __delitem__(self, i):
try:
del self.data[i]
del self.items[i]
if self.parent:
del self.parent[i + self.parent_offset]
except TypeError:
assert i.step is None, 'cannot handle slice with stride'
del self.data[i.start:i.stop]
del self.items[i.start:i.stop]
if self.parent:
del self.parent[(i.start or 0) + self.parent_offset
: (i.stop or len(self)) + self.parent_offset]
def __add__(self, other):
if isinstance(other, ViewList):
return self.__class__(self.data + other.data,
items=(self.items + other.items))
else:
raise TypeError('adding non-ViewList to a ViewList')
def __radd__(self, other):
if isinstance(other, ViewList):
return self.__class__(other.data + self.data,
items=(other.items + self.items))
else:
raise TypeError('adding ViewList to a non-ViewList')
def __iadd__(self, other):
if isinstance(other, ViewList):
self.data += other.data
else:
raise TypeError('argument to += must be a ViewList')
return self
def __mul__(self, n):
return self.__class__(self.data * n, items=(self.items * n))
__rmul__ = __mul__
def __imul__(self, n):
self.data *= n
self.items *= n
return self
    def extend(self, other):
        """Append all lines of `other` (a ViewList), mirroring into the parent."""
        if not isinstance(other, ViewList):
            raise TypeError('extending a ViewList with a non-ViewList')
        if self.parent:
            # Mirror the change into the parent at this view's offset.
            self.parent.insert(len(self.data) + self.parent_offset, other)
        self.data.extend(other.data)
        self.items.extend(other.items)
    def append(self, item, source=None, offset=0):
        """Append one line with its `(source, offset)` info; if `source` is
        None, `item` must be a ViewList and is appended wholesale."""
        if source is None:
            self.extend(item)
        else:
            if self.parent:
                # Keep the parent's view of this region in sync.
                self.parent.insert(len(self.data) + self.parent_offset, item,
                                   source, offset)
            self.data.append(item)
            self.items.append((source, offset))
    def insert(self, i, item, source=None, offset=0):
        """Insert at index `i`; `item` is a ViewList when `source` is None,
        otherwise a single line with its `(source, offset)` info."""
        if source is None:
            if not isinstance(item, ViewList):
                raise TypeError('inserting non-ViewList with no source given')
            self.data[i:i] = item.data
            self.items[i:i] = item.items
            if self.parent:
                # Normalize a possibly-negative index before mapping it
                # into the parent's coordinates.
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item)
        else:
            self.data.insert(i, item)
            self.items.insert(i, (source, offset))
            if self.parent:
                index = (len(self.data) + i) % len(self.data)
                self.parent.insert(index + self.parent_offset, item,
                                   source, offset)
    def pop(self, i=-1):
        """Remove and return the line at index `i`, syncing the parent."""
        if self.parent:
            index = (len(self.data) + i) % len(self.data)
            self.parent.pop(index + self.parent_offset)
        self.items.pop(i)
        return self.data.pop(i)
def trim_start(self, n=1):
"""
Remove items from the start of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[:n]
del self.items[:n]
if self.parent:
self.parent_offset += n
def trim_end(self, n=1):
"""
Remove items from the end of the list, without touching the parent.
"""
if n > len(self.data):
raise IndexError("Size of trim too large; can't trim %s items "
"from a list of size %s." % (n, len(self.data)))
elif n < 0:
raise IndexError('Trim size must be >= 0.')
del self.data[-n:]
del self.items[-n:]
def remove(self, item):
index = self.index(item)
del self[index]
def count(self, item): return self.data.count(item)
def index(self, item): return self.data.index(item)
def reverse(self):
self.data.reverse()
self.items.reverse()
self.parent = None
def sort(self, *args):
tmp = list(zip(self.data, self.items))
tmp.sort(*args)
self.data = [entry[0] for entry in tmp]
self.items = [entry[1] for entry in tmp]
self.parent = None
    def info(self, i):
        """Return source & offset for index `i`.

        As a special case, index `len(self)` (one past the end) yields the
        last line's source with a None offset, so callers can report the
        position "after the last line"."""
        try:
            return self.items[i]
        except IndexError:
            if i == len(self.data):     # Just past the end
                return self.items[i - 1][0], None
            else:
                raise
    def source(self, i):
        """Return source for index `i`."""
        return self.info(i)[0]
    def offset(self, i):
        """Return offset for index `i`."""
        return self.info(i)[1]
    def disconnect(self):
        """Break link between this list and parent list."""
        self.parent = None
    def xitems(self):
        """Return iterator yielding (source, offset, value) tuples."""
        for (value, (source, offset)) in zip(self.data, self.items):
            yield (source, offset, value)
    def pprint(self):
        """Print the list in `grep` format (`source:offset:value` lines)"""
        for line in self.xitems():
            print("%s:%d:%s" % line)
class StringList(ViewList):
    """A `ViewList` with string-specific methods."""
    def trim_left(self, length, start=0, end=sys.maxsize):
        """
        Trim `length` characters off the beginning of each item, in-place,
        from index `start` to `end`. No whitespace-checking is done on the
        trimmed text. Does not affect slice parent.
        """
        self.data[start:end] = [line[length:]
                                for line in self.data[start:end]]
    def get_text_block(self, start, flush_left=False):
        """
        Return a contiguous block of text.
        If `flush_left` is true, raise `UnexpectedIndentationError` if an
        indented line is encountered before the text block ends (with a blank
        line).
        """
        end = start
        last = len(self.data)
        # Advance until a blank line (or EOF) terminates the block.
        while end < last:
            line = self.data[end]
            if not line.strip():
                break
            if flush_left and (line[0] == ' '):
                source, offset = self.info(end)
                raise UnexpectedIndentationError(self[start:end], source,
                                                 offset + 1)
            end += 1
        return self[start:end]
    def get_indented(self, start=0, until_blank=False, strip_indent=True,
                     block_indent=None, first_indent=None):
        """
        Extract and return a StringList of indented lines of text.
        Collect all lines with indentation, determine the minimum indentation,
        remove the minimum indentation from all indented lines (unless
        `strip_indent` is false), and return them. All lines up to but not
        including the first unindented line will be returned.
        :Parameters:
          - `start`: The index of the first line to examine.
          - `until_blank`: Stop collecting at the first blank line if true.
          - `strip_indent`: Strip common leading indent if true (default).
          - `block_indent`: The indent of the entire block, if known.
          - `first_indent`: The indent of the first line, if known.
        :Return:
          - a StringList of indented lines with mininum indent removed;
          - the amount of the indent;
          - a boolean: did the indented block finish with a blank line or EOF?
        """
        indent = block_indent # start with None if unknown
        end = start
        if block_indent is not None and first_indent is None:
            first_indent = block_indent
        if first_indent is not None:
            # The first line's indent is known, so skip examining it.
            end += 1
        last = len(self.data)
        while end < last:
            line = self.data[end]
            if line and (line[0] != ' '
                         or (block_indent is not None
                             and line[:block_indent].strip())):
                # Line not indented or insufficiently indented.
                # Block finished properly iff the last indented line blank:
                blank_finish = ((end > start)
                                and not self.data[end - 1].strip())
                break
            stripped = line.lstrip()
            if not stripped: # blank line
                if until_blank:
                    blank_finish = 1
                    break
            elif block_indent is None:
                # Track the minimum indent seen over non-blank lines.
                line_indent = len(line) - len(stripped)
                if indent is None:
                    indent = line_indent
                else:
                    indent = min(indent, line_indent)
            end += 1
        else:
            blank_finish = 1 # block ends at end of lines
        block = self[start:end]
        if first_indent is not None and block:
            block.data[0] = block.data[0][first_indent:]
        if indent and strip_indent:
            block.trim_left(indent, start=(first_indent is not None))
        return block, indent or 0, blank_finish
    def get_2D_block(self, top, left, bottom, right, strip_indent=True):
        """Return a rectangular slice of lines [top:bottom] x [left:right],
        with trailing whitespace stripped and (optionally) the common
        leading indent removed."""
        block = self[top:bottom]
        indent = right
        # NOTE(review): `left`/`right` are rebound to combining-character-
        # adjusted columns inside the loop and carry over to subsequent
        # iterations — matches upstream docutils, so left as-is.
        for i in range(len(block.data)):
            # get slice from line, care for combining characters
            ci = utils.column_indices(block.data[i])
            try:
                left = ci[left]
            except IndexError:
                left += len(block.data[i]) - len(ci)
            try:
                right = ci[right]
            except IndexError:
                right += len(block.data[i]) - len(ci)
            block.data[i] = line = block.data[i][left:right].rstrip()
            if line:
                indent = min(indent, len(line) - len(line.lstrip()))
        if strip_indent and 0 < indent < right:
            block.data = [line[indent:] for line in block.data]
        return block
    def pad_double_width(self, pad_char):
        """
        Pad all double-width characters in self by appending `pad_char` to each.
        For East Asian language support.
        """
        if hasattr(unicodedata, 'east_asian_width'):
            east_asian_width = unicodedata.east_asian_width
        else:
            return # new in Python 2.4
        for i in range(len(self.data)):
            line = self.data[i]
            if isinstance(line, str):
                new = []
                for char in line:
                    new.append(char)
                    if east_asian_width(char) in 'WF': # 'W'ide & 'F'ull-width
                        new.append(pad_char)
                self.data[i] = ''.join(new)
    def replace(self, old, new):
        """Replace all occurrences of substring `old` with `new`."""
        for i in range(len(self.data)):
            self.data[i] = self.data[i].replace(old, new)
class StateMachineError(Exception):
    """Base class for errors raised by the state machine framework."""

class UnknownStateError(StateMachineError):
    """A referenced state name is not registered."""

class DuplicateStateError(StateMachineError):
    """A state was added more than once."""

class UnknownTransitionError(StateMachineError):
    """A referenced transition name is not registered."""

class DuplicateTransitionError(StateMachineError):
    """A transition was added more than once."""

class TransitionPatternNotFound(StateMachineError):
    """A transition's regular-expression pattern is missing."""

class TransitionMethodNotFound(StateMachineError):
    """A transition's handler method is missing."""

class UnexpectedIndentationError(StateMachineError):
    """An indented line appeared where none is allowed."""

class TransitionCorrection(Exception):
    """
    Raise from within a transition method to switch to another transition.
    Raise with one argument, the new transition name.
    """

class StateCorrection(Exception):
    """
    Raise from within a transition method to switch to another state.
    Raise with one or two arguments: new state name, and an optional new
    transition name.
    """
def string2lines(astring, tab_width=8, convert_whitespace=False,
                 whitespace=re.compile('[\v\f]')):
    """
    Split `astring` into one-line strings, expanding tabs and stripping
    trailing whitespace from each line.
    A tab expands to between 1 and `tab_width` spaces so that the next
    character lands on a multiple of `tab_width` (8 by default).
    Parameters:
    - `astring`: a multi-line string.
    - `tab_width`: the number of columns between tab stops.
    - `convert_whitespace`: convert form feeds and vertical tabs to spaces?
    """
    if convert_whitespace:
        astring = whitespace.sub(' ', astring)
    lines = []
    for raw_line in astring.splitlines():
        lines.append(raw_line.expandtabs(tab_width).rstrip())
    return lines
def _exception_data():
"""
Return exception information:
- the exception's class name;
- the exception object;
- the name of the file containing the offending code;
- the line number of the offending code;
- the function name of the offending code.
"""
type, value, traceback = sys.exc_info()
while traceback.tb_next:
traceback = traceback.tb_next
code = traceback.tb_frame.f_code
return (type.__name__, value, code.co_filename, traceback.tb_lineno,
code.co_name)
| mit |
TeamEOS/external_chromium_org | tools/cr/cr/commands/prepare.py | 59 | 1781 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the prepare command."""
import cr
class PrepareCommand(cr.Command):
  """The cr 'prepare' command.

  Gets an output directory ready for use: the rarer setup steps needed when
  you get or add new source files or change build options, not per-build
  work. All real behavior is delegated to PrepareOut plugin implementations,
  which live (mostly) in the cr.actions package.
  """

  def __init__(self):
    super(PrepareCommand, self).__init__()
    self.help = 'Prepares an output directory'
    self.description = ("""
        This does any preparation needed for the output directory, such as
        running gyp.
        """)

  def Run(self):
    # The command is a thin wrapper over the class-level Prepare sequence.
    self.Prepare()

  @classmethod
  def UpdateContext(cls):
    # Give every PrepareOut plugin a chance to adjust the context.
    for plugin in PrepareOut.Plugins():
      plugin.UpdateContext()

  @classmethod
  def Prepare(cls):
    # Context must be up to date before any plugin does its work.
    cls.UpdateContext()
    for plugin in PrepareOut.Plugins():
      plugin.Prepare()
class PrepareOut(cr.Plugin, cr.Plugin.Type):
  """Base class for output directory preparation plugins.
  See PrepareCommand for details.
  """
  def UpdateContext(self):
    """Update the context if needed.
    This is also used by commands that want the environment setup correctly, but
    are not going to call Prepare directly (such as sync)."""
    # Intentionally a no-op in the base class; subclasses override as needed.
  def Prepare(self):
    """All PrepareOut plugins must override this method to do their work."""
    raise NotImplementedError('Must be overridden.')
| bsd-3-clause |
JingJunYin/tensorflow | tensorflow/contrib/cudnn_rnn/python/kernel_tests/cudnn_rnn_test.py | 3 | 45770 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import itertools
import os
import sys
import unittest
import numpy as np
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl as gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
# Short module-level aliases for the cudnn_rnn_ops constants used throughout
# these tests (RNN modes, directions, and per-layer parameter counts).
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
class CudnnTestModel(object):
  """Model with convenient APIs for easier building and running test graph.
  The graph built is used by all tests below to avoid repeatedly building
  similar test graphs.
  """
  def __init__(self,
               rnn_mode,
               num_layers,
               num_units,
               input_size,
               direction=CUDNN_RNN_UNIDIRECTION,
               dropout=0.,
               dtype=dtypes.float32,
               training=False,
               seed=None,
               kernel_initializer=None,
               bias_initializer=None):
    if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64):
      raise ValueError("Invalid dtype: %s" % dtype)
    self._dtype = dtype
    # Time-major placeholders; seq_length and batch_size stay dynamic (None).
    self._inputs = array_ops.placeholder(
        dtype=dtype, shape=[None, None, input_size], name="inputs")
    h = array_ops.placeholder(
        dtype=dtype, shape=[None, None, num_units], name="h")
    c = array_ops.placeholder(
        dtype=dtype, shape=[None, None, num_units], name="c")
    # LSTM state is (h, c); all other modes carry only (h,).
    if rnn_mode == CUDNN_LSTM:
      model_fn = cudnn_rnn.CudnnLSTM
      self._initial_state = (h, c)
    elif rnn_mode == CUDNN_GRU:
      model_fn = cudnn_rnn.CudnnGRU
      self._initial_state = (h,)
    elif rnn_mode == CUDNN_RNN_TANH:
      model_fn = cudnn_rnn.CudnnRNNTanh
      self._initial_state = (h,)
    elif rnn_mode == CUDNN_RNN_RELU:
      model_fn = cudnn_rnn.CudnnRNNRelu
      self._initial_state = (h,)
    else:
      raise ValueError("Invalid rnn_mode: %s" % rnn_mode)
    self._rnn = model_fn(
        num_layers,
        num_units,
        direction=direction,
        dropout=dropout,
        dtype=dtype,
        seed=seed,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer)
    self._rnn.build([None, None, input_size])
    self._outputs, self._output_state = self._rnn(
        self._inputs, initial_state=self._initial_state, training=training)
  def _AddUp(self, outputs, output_state):
    """Return a scalar tensor: sum of outputs plus all state tensors."""
    total = math_ops.reduce_sum(outputs)
    for s in output_state:
      total += math_ops.reduce_sum(s)
    return total
  @property
  def inputs(self):
    return self._inputs
  @property
  def initial_state(self):
    return self._initial_state
  @property
  def outputs(self):
    return self._outputs
  @property
  def output_state(self):
    return self._output_state
  @property
  def rnn(self):
    return self._rnn
  @property
  def total_sum(self):
    return self._AddUp(self.outputs, self.output_state)
  def SynthesizeInput(self, seq_length, batch_size, seed=1234):
    """Synthesizes input and initial state values for testing."""
    np.random.seed(seed)
    num_layers = self._rnn.num_layers
    dir_count = self._rnn.num_dirs
    num_units = self._rnn.num_units
    input_size = self._rnn.input_size
    # NOTE(review): for dtypes.float16 this falls through to float64 —
    # confirm that is intended.
    np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
    inputs = np.random.randn(seq_length, batch_size,
                             input_size).astype(np_dtype)
    input_h = np.random.randn(num_layers * dir_count, batch_size,
                              num_units).astype(np_dtype)
    if self._rnn.rnn_mode == CUDNN_LSTM:
      input_c = np.random.randn(num_layers * dir_count, batch_size,
                                num_units).astype(np_dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    return inputs, initial_state
  def ZeroState(self, batch_size):
    """Return an all-zero initial state tuple for the given batch size."""
    num_layers = self._rnn.num_layers
    dir_count = self._rnn.num_dirs
    num_units = self._rnn.num_units
    # NOTE(review): same float16 -> float64 fallthrough as SynthesizeInput.
    np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
    input_h = np.zeros((num_layers * dir_count, batch_size,
                        num_units)).astype(np_dtype)
    if self._rnn.rnn_mode == CUDNN_LSTM:
      input_c = np.zeros((num_layers * dir_count, batch_size,
                          num_units)).astype(np_dtype)
      initial_state = (input_h, input_c)
    else:
      initial_state = (input_h,)
    return initial_state
  def FProp(self, inputs_t, initial_state_t, training):
    """Builds additional subgraph with given inputs and state.
    Args:
      inputs_t: a tensor.
      initial_state_t: a tensor.
      training: boolean, true if training mode.
    Returns:
      A tensor of the forward pass output of the model.
    """
    outputs, output_state = self._rnn(
        inputs_t, initial_state=initial_state_t, training=training)
    return self._AddUp(outputs, output_state)
  def Feed(self, sess, inputs, initial_state=None, return_sum=True):
    """Runs graph with given inputs and initial state."""
    batch_size = inputs.shape[1]
    if initial_state is None:
      initial_state = self.ZeroState(batch_size)
    if return_sum:
      return sess.run(
          self.total_sum,
          feed_dict={self.inputs: inputs,
                     self.initial_state: initial_state})
    else:
      return sess.run(
          [self.outputs, self.output_state],
          feed_dict={self.inputs: inputs,
                     self.initial_state: initial_state})
def _CreateCudnnCompatibleCanonicalRNN(rnn, inputs, is_bidi=False, scope=None):
  """Builds a canonical (non-Cudnn) RNN mirroring `rnn`'s configuration.

  Args:
    rnn: a Cudnn RNN layer; its rnn_mode, num_units and num_layers are
      mirrored with cudnn-compatible cells so cudnn-trained weights load.
    inputs: time-major input tensor.
    is_bidi: if true, build a stacked bidirectional RNN.
    scope: optional variable scope for the canonical RNN.

  Returns:
    (outputs, output_state) from dynamic_rnn, or
    (outputs, (output_state_fw, output_state_bw)) when `is_bidi`.

  Raises:
    ValueError: on an unsupported rnn_mode.
  """
  mode = rnn.rnn_mode
  num_units = rnn.num_units
  num_layers = rnn.num_layers
  # To reuse cuDNN-trained models, must use cudnn compatible rnn cells.
  if mode == CUDNN_LSTM:
    single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units)
  elif mode == CUDNN_GRU:
    single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units)
  elif mode == CUDNN_RNN_TANH:
    single_cell = (lambda: rnn_cell_impl.BasicRNNCell(num_units, math_ops.tanh))
  elif mode == CUDNN_RNN_RELU:
    single_cell = (
        lambda: rnn_cell_impl.BasicRNNCell(num_units, gen_nn_ops.relu))
  else:
    raise ValueError("%s is not supported!" % mode)
  if not is_bidi:
    cell = rnn_cell_impl.MultiRNNCell(
        [single_cell() for _ in range(num_layers)])
    return rnn_lib.dynamic_rnn(
        cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)
  else:
    cells_fw = [single_cell() for _ in range(num_layers)]
    cells_bw = [single_cell() for _ in range(num_layers)]
    (outputs, output_state_fw,
     output_state_bw) = contrib_rnn_lib.stack_bidirectional_dynamic_rnn(
         cells_fw,
         cells_bw,
         inputs,
         dtype=dtypes.float32,
         time_major=True,
         scope=scope)
    return outputs, (output_state_fw, output_state_bw)
class CudnnRNNTestBasic(TensorFlowTestCase):
  """Basic layer-construction, reuse and device-assignment tests."""
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testLayerBasic(self):
    """Layer reuse creates exactly one opaque kernel variable."""
    num_layers = 4
    num_units = 2
    batch_size = 8
    direction = CUDNN_RNN_UNIDIRECTION
    dir_count = 1
    with vs.variable_scope("main"):
      # Zero initializers so every output (and its sum) is exactly 0.
      kernel_initializer = init_ops.constant_initializer(0.)
      bias_initializer = init_ops.constant_initializer(0.)
      inputs = random_ops.random_uniform([
          num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)
      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")
      # Build the layer
      outputs1, _ = lstm(inputs)
      # Reuse the layer
      outputs2, _ = lstm(inputs)
      total_sum1 = math_ops.reduce_sum(outputs1)
      total_sum2 = math_ops.reduce_sum(outputs2)
    with vs.variable_scope("main", reuse=True):
      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")
      # Reuse the layer
      outputs3, _ = lstm(inputs)
      total_sum3 = math_ops.reduce_sum(outputs3)
    # All three invocations share one trainable opaque-kernel variable.
    self.assertEqual(1, len(variables.trainable_variables()))
    self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))
    self.assertEqual("main/awesome_lstm/opaque_kernel",
                     variables.trainable_variables()[0].op.name)
    with self.test_session(use_gpu=True) as sess:
      sess.run(variables.global_variables_initializer())
      (total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(
          [total_sum1, total_sum2, total_sum3])
      self.assertEqual(0, total_sum1_v)
      self.assertEqual(0, total_sum2_v)
      self.assertEqual(0, total_sum3_v)
  def testSaveableGraphDeviceAssignment(self):
    """Save/restore works when variables are pinned to CPU and ops to GPU."""
    num_layers = 4
    num_units = 2
    batch_size = 8
    direction = CUDNN_RNN_UNIDIRECTION
    dir_count = 1
    def DeviceFn(op):
      # Pin variables to CPU, everything else to GPU.
      if op.type in ("Variable", "VariableV2"):
        return "/cpu:0"
      else:
        return "/gpu:0"
    with ops.Graph().as_default() as g:
      with ops.device(DeviceFn):
        with vs.variable_scope("main"):
          kernel_initializer = init_ops.constant_initializer(3.14)
          bias_initializer = init_ops.constant_initializer(1.59)
          inputs = random_ops.random_uniform(
              [num_layers * dir_count, batch_size, num_units],
              dtype=dtypes.float32)
          lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                     direction=direction,
                                     kernel_initializer=kernel_initializer,
                                     bias_initializer=bias_initializer,
                                     name="awesome_lstm")
          outputs = lstm(inputs)
      # saver is created in the scope of DeviceFn.
      saver = saver_lib.Saver()
    with self.test_session(use_gpu=True, graph=g) as sess:
      save_path = os.path.join(self.get_temp_dir(),
                               "test-saveable-device-assignment")
      sess.run(variables.global_variables_initializer())
      saver.save(sess, save_path)
      saver.restore(sess, save_path)
      sess.run(outputs)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestSaveRestore(TensorFlowTestCase):
  """Save/restore round-trip tests in the canonical (per-layer) format."""
  def _CompareWeights(self, lhs, rhs):
    """Asserts two canonical weight lists are element-wise equal."""
    self.assertEqual(len(lhs), len(rhs))
    for lw, rw in zip(lhs, rhs):
      self.assertAllEqual(lw, rw)
  def _CompareBiases(self, lhs, rhs, rnn_mode, num_layers, direction):
    """Asserts two canonical bias lists are equivalent, layer by layer."""
    self.assertEqual(len(lhs), len(rhs))
    if rnn_mode == CUDNN_LSTM:
      num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
    elif rnn_mode == CUDNN_GRU:
      num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
    elif rnn_mode == CUDNN_RNN_TANH:
      num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
    else:
      num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
    num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2
    num_params_per_layer *= num_dirs
    self.assertEqual(num_params_per_layer * num_layers, len(lhs))
    for i in range(num_layers):
      layer_lhs = lhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
      layer_rhs = rhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
      if direction == CUDNN_RNN_UNIDIRECTION:
        self._CompareSingleLayerBiases(layer_lhs, layer_rhs)
      else:
        # Bidirectional layers store forward biases then backward biases.
        size = len(layer_lhs)
        fw_lhs, bw_lhs = layer_lhs[:size//2], layer_lhs[size//2:]
        fw_rhs, bw_rhs = layer_rhs[:size//2], layer_rhs[size//2:]
        self._CompareSingleLayerBiases(fw_lhs, fw_rhs)
        self._CompareSingleLayerBiases(bw_lhs, bw_rhs)
  def _CompareSingleLayerBiases(self, lhs, rhs):
    """Compares biases by the sums of matching input/recurrent halves.
    Cudnn keeps two bias vectors per gate whose element-wise sum is what
    matters; only the sums are required to match."""
    self.assertEqual(len(lhs), len(rhs))
    lf_lhs, rt_lhs = lhs[:len(lhs)//2], lhs[len(lhs)//2:]
    lf_rhs, rt_rhs = rhs[:len(rhs)//2], rhs[len(rhs)//2:]
    self.assertEqual(len(lf_lhs), len(rt_lhs))
    self.assertEqual(len(lf_rhs), len(rt_rhs))
    sum_lhs, sum_rhs = [], []
    for lf, rt in zip(lf_lhs, rt_lhs):
      sum_lhs.append(lf + rt)
    for lf, rt in zip(lf_rhs, rt_rhs):
      sum_rhs.append(lf + rt)
    self.assertEqual(len(sum_lhs), len(sum_rhs))
    for lf, rt in zip(sum_lhs, sum_rhs):
      self.assertAllEqual(lf, rt)
  def _TestSaveRestoreVariable(self, rnn_mode, direction, dtype):
    """Save, zero out the opaque param, restore, and compare canonicals."""
    input_size = 3
    num_layers = 2
    num_units = 7
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(1234)
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype)
      rnn = model.rnn
      save_path = os.path.join(self.get_temp_dir(),
                               "save-restore-variable-test")
      saver = saver_lib.Saver()
      weights, biases = model.rnn.saveable._OpaqueParamsToCanonical()
      opaque_params = rnn.trainable_variables[0]
      # CudnnTestModel() creates CudnnOpaqueParamsSaveable that helps saver save
      # Cudnn vars in canonical format.
      reset_op = state_ops.assign(
          opaque_params,
          array_ops.zeros(array_ops.shape(opaque_params), dtype=dtype))
      # Passing graph explicitly, otherwise an old sess would be reused.
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        val = saver.save(sess, save_path)
        self.assertEqual(save_path, val)
        weights_v, biases_v = sess.run([weights, biases])
        # Reset opaque param
        sess.run(reset_op)
        saver.restore(sess, save_path)
        weights_v_restored, biases_v_restored = sess.run([weights, biases])
        self._CompareWeights(weights_v, weights_v_restored)
        self._CompareBiases(biases_v, biases_v_restored, rnn_mode, num_layers,
                            direction)
  def _TestSaveRestoreTwoVariables(self, rnn_mode, direction, dtype):
    """Same as above, but with two independent Cudnn models in one graph."""
    input_size = 3
    num_layers = 2
    num_units = 7
    with ops.Graph().as_default() as g:
      random_seed.set_random_seed(1234)
      with vs.variable_scope("m1"):
        model1 = CudnnTestModel(
            rnn_mode,
            num_layers,
            num_units,
            input_size,
            direction=direction,
            dtype=dtype)
      with vs.variable_scope("m2"):
        model2 = CudnnTestModel(
            rnn_mode,
            num_layers,
            num_units,
            input_size,
            direction=direction,
            dtype=dtype)
      opaque_params = (model1.rnn.trainable_variables[0],
                       model2.rnn.trainable_variables[0])
      weights1, biases1 = model1.rnn.saveable._OpaqueParamsToCanonical()
      weights2, biases2 = model2.rnn.saveable._OpaqueParamsToCanonical()
      reset_params = [
          state_ops.assign(params,
                           array_ops.zeros_like(params, dtype=dtype))
          for params in opaque_params
      ]
      reset_op = control_flow_ops.group(*reset_params)
      save_path = os.path.join(self.get_temp_dir(),
                               "save-restore-variable-test2")
      saver = saver_lib.Saver()
      # Passing graph explicitly, otherwise an old sess would be reused.
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        val = saver.save(sess, save_path)
        self.assertEqual(save_path, val)
        weights1_v, biases1_v = sess.run([weights1, biases1])
        weights2_v, biases2_v = sess.run([weights2, biases2])
        sess.run(reset_op)
        saver.restore(sess, save_path)
        weights1_v_restored, biases1_v_restored = sess.run([weights1, biases1])
        weights2_v_restored, biases2_v_restored = sess.run([weights2, biases2])
        self._CompareWeights(weights1_v, weights1_v_restored)
        self._CompareWeights(weights2_v, weights2_v_restored)
        self._CompareBiases(biases1_v, biases1_v_restored, rnn_mode, num_layers,
                            direction)
        self._CompareBiases(biases2_v, biases2_v_restored, rnn_mode, num_layers,
                            direction)
  def _TestSaveRestoreOutput(self, rnn_mode, direction, dtype):
    """Verifies model *outputs* are unchanged by a save/restore round trip."""
    with ops.Graph().as_default() as g:
      num_layers = 2
      num_units = 7
      input_size = 7
      seq_length = 8
      batch_size = 4
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype,
          training=False)
      rnn = model.rnn
      save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test")
      saver = saver_lib.Saver()
      # Only one opaque var in a cudnn layer.
      assert len(rnn.trainable_variables) == 1
      reset_params = state_ops.assign(
          rnn.trainable_variables[0],
          array_ops.zeros(
              array_ops.shape(rnn.trainable_variables[0]), dtype=dtype))
      # Passing graph explicitly, otherwise an old sess would be reused.
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        inputs, initial_state = model.SynthesizeInput(seq_length, batch_size)
        total_sum_v = model.Feed(sess, inputs, initial_state)
        val = saver.save(sess, save_path)
        self.assertEqual(save_path, val)
        sess.run(reset_params)
        saver.restore(sess, save_path)
        total_sum_v_restored = model.Feed(sess, inputs, initial_state)
        self.assertAllClose(total_sum_v, total_sum_v_restored, atol=1e-5)
  def _TestSaveRestoreHelper(self, rnn_mode):
    """Runs all three save/restore checks over directions x dtypes."""
    directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
    dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
    for direction, dtype in itertools.product(directions, dtype_list):
      self._TestSaveRestoreVariable(rnn_mode, direction, dtype)
      self._TestSaveRestoreTwoVariables(rnn_mode, direction, dtype)
      self._TestSaveRestoreOutput(rnn_mode, direction, dtype)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSaveRestoreRepeatedlyCreateCustomSaveable(self):
    """Creating the custom saveable twice must raise."""
    input_size = 3
    num_layers = 2
    num_units = 7
    with ops.Graph().as_default():
      random_seed.set_random_seed(1234)
      model = CudnnTestModel(
          CUDNN_LSTM,
          num_layers,
          num_units,
          input_size,
          direction=CUDNN_RNN_UNIDIRECTION,
          dtype=dtypes.float32)
      with self.assertRaisesRegexp(RuntimeError,
                                   "Cudnn saveable already created"):
        model.rnn._create_saveable()
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSaveRestoreLSTM(self):
    self._TestSaveRestoreHelper(CUDNN_LSTM)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSaveRestoreGRU(self):
    self._TestSaveRestoreHelper(CUDNN_GRU)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSaveRestoreRNNTanh(self):
    self._TestSaveRestoreHelper(CUDNN_RNN_TANH)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSaveRestoreRNNRelu(self):
    self._TestSaveRestoreHelper(CUDNN_RNN_RELU)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestCompatibleRNNCells(TensorFlowTestCase):
  """Verifies cudnn-trained weights produce identical inference results when
  loaded into the cudnn-compatible canonical RNN cells."""
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleLSTM(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_LSTM)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleGRU(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_GRU)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleRNNTanh(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_TANH)
  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testCudnnCompatibleRNNRelu(self):
    self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_RELU)
  def _TestCudnnCompatibleRnnCellsHelper(self, rnn_mode):
    """Runs the compatibility check for every config/direction combination."""
    configs = [
        {
            "num_layers": 1,
            "seq_length": 3,
            "num_units": 4,
            "input_size": 5,
            "batch_size": 6,
        },
        {
            "num_layers": 2,
            "seq_length": 8,
            "num_units": 4,
            "input_size": 8,
            "batch_size": 16,
        },
        {
            "num_layers": 2,
            "seq_length": 3,
            "num_units": 4,
            "input_size": 5,
            "batch_size": 6,
        },
        {
            "num_layers": 1,
            "seq_length": 2,
            "num_units": 2,
            "input_size": 4,
            "batch_size": 1,
        },
    ]
    directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
    # Bug fix: zip(configs, directions) truncated to 2 pairs, so the last
    # two configs were never exercised; run the full cross product instead.
    for cfg, direction in itertools.product(configs, directions):
      self._TestCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
                                        cfg["num_units"], cfg["input_size"],
                                        cfg["batch_size"], rnn_mode, direction)
  def _TestCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,
                                   input_size, batch_size, rnn_mode, direction):
    """Trains a Cudnn model, then checks canonical-cell inference matches.

    Three graphs are built in sequence: a Cudnn training graph (checkpointed),
    a Cudnn inference graph (restored and fed), and a canonical-cell inference
    graph (restored from the same checkpoint); outputs and final states of the
    two inference graphs must agree.
    """
    dtype = dtypes.float32
    # Train graph
    with ops.Graph().as_default() as g:
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype,
          training=True)
      target_output = array_ops.placeholder(dtype=dtype)
      loss_op = losses.log_loss(
          labels=target_output, predictions=model.total_sum)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
      train_op = optimizer.minimize(loss_op)
      saver = saver_lib.Saver()
      # Train Cudnn model
      seed = 0
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        # Train 128 steps
        num_steps = 128
        for _ in range(num_steps):
          inputs, _ = model.SynthesizeInput(seq_length, batch_size, seed)
          targets = np.random.rand()
          sess.run(
              train_op,
              feed_dict={
                  model.inputs: inputs,
                  model.initial_state: model.ZeroState(batch_size),
                  target_output: targets
              })
          seed += 1
        save_path = os.path.join(self.get_temp_dir(),
                                 ("cudnn-rnn-%s-test" % rnn_mode))
        save_v = saver.save(sess, save_path)
        self.assertEqual(save_path, save_v)
    # Cudnn inference graph
    with ops.Graph().as_default() as g:
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dtype=dtype,
          training=False)
      rnn = model.rnn
      saver = saver_lib.Saver()
      inference_input = np.random.rand(seq_length, batch_size,
                                       input_size).astype(np.float32)
      with self.test_session(use_gpu=True, graph=g) as sess:
        sess.run(variables.global_variables_initializer())
        saver.restore(sess, save_path)
        # Cudnn inference
        cudnn_outputs_v, cudnn_output_states_v = model.Feed(
            sess, inference_input, return_sum=False)
    # Canonical RNN inference graph
    with ops.Graph().as_default() as g:
      cell_inputs = array_ops.placeholder(
          dtype, shape=[seq_length, batch_size, input_size])
      if direction == CUDNN_RNN_UNIDIRECTION:
        # outputs is one tensor, states are num_layer tuples, each 2 tensors
        (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs)
        if rnn_mode == CUDNN_LSTM:
          output_h = array_ops.stack([s.h for s in states])
          output_c = array_ops.stack([s.c for s in states])
        else:
          output_state = array_ops.stack([s for s in states])
      else:
        # outputs is one tensor.
        # states is a tuple of 2 tuples:
        # each sub tuple is num_layer tuples, each with 2 tensors.
        (outputs, states) = _CreateCudnnCompatibleCanonicalRNN(
            rnn, cell_inputs, is_bidi=True)
        output_state_fw, output_state_bw = states
        if rnn_mode == CUDNN_LSTM:
          output_h, output_c = [], []
          for s_fw, s_bw in zip(output_state_fw, output_state_bw):
            output_h.append(array_ops.stack([s_fw.h, s_bw.h]))
            output_c.append(array_ops.stack([s_fw.c, s_bw.c]))
          output_h = array_ops.concat(output_h, axis=0)
          output_c = array_ops.concat(output_c, axis=0)
        else:
          output_state = []
          for s_fw, s_bw in zip(output_state_fw, output_state_bw):
            output_state.append(array_ops.stack([s_fw, s_bw]))
          output_state = array_ops.concat(output_state, axis=0)
      saver = saver_lib.Saver()
      with self.test_session(use_gpu=True, graph=g) as sess:
        saver.restore(sess, save_path)
        # BlockCell inference
        if rnn_mode == CUDNN_LSTM:
          outputs_v, output_h_v, output_c_v = sess.run(
              [outputs, output_h, output_c],
              feed_dict={cell_inputs: inference_input})
          self.assertAllClose(cudnn_outputs_v, outputs_v)
          cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v
          self.assertAllClose(cudnn_output_h_v, output_h_v)
          self.assertAllClose(cudnn_output_c_v, output_c_v)
        else:
          outputs_v, output_state_v = sess.run(
              [outputs, output_state],
              feed_dict={cell_inputs: inference_input})
          self.assertAllClose(cudnn_outputs_v, outputs_v, atol=2e-5, rtol=2e-5)
          (cudnn_output_h_v,) = cudnn_output_states_v
          self.assertAllClose(cudnn_output_h_v, output_state_v, atol=2e-5,
                              rtol=2e-5)
class CudnnRNNTestParamsSize(TensorFlowTestCase):
  """Checks the opaque cudnn parameter buffer is large enough.

  The opaque buffer must hold at least as many elements as the canonical
  weights and biases it encodes.
  """

  def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size,
                            dtype, direction):
    """Asserts opaque param count >= sum of canonical weight/bias sizes.

    Args:
      rnn_mode: one of CUDNN_LSTM / CUDNN_GRU / CUDNN_RNN_RELU /
        CUDNN_RNN_TANH.
      num_layers: number of stacked RNN layers.
      num_units: hidden size per layer.
      input_size: size of each input timestep.
      dtype: a tf dtype (float16/float32/float64).
      direction: CUDNN_RNN_UNIDIRECTION or CUDNN_RNN_BIDIRECTION.
    """
    logging.info("Testing one lstm param size with config: %s", locals())
    model = CudnnTestModel(
        rnn_mode,
        num_layers,
        num_units,
        input_size,
        dtype=dtype,
        direction=direction)
    rnn = model.rnn

    # Min param size estimate = sum(weights.size) + sum(biases.size)
    # BUGFIX: np.sum(map(...)) is broken under Python 3 -- map returns an
    # iterator, which np.sum wraps in a 0-d object array instead of
    # reducing. Materialize the per-weight element counts in a list first.
    min_params_size = (
        np.sum([np.prod(shape) for shape in rnn.canonical_weight_shapes]) +
        np.sum([sp[0] for sp in rnn.canonical_bias_shapes]))

    opaque_params = rnn.trainable_variables[0]
    with self.test_session(use_gpu=True, graph=ops.get_default_graph()):
      variables.global_variables_initializer().run()
      opaque_params_size_v = opaque_params.eval().size
      self.assertLessEqual(min_params_size, opaque_params_size_v)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testOpaqueParamsSize(self):
    """Sweeps rnn type x shape x dtype x direction combinations."""
    test_configs = [
        [4, 200, 200],
        [4, 200, 300],
        [4, 200, 100],
        [1, 100, 200],
        [2, 200, 100],
        [3, 200, 400],
    ]
    directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
    dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
    rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH]
    for (rnn, config, dtype, direction) in itertools.product(
        rnns, test_configs, dtype_list, directions):
      num_layers, num_units, input_size = config
      with ops.Graph().as_default():
        self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size,
                                   dtype, direction)
class CudnnRNNTestTraining(TensorFlowTestCase):
  """Gradient tests for Cudnn RNNs.

  Symbolic gradients are compared against numeric (finite-difference)
  gradients for fp32/fp64. Fp16 uses a directional-derivative check
  instead, since plain finite differences underflow at half precision.
  """

  def _ComputeNumericGrad(self, sess, y, x, delta=1e-4, step=1):
    """Compute the numeric gradient of y wrt to x.

    Args:
      sess: The TF session constructed with a graph containing x and y.
      y: A scalar TF Tensor in the graph constructed in sess.
      x: A TF Tensor in the graph constructed in sess.
      delta: Gradient checker's small perturbation of x[i].
      step: Only compute numerical gradients for a subset of x values.
        I.e. dy/dx[i] is computed if i % step == 0.
    Returns:
      A Tensor of the same shape and dtype as x. If x[i] is not chosen
      to compute the numerical gradient dy/x[i], the corresponding
      value is set to 0.
    """
    x_data = sess.run(x)
    x_size = x_data.size
    x_shape = x_data.shape
    numeric_grad = np.zeros(x_size, dtype=x_data.dtype)

    for i in range(0, x_size, step):
      # Central difference: dy/dx[i] ~= (y(x+d) - y(x-d)) / (2d).
      x_pos = x_data.copy()
      if x_size == 1:
        x_pos += delta
      else:
        x_pos.flat[i] += delta
      y_pos_feed_dict = dict([(x.name, x_pos)])
      y_pos = sess.run(y, feed_dict=y_pos_feed_dict)

      x_neg = x_data.copy()
      if x_size == 1:
        x_neg -= delta
      else:
        x_neg.flat[i] -= delta
      y_neg_feed_dict = dict([(x.name, x_neg)])
      y_neg = sess.run(y, feed_dict=y_neg_feed_dict)

      numeric_grad[i] = (y_pos - y_neg) / (2 * delta)
    return numeric_grad.reshape(x_shape)

  def _GetShape(self, sess, inputs):
    """Returns evaluated shape(s) of a tensor or an iterable of tensors."""
    # BUGFIX: collections.Iterable was deprecated since Python 3.3 and
    # removed in 3.10; the ABC lives in collections.abc.
    if not isinstance(inputs, collections.abc.Iterable):
      return sess.run(array_ops.shape(inputs))
    else:
      return sess.run([array_ops.shape(x) for x in inputs])

  def _GradientCheckFp16(self, sess, y, xs, num_samples,
                         tolerance=1e-6, delta=1e-4):
    """Gradient check for Fp16.

    Fp16 numerical gradients end up being zeros. Use a new way to check
    gradients:

    Given multi-variant function:
    y = f(x1, x2, ... xn)
    delta_y = f(x1 + delta_x1, x2+delta_x2, ..., xn+delta_xn) -
              f(x1, x2, ..., xn)
            = f'(x1) * delta_x1 + f'(x2) * delta_x2 + .. + f'(xn) * delta_xn
    where:
      delta_xi are very small disturbance.
      f'(xi) is the gradient of y w.r.t xi.

    The gradient check verifies the expected delta_y calculated by the above
    equation is close to the actual delta_y.
    Args:
      sess: tf.Session object.
      y: output tensor.
      xs: a tensor or a list of input tensors.
      num_samples: number of test samples to run.
      tolerance: error tolerance.
      delta: the order of magnitude of input disturbance to apply to calculate
        the output change w.r.t inputs.
    """
    sym_grads = self._ComputeSymGrads(sess, y, xs)
    xs_shapes = self._GetShape(sess, xs)

    x_vals = [sess.run(x) for x in xs]
    for _ in range(num_samples):
      delta_xs = [delta * np.random.rand(*shape.tolist())
                  for shape in xs_shapes]

      feed_dict = {}
      for x, x_val, delta_x in zip(xs, x_vals, delta_xs):
        feed_dict[x] = x_val + delta_x
      actual_delta_y = (float(sess.run(y, feed_dict=feed_dict)) -
                        float(sess.run(y)))

      # Accumulate the first-order Taylor estimate of the output change.
      expected_delta_y = 0.
      for sym_grad, delta_x in zip(sym_grads, delta_xs):
        expected_delta_y += np.dot(
            sym_grad.astype(np.float32).flatten(),
            delta_x.astype(np.float32).flatten())
      self.assertAllClose(expected_delta_y, actual_delta_y,
                          atol=tolerance, rtol=tolerance)

  def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4):
    """Compares symbolic gradients of y w.r.t. each x with numeric ones."""
    sym_grads = self._ComputeSymGrads(sess, y, xs)

    num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs]
    self.assertEqual(len(sym_grads), len(num_grads))
    for sym, num in zip(sym_grads, num_grads):
      self.assertFalse(np.any(np.isnan(sym)))
      self.assertFalse(np.any(np.isnan(num)))
      self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance)

  def _ComputeSymGrads(self, sess, y, xs):
    """Evaluates the symbolic gradients of y w.r.t. each tensor in xs."""
    sym_grads_t = gradients.gradients(y, xs)
    return sess.run(sym_grads_t)

  def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
                             batch_size, seq_length, dir_count, dropout, dtype,
                             delta, tolerance):
    """Builds one Cudnn RNN model and gradient-checks its trainable inputs."""
    # Gradient checking runs two forward ops with almost the same input. Need to
    # make sure the drop patterns across the two runs are the same.
    logging.info("Training test with config: %s", locals())
    old_env_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE", str(False))
    os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)
    try:
      np.random.seed(1234)
      random_seed.set_random_seed(5678)
      has_input_c = (rnn_mode == CUDNN_LSTM)
      direction = (CUDNN_RNN_UNIDIRECTION
                   if dir_count == 1 else CUDNN_RNN_BIDIRECTION)
      model = CudnnTestModel(
          rnn_mode,
          num_layers,
          num_units,
          input_size,
          direction=direction,
          dropout=dropout,
          dtype=dtype,
          training=True,
          bias_initializer=init_ops.random_normal_initializer(
              mean=1., dtype=dtype))
      rnn = model.rnn
      params = rnn.trainable_variables[0]

      inputs = variables.Variable(
          random_ops.random_uniform(
              [seq_length, batch_size, input_size], dtype=dtype),
          dtype=dtype)
      input_h = variables.Variable(
          random_ops.random_uniform(
              [num_layers * dir_count, batch_size, num_units], dtype=dtype),
          dtype=dtype)
      if has_input_c:
        # LSTM carries a cell state in addition to the hidden state.
        input_c = variables.Variable(
            random_ops.random_uniform(
                [num_layers * dir_count, batch_size, num_units], dtype=dtype),
            dtype=dtype)
        initial_state = (input_h, input_c)
      else:
        initial_state = (input_h,)
      total_sum = model.FProp(inputs, initial_state, training=True)

      with self.test_session(
          use_gpu=True, graph=ops.get_default_graph()) as sess:
        sess.run(variables.global_variables_initializer())
        all_inputs = [inputs, params]
        for s in initial_state:
          all_inputs.append(s)
        if dtype == dtypes.float16:
          self._GradientCheckFp16(
              sess, total_sum, all_inputs,
              num_samples=FLAGS.grad_check_num_samples,
              tolerance=tolerance, delta=delta)
        else:
          for _ in range(FLAGS.grad_check_num_samples):
            # Each time choose a different set of inputs.
            sess.run(variables.global_variables_initializer())
            self._GradientCheck(
                sess, total_sum, all_inputs,
                tolerance=tolerance, delta=delta)
    finally:
      # ROBUSTNESS: restore the env var even when an assertion fails, so a
      # failing config does not leak state into subsequent tests.
      os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = old_env_state

  def _TestSimpleTrainingHelper(self, rnn_mode, test_configs):
    """Runs _TestOneSimpleTraining for each config x dropout combination."""
    dropouts = [0, 0.5, 1.]
    for config, dropout in itertools.product(test_configs, dropouts):
      dtype = config.get("dtype", dtypes.float32)
      delta = config.get("delta", 1e-4)
      tolerance = config.get("tolerance", 1e-6)
      dir_count = config.get("dir_count", 1)
      shape = config["shape"]
      with ops.Graph().as_default():
        self._TestOneSimpleTraining(rnn_mode, shape["num_layers"],
                                    shape["num_units"], shape["input_size"],
                                    shape["batch_size"], shape["seq_length"],
                                    dir_count, dropout, dtype, delta,
                                    tolerance)

  # Per-dtype training configs below; delta/tolerance are tuned per precision
  # (looser for fp16/fp32, tight for fp64).

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingLSTMFp64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingLSTMFp32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-4,
            "tolerance": 9e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingLSTMFp16(self):
    test_configs = [
        {
            "dtype": dtypes.float16,
            "delta": 1e-3,
            "tolerance": 9e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
        {
            "dtype": dtypes.float16,
            "delta": 1e-2,
            "tolerance": 9e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 6,
                "input_size": 8,
                "batch_size": 6,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingGRUFp64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            }
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingGRUFp32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-3,
            "tolerance": 4e-3,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingGRUFp16(self):
    test_configs = [
        {
            "dtype": dtypes.float16,
            "delta": 2e-3,
            "tolerance": 6e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNTanhFp64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNTanhFp32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-3,
            "tolerance": 5e-3,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNTanhFp16(self):
    test_configs = [
        {
            "dtype": dtypes.float16,
            "delta": 1e-3,
            "tolerance": 5e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNReluFp64(self):
    test_configs = [
        {
            "dtype": dtypes.float64,
            "tolerance": 5e-6,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNReluFp32(self):
    test_configs = [
        {
            "dtype": dtypes.float32,
            "delta": 1e-4,
            "tolerance": 3e-1,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)

  @unittest.skipUnless(test.is_built_with_cuda(),
                       "Test only applicable when running on GPUs")
  def testSimpleTrainingRNNReluFp16(self):
    test_configs = [
        {
            "dtype": dtypes.float16,
            "delta": 1e-3,
            "tolerance": 7e-2,
            "shape": {
                "num_layers": 2,
                "num_units": 3,
                "input_size": 4,
                "batch_size": 3,
                "seq_length": 4,
            },
        },
    ]
    self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
if __name__ == "__main__":
  # Let argparse consume our custom flag, then hand the remaining argv
  # (with the original program name restored) to the test runner.
  original_argv0 = sys.argv[0]
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument(
      "--grad_check_num_samples",
      type=int,
      default=5,
      help="Number of samples to run for gradient check.")
  FLAGS, remaining_args = arg_parser.parse_known_args()
  sys.argv = [original_argv0] + remaining_args
  googletest.main()
| apache-2.0 |
#!/usr/bin/python
#
# Copyright 2011-2013 Sofware freedom conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
try:
import http.client as http_client
except ImportError:
import httplib as http_client
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
class WebDriver(RemoteWebDriver):
    """
    Controls the OperaDriver and allows you to drive the browser.
    """

    def __init__(self, executable_path=None, port=0,
                 desired_capabilities=DesiredCapabilities.OPERA):
        """
        Creates a new instance of the Opera driver.

        Starts the service and then creates new instance of Opera Driver.

        :Args:
         - executable_path - path to the executable. If the default is used
           it assumes the executable is in the Environment Variable
           SELENIUM_SERVER_JAR
         - port - port you would like the service to run, if left as 0,
           a free port will be found.
         - desired_capabilities: Dictionary object with desired capabilities
           (Can be used to provide various Opera switches).
        """
        if executable_path is None:
            # BUGFIX: catch only the expected KeyError; the previous bare
            # `except:` also swallowed KeyboardInterrupt/SystemExit. The
            # message is also rebuilt without the stray run of whitespace the
            # old backslash continuation embedded in it.
            try:
                executable_path = os.environ["SELENIUM_SERVER_JAR"]
            except KeyError:
                raise Exception("No executable path given, please add one to "
                                "Environment Variable 'SELENIUM_SERVER_JAR'")
        self.service = Service(executable_path, port=port)
        self.service.start()

        RemoteWebDriver.__init__(
            self,
            command_executor=self.service.service_url,
            desired_capabilities=desired_capabilities)
        self._is_remote = False

    def quit(self):
        """
        Closes the browser and shuts down the OperaDriver executable
        that is started when starting the OperaDriver
        """
        try:
            RemoteWebDriver.quit(self)
        except http_client.BadStatusLine:
            # The driver process may close the connection before replying.
            pass
        finally:
            self.service.stop()
| mit |
from stat_gatherer import StatGatherer
from datetime import datetime, timedelta
from dateutil import tz
from collections import Counter
import sys
class DailyStatus():
    """Builds a human-readable report of games played since a cutoff time."""

    def __init__(self):
        # Open the database connection up front; lists are filled lazily by
        # load_stats_since_previous_date().
        self.stat = StatGatherer()
        self.stat.connect_to_db()
        self.game_list = None
        self.bid_list = None
        self.player_list = None
        self.local_time = None

    def load_stats_since_previous_date(self, number_of_days):
        """Loads games, bids and players added in the reporting window."""
        # Assume we are running some time after 4am CDT (which is 9am UTC):
        # the window starts at 9am UTC number_of_days ago.
        window_start = datetime.utcnow() - timedelta(days=number_of_days)
        window_start = window_start.replace(
            hour=9, minute=0, second=0, microsecond=0)

        # Remember the window start expressed in the local time zone for the
        # report header.
        utc_zone = tz.tzutc()
        local_zone = tz.tzlocal()
        self.local_time = window_start.replace(
            tzinfo=utc_zone).astimezone(local_zone)

        self.game_list = list(
            self.stat.db.games.find({"date_added": {"$gte": window_start}}))
        self.bid_list = list(
            self.stat.db.bids.find({"date_added": {"$gte": window_start}}))
        self.player_list = list(self.stat.db.players.find())

    def find_player_names_from_ids(self, player_ids):
        """Maps each player id to a username, using "Unknown" for misses."""
        def name_for(player_id):
            for player in self.player_list:
                if player["_id"] == player_id:
                    return player["username"]
            return "Unknown"
        return [name_for(player_id) for player_id in player_ids]

    def find_winning_score(self, game):
        """Returns the final score of the first winner found in results."""
        for entry in game["results"]:
            if entry["player"] in game["winners"]:
                return entry["final_score"]

    def find_losing_scores(self, game):
        """Returns one final score per losing team (or per losing player)."""
        scores = []
        seen_teams = []
        for entry in game["results"]:
            if entry["player"] in game["winners"]:
                continue
            if entry["team_id"] in seen_teams:
                continue
            scores.append(entry["final_score"])
            if entry["team_id"] is not None:
                seen_teams.append(entry["team_id"])
        return scores

    def get_num_games(self):
        """Returns how many games fell inside the reporting window."""
        return len(self.game_list)

    def print_player_summary(self, first_players):
        """Formats per-player game counts, most frequent player first."""
        pieces = ["Summary of players:\n"]
        for name, count in Counter(first_players).most_common():
            pieces.append("{} played {} games\n".format(name, count))
        pieces.append("\n")
        return "".join(pieces)

    def print_game_stats(self):
        """Returns the full report: header, player summary, game details."""
        report = "\n"
        report += "There have been {} games played since {}:".format(
            len(self.game_list), self.local_time)
        report += "\n\n"

        first_players = []
        sections = []
        for game in self.game_list:
            lines = [" Game {}:\n".format(game["_id"])]
            names = self.find_player_names_from_ids(game["players"])
            first_players.append(names[0])
            lines.append(" Players: {}\n".format(", ".join(names)))
            lines.append(" Number of hands played: {}\n".format(
                len(game["hands"])))
            if game["winners"]:
                losing = ", ".join(
                    str(score) for score in self.find_losing_scores(game))
                lines.append(" Final score: {} - {}\n".format(
                    self.find_winning_score(game), losing))
                winner_names = self.find_player_names_from_ids(
                    game["winners"])
                lines.append(" Winners: {}\n".format(
                    ", ".join(winner_names)))
            lines.append("\n")
            sections.append("".join(lines))

        report += self.print_player_summary(first_players)
        report += "".join(sections)
        return report
def main():
    """Command-line entry point: report on games from the last N days.

    Usage: daily_status.py [number_of_days]; defaults to 1 day. A
    non-numeric argument is reported and the default is kept.
    """
    number_of_days = 1
    if len(sys.argv) > 1:
        try:
            number_of_days = int(sys.argv[1])
        except ValueError:
            # BUGFIX: parenthesized single-argument print behaves the same
            # under Python 2 and 3; the old statement form was Py2-only.
            print("Invalid number of days: " + sys.argv[1])

    daily_status = DailyStatus()
    daily_status.load_stats_since_previous_date(number_of_days)
    print(daily_status.print_game_stats())


if __name__ == '__main__':
    main()
| gpl-3.0 |
import datetime
from django.contrib.sites.models import Site
from django.http import Http404
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.views.defaults import (
bad_request, page_not_found, permission_denied, server_error,
)
from ..models import Article, Author, UrlArticle
@override_settings(ROOT_URLCONF='view_tests.urls')
class DefaultsTests(TestCase):
    """Test django views in django/views/defaults.py"""
    # Both URLs must yield a 404. NOTE(review): the first is listed in
    # urls.py yet is expected to 404 -- presumably its view raises Http404;
    # confirm against view_tests.urls.
    nonexistent_urls = [
        '/nonexistent_url/',  # this is in urls.py
        '/other_nonexistent_url/',  # this NOT in urls.py
    ]

    @classmethod
    def setUpTestData(cls):
        # Shared fixtures: one author, three articles (past/present/future
        # creation dates), one UrlArticle, and a Site whose domain matches
        # the test client's default host ("testserver").
        Author.objects.create(name='Boris')
        Article.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Article.objects.create(
            title='Current Article', slug='current_article', author_id=1,
            date_created=datetime.datetime(2007, 9, 17, 21, 22, 23)
        )
        Article.objects.create(
            title='Future Article', slug='future_article', author_id=1,
            date_created=datetime.datetime(3000, 1, 1, 21, 22, 23)
        )
        UrlArticle.objects.create(
            title='Old Article', slug='old_article', author_id=1,
            date_created=datetime.datetime(2001, 1, 1, 21, 22, 23)
        )
        Site(id=1, domain='testserver', name='testserver').save()

    def test_page_not_found(self):
        "A 404 status is returned by the page_not_found view"
        for url in self.nonexistent_urls:
            response = self.client.get(url)
            self.assertEqual(response.status_code, 404)

    # Swap in an in-memory 404 template that renders only the CSRF token so
    # the response body IS the token.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': '{{ csrf_token }}',
                }),
            ],
        },
    }])
    def test_csrf_token_in_404(self):
        """
        The 404 page should have the csrf_token available in the context
        """
        # See ticket #14565
        # The body must be a real token: neither empty nor the
        # 'NOTPROVIDED' placeholder.
        for url in self.nonexistent_urls:
            response = self.client.get(url)
            self.assertNotEqual(response.content, b'NOTPROVIDED')
            self.assertNotEqual(response.content, b'')

    def test_server_error(self):
        "The server_error view raises a 500 status"
        response = self.client.get('/server_error/')
        self.assertEqual(response.status_code, 500)

    # In-memory templates whose bodies make it possible to assert that the
    # custom 404/500 templates (not the built-in defaults) were rendered,
    # and that request_path/exception reach the 404 context.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '404.html': 'This is a test template for a 404 error '
                                '(path: {{ request_path }}, exception: {{ exception }}).',
                    '500.html': 'This is a test template for a 500 error.',
                }),
            ],
        },
    }])
    def test_custom_templates(self):
        """
        404.html and 500.html templates are picked by their respective handler.
        """
        response = self.client.get('/server_error/')
        self.assertContains(response, "test template for a 500 error", status_code=500)
        response = self.client.get('/no_such_url/')
        self.assertContains(response, 'path: /no_such_url/', status_code=404)
        self.assertContains(response, 'exception: Resolver404', status_code=404)
        response = self.client.get('/technical404/')
        self.assertContains(response, 'exception: Testing technical 404.', status_code=404)

    def test_get_absolute_url_attributes(self):
        "A model can set attributes on the get_absolute_url method"
        # Checked both on the class and on a concrete instance.
        self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')
        article = UrlArticle.objects.get(pk=1)
        self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
                        'The attributes of the original get_absolute_url must be added.')

    def test_custom_templates_wrong(self):
        """
        Default error views should raise TemplateDoesNotExist when passed a
        template that doesn't exist.
        """
        rf = RequestFactory()
        request = rf.get('/')

        with self.assertRaises(TemplateDoesNotExist):
            bad_request(request, Exception(), template_name='nonexistent')

        with self.assertRaises(TemplateDoesNotExist):
            permission_denied(request, Exception(), template_name='nonexistent')

        with self.assertRaises(TemplateDoesNotExist):
            page_not_found(request, Http404(), template_name='nonexistent')

        with self.assertRaises(TemplateDoesNotExist):
            server_error(request, template_name='nonexistent')
| bsd-3-clause |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgssymbollayer_readsld.py
---------------------
Date : January 2017
Copyright : (C) 2017, Jorge Gustavo Rocha
Email : jgr at di dot uminho dot pt
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Jorge Gustavo Rocha'
__date__ = 'January 2017'
__copyright__ = '(C) 2017, Jorge Gustavo Rocha'
import qgis # NOQA
import os
from qgis.PyQt.QtXml import QDomDocument
from qgis.testing import start_app, unittest
from qgis.core import (QgsVectorLayer,
QgsFeature,
QgsGeometry,
QgsUnitTypes,
QgsPointXY,
QgsSvgMarkerSymbolLayer,
QgsEllipseSymbolLayer,
QgsSimpleFillSymbolLayer,
QgsSVGFillSymbolLayer,
QgsSvgMarkerSymbolLayer,
QgsLinePatternFillSymbolLayer,
QgsSimpleLineSymbolLayer,
QgsMarkerLineSymbolLayer,
QgsSimpleMarkerSymbolLayer,
QgsFontMarkerSymbolLayer
)
from qgis.testing.mocked import get_iface
from utilities import unitTestDataPath
# Initialise the QGIS application once for the whole test module.
start_app()
# Root directory of the shared unit-test fixtures.
TEST_DATA_DIR = unitTestDataPath()
def createLayerWithOneLine():
    """Returns an in-memory line layer holding one two-vertex feature."""
    line_layer = QgsVectorLayer(
        "LineString?crs=epsg:4326&field=gid:int&field=name:string",
        "simple_line", "memory")
    feature = QgsFeature(line_layer.dataProvider().fields(), 0)
    feature.setAttributes([1, 'one'])
    feature.setGeometry(
        QgsGeometry.fromPolylineXY([QgsPointXY(-7, 38), QgsPointXY(-8, 42)]))
    line_layer.dataProvider().addFeatures([feature])
    return line_layer
def createLayerWithOnePoint():
    """Returns an in-memory point layer containing exactly one feature."""
    point_layer = QgsVectorLayer(
        "Point?field=fldtxt:string&field=fldint:integer", "addfeat", "memory")
    provider = point_layer.dataProvider()
    feature = QgsFeature()
    feature.setAttributes(["test", 123])
    feature.setGeometry(QgsGeometry.fromPointXY(QgsPointXY(100, 200)))
    assert provider.addFeatures([feature])
    assert point_layer.featureCount() == 1
    return point_layer
def createLayerWithOnePolygon():
    """Returns an in-memory polygon layer with one quadrilateral feature."""
    polygon_layer = QgsVectorLayer(
        "Polygon?crs=epsg:3111&field=pk:int", "vl", "memory")
    assert polygon_layer.isValid()
    feature = QgsFeature(polygon_layer.dataProvider().fields(), 1)
    feature.setAttribute("pk", 1)
    ring = [QgsPointXY(2484588, 2425722), QgsPointXY(2482767, 2398853),
            QgsPointXY(2520109, 2397715), QgsPointXY(2520792, 2425494),
            QgsPointXY(2484588, 2425722)]
    feature.setGeometry(QgsGeometry.fromPolygonXY([ring]))
    assert polygon_layer.dataProvider().addFeatures([feature])
    return polygon_layer
class TestQgsSymbolLayerReadSld(unittest.TestCase):
"""
This class checks if SLD styles are properly applied
"""
def setUp(self):
self.iface = get_iface()
# test <CSSParameter>VALUE<CSSParameter/>
# test <CSSParameter><ogc:Literal>VALUE<ogc:Literal/><CSSParameter/>
def test_Literal_within_CSSParameter(self):
layer = createLayerWithOneLine()
mFilePath = os.path.join(TEST_DATA_DIR, 'symbol_layer/external_sld/simple_streams.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
def testLineColor():
# stroke CSSParameter within ogc:Literal
# expected color is #003EBA, RGB 0,62,186
self.assertEqual(layer.renderer().symbol().symbolLayers()[0].color().name(), '#003eba')
def testLineWidth():
# stroke-width CSSParameter within ogc:Literal
self.assertEqual(props['line_width'], '2')
def testLineOpacity():
# stroke-opacity CSSParameter NOT within ogc:Literal
# stroke-opacity=0.1
self.assertEqual(props['line_color'], '0,62,186,25')
testLineColor()
testLineWidth()
testLineOpacity()
def testSimpleMarkerRotation(self):
"""
Test if pointMarker property sld:Rotation value can be read if format is:
<sld:Rotation>50.0</sld:Rotation>
or
<se:Rotation><ogc:Literal>50</ogc:Literal></se:Rotation>
"""
# technically it's not necessary to use a real shape, but a empty memory
# layer. In case these tests will upgrade to a rendering where to
# compare also rendering not only properties
# myShpFile = os.path.join(unitTestDataPath(), 'points.shp')
# layer = QgsVectorLayer(myShpFile, 'points', 'ogr')
layer = QgsVectorLayer("Point", "addfeat", "memory")
assert (layer.isValid())
# test if able to read <sld:Rotation>50.0</sld:Rotation>
mFilePath = os.path.join(unitTestDataPath(),
'symbol_layer/external_sld/testSimpleMarkerRotation-directValue.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
self.assertEqual(props['angle'], '50')
# test if able to read <se:Rotation><ogc:Literal>50</ogc:Literal></se:Rotation>
mFilePath = os.path.join(unitTestDataPath(),
'symbol_layer/external_sld/testSimpleMarkerRotation-ogcLiteral.sld')
layer.loadSldStyle(mFilePath)
props = layer.renderer().symbol().symbolLayers()[0].properties()
self.assertEqual(props['angle'], '50')
def testSymbolSizeUom(self):
# create a layer
layer = createLayerWithOnePoint()
# load a sld with marker size without uom attribute (pixels)
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# load a sld with marker size with uom attribute in pixel
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomPixel.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# load a sld with marker size with uom attribute in meter
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomMetre.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12 / (0.28 * 0.001)
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertAlmostEqual(size, sld_size_px, delta=0.1)
# load a sld with marker size with uom attribute in foot
sld = 'symbol_layer/QgsSvgMarkerSymbolLayerUomFoot.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12 * (304.8 / 0.28)
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertAlmostEqual(size, sld_size_px, delta=0.1)
def testSymbolSize(self):
# create a layers
layer = createLayerWithOnePoint()
player = createLayerWithOnePolygon()
# size test for QgsEllipseSymbolLayer
sld = 'symbol_layer/QgsEllipseSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 7
sld_stroke_width_px = 1
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.symbolWidth()
stroke_width = sl.strokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsEllipseSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsVectorFieldSymbolLayer
# createFromSld not implemented
# size test for QgsSimpleFillSymbolLayer
sld = 'symbol_layer/QgsSimpleFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_stroke_width_px = 0.26
sl = player.renderer().symbol().symbolLayers()[0]
stroke_width = sl.strokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsSVGFillSymbolLayer
sld = 'symbol_layer/QgsSVGFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_size_px = 6
sld_stroke_width_px = 3
sl = player.renderer().symbol().symbolLayers()[0]
size = sl.patternWidth()
stroke_width = sl.svgStrokeWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSVGFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# size test for QgsSvgMarkerSymbolLayer
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSvgMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# size test for QgsPointPatternFillSymbolLayer
# createFromSld not implemented
# size test for QgsLinePatternFillSymbolLayer
sld = 'symbol_layer/QgsLinePatternFillSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_size_px = 4
sld_stroke_width_px = 1.5
sl = player.renderer().symbol().symbolLayers()[0]
size = sl.distance()
stroke_width = sl.lineWidth()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsLinePatternFillSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(stroke_width, sld_stroke_width_px)
# test size for QgsSimpleLineSymbolLayer
sld = 'symbol_layer/QgsSimpleLineSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_stroke_width_px = 1.26
sl = player.renderer().symbol().symbolLayers()[0]
stroke_width = sl.width()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleLineSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(stroke_width, sld_stroke_width_px)
# test size for QgsMarkerLineSymbolLayer
sld = 'symbol_layer/QgsMarkerLineSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
player.loadSldStyle(mFilePath)
sld_interval_px = 3.3
sld_offset_px = 6.6
sl = player.renderer().symbol().symbolLayers()[0]
interval = sl.interval()
offset = sl.offset()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsMarkerLineSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(interval, sld_interval_px)
self.assertEqual(offset, sld_offset_px)
# test size for QgsSimpleMarkerSymbolLayer
sld = 'symbol_layer/QgsSimpleMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 6
sld_displacement_x_px = 3.3
sld_displacement_y_px = 6.6
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
offset = sl.offset()
unit = sl.outputUnit()
self.assertTrue(isinstance(sl, QgsSimpleMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
self.assertEqual(offset.x(), sld_displacement_x_px)
self.assertEqual(offset.y(), sld_displacement_y_px)
# test size for QgsSVGMarkerSymbolLayer
sld = 'symbol_layer/QgsSvgMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 12
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
self.assertTrue(isinstance(sl, QgsSvgMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
# test size for QgsFontMarkerSymbolLayer
sld = 'symbol_layer/QgsFontMarkerSymbolLayer.sld'
mFilePath = os.path.join(TEST_DATA_DIR, sld)
layer.loadSldStyle(mFilePath)
sld_size_px = 6.23
sl = layer.renderer().symbol().symbolLayers()[0]
size = sl.size()
self.assertTrue(isinstance(sl, QgsFontMarkerSymbolLayer))
self.assertEqual(unit, QgsUnitTypes.RenderPixels)
self.assertEqual(size, sld_size_px)
def testSymbolSizeAfterReload(self):
    """Symbol size and unit must survive an SLD export/reload round trip."""
    point_layer = createLayerWithOnePoint()
    # Load an SLD fixture that defines an SVG marker with an explicit size.
    sld_path = os.path.join(TEST_DATA_DIR, 'symbol_layer/QgsSvgMarkerSymbolLayer.sld')
    point_layer.loadSldStyle(sld_path)
    # Remember the size and unit as parsed from the fixture (pixels).
    marker = point_layer.renderer().symbol().symbolLayers()[0]
    size_before = marker.size()
    unit_before = marker.outputUnit()
    # Export the style into a DOM document with namespace processing enabled.
    exported = QDomDocument()
    msg = ""
    point_layer.exportSldStyle(exported, msg)
    exported.setContent(exported.toString(), True)
    self.assertTrue(msg == "")
    # Feed the freshly exported document straight back into the layer.
    sld_root = exported.firstChildElement("StyledLayerDescriptor")
    named_layer = sld_root.firstChildElement("NamedLayer")
    point_layer.readSld(named_layer, msg)
    # Re-read the size and unit after the reload.
    marker = point_layer.renderer().symbol().symbolLayers()[0]
    size_after = marker.size()
    unit_after = marker.outputUnit()
    # Export followed by reload of the identical SLD must be lossless.
    self.assertEqual(size_before, size_after)
    self.assertEqual(unit_before, unit_after)
def test_Literal_within_CSSParameter_and_Text(self):
    """SLD SvgParameters wrapped in ogc:Literal must parse the same as bare
    values, and the embedded TextSymbolizer must produce working labeling."""
    layer = createLayerWithOneLine()
    mFilePath = os.path.join(TEST_DATA_DIR, 'symbol_layer/external_sld/simple_line_with_text.sld')
    layer.loadSldStyle(mFilePath)
    props = layer.renderer().symbol().symbolLayers()[0].properties()

    def testLineColor():
        # stroke SvgParameter within ogc:Literal
        # expected color is #003EBA, RGB 0,62,186
        self.assertEqual(layer.renderer().symbol().symbolLayers()[0].color().name(), '#003eba')

    def testLineWidth():
        # stroke-width SvgParameter within ogc:Literal
        self.assertEqual(props['line_width'], '2')

    def testLineOpacity():
        # stroke-opacity SvgParameter NOT within ogc:Literal
        # stroke-opacity=0.1 is encoded as the alpha component of line_color
        self.assertEqual(props['line_color'], '0,62,186,24')

    testLineColor()
    testLineWidth()
    testLineOpacity()

    # Labeling checks: the fixture's TextSymbolizer should enable simple labels.
    from qgis.core import QgsPalLayerSettings
    self.assertTrue(layer.labelsEnabled())
    self.assertEqual(layer.labeling().type(), 'simple')
    settings = layer.labeling().settings()
    self.assertEqual(settings.fieldName, 'name')
    format = settings.format()
    self.assertEqual(format.color().name(), '#ff0000')
    font = format.font()
    self.assertEqual(font.family(), 'QGIS Vera Sans')
    self.assertTrue(font.bold())
    self.assertFalse(font.italic())
    self.assertEqual(format.size(), 18)
    self.assertEqual(format.sizeUnit(), QgsUnitTypes.RenderPixels)
    # the layer contains lines
    # from qgis.core import QgsWkbTypes
    # self.assertEqual(layer.geometryType(), QgsWkbTypes.LineGeometry)
    # NOTE(review): for a line layer one might expect QgsPalLayerSettings.Line,
    # but the test pins AroundPoint — confirm this is the intended SLD mapping.
    self.assertEqual(settings.placement, QgsPalLayerSettings.AroundPoint)
    self.assertEqual(settings.xOffset, 1)
    self.assertEqual(settings.yOffset, 0)
    self.assertEqual(settings.offsetUnits, QgsUnitTypes.RenderPixels)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/mimetypes.py | 83 | 20735 | """Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=True) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=True) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib.parse
try:
import winreg as _winreg
except ImportError:
_winreg = None
# Public API of this module.
__all__ = [
    "guess_type","guess_extension","guess_all_extensions",
    "add_type","read_mime_types","init"
]

# Candidate mime.types files consulted by init(); nonexistent paths are skipped.
# NOTE(review): "/usr/local/etc/httpd/conf/mime.types" is listed twice —
# harmless (second read just re-adds the same mappings) but redundant.
knownfiles = [
    "/etc/mime.types",
    "/etc/httpd/mime.types",                    # Mac OS X
    "/etc/httpd/conf/mime.types",               # Apache
    "/etc/apache/mime.types",                   # Apache 1
    "/etc/apache2/mime.types",                  # Apache 2
    "/usr/local/etc/httpd/conf/mime.types",
    "/usr/local/lib/netscape/mime.types",
    "/usr/local/etc/httpd/conf/mime.types",     # Apache 1.2
    "/usr/local/etc/mime.types",                # Apache 1.3
]

inited = False   # set by init(); guards lazy initialization in MimeTypes.__init__
_db = None       # module-level default MimeTypes instance, created by init()
class MimeTypes:
    """MIME-types datastore.

    This datastore can handle information from mime.types-style files
    and supports basic determination of MIME type from a filename or
    URL, and can guess a reasonable extension given a MIME type.
    """

    def __init__(self, filenames=(), strict=True):
        # Lazily build the module-level default tables on first instantiation.
        if not inited:
            init()
        self.encodings_map = encodings_map.copy()
        self.suffix_map = suffix_map.copy()
        self.types_map = ({}, {})  # pair of dicts, indexed by bool: (non-strict, strict)
        self.types_map_inv = ({}, {})  # inverse maps: type -> list of extensions
        for (ext, type) in types_map.items():
            self.add_type(type, ext, True)
        for (ext, type) in common_types.items():
            self.add_type(type, ext, False)
        for name in filenames:
            self.read(name, strict)

    def add_type(self, type, ext, strict=True):
        """Add a mapping between a type and an extension.

        When the extension is already known, the new
        type will replace the old one. When the type
        is already known the extension will be added
        to the list of known extensions.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        self.types_map[strict][ext] = type
        exts = self.types_map_inv[strict].setdefault(type, [])
        if ext not in exts:
            exts.append(ext)

    def guess_type(self, url, strict=True):
        """Guess the type of a file based on its URL.

        Return value is a tuple (type, encoding) where type is None if
        the type can't be guessed (no or unknown suffix) or a string
        of the form type/subtype, usable for a MIME Content-type
        header; and encoding is None for no encoding or the name of
        the program used to encode (e.g. compress or gzip). The
        mappings are table driven. Encoding suffixes are case
        sensitive; type suffixes are first tried case sensitive, then
        case insensitive.

        The suffixes .tgz, .taz and .tz (case sensitive!) are all
        mapped to '.tar.gz'. (This is table-driven too, using the
        dictionary suffix_map.)

        Optional `strict' argument when False adds a bunch of commonly found,
        but non-standard types.
        """
        scheme, url = urllib.parse.splittype(url)
        if scheme == 'data':
            # syntax of data URLs:
            # dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
            # mediatype := [ type "/" subtype ] *( ";" parameter )
            # data := *urlchar
            # parameter := attribute "=" value
            # type/subtype defaults to "text/plain"
            comma = url.find(',')
            if comma < 0:
                # bad data URL
                return None, None
            semi = url.find(';', 0, comma)
            if semi >= 0:
                type = url[:semi]
            else:
                type = url[:comma]
            if '=' in type or '/' not in type:
                type = 'text/plain'
            return type, None  # never compressed, so encoding is None
        base, ext = posixpath.splitext(url)
        # Repeatedly expand composite suffixes, e.g. '.tgz' -> '.tar.gz'.
        while ext in self.suffix_map:
            base, ext = posixpath.splitext(base + self.suffix_map[ext])
        # Peel off a recognized compression suffix before the type lookup.
        if ext in self.encodings_map:
            encoding = self.encodings_map[ext]
            base, ext = posixpath.splitext(base)
        else:
            encoding = None
        # Strict (standard) table first: exact case, then lowercase.
        types_map = self.types_map[True]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        elif strict:
            return None, encoding
        # Non-strict fallback table, same case-sensitivity order.
        types_map = self.types_map[False]
        if ext in types_map:
            return types_map[ext], encoding
        elif ext.lower() in types_map:
            return types_map[ext.lower()], encoding
        else:
            return None, encoding

    def guess_all_extensions(self, type, strict=True):
        """Guess the extensions for a file based on its MIME type.

        Return value is a list of strings giving the possible filename
        extensions, including the leading dot ('.'). The extension is not
        guaranteed to have been associated with any particular data stream,
        but would be mapped to the MIME type `type' by guess_type().

        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        type = type.lower()
        extensions = self.types_map_inv[True].get(type, [])
        if not strict:
            # Append non-standard extensions, avoiding duplicates.
            for ext in self.types_map_inv[False].get(type, []):
                if ext not in extensions:
                    extensions.append(ext)
        return extensions

    def guess_extension(self, type, strict=True):
        """Guess the extension for a file based on its MIME type.

        Return value is a string giving a filename extension,
        including the leading dot ('.'). The extension is not
        guaranteed to have been associated with any particular data
        stream, but would be mapped to the MIME type `type' by
        guess_type(). If no extension can be guessed for `type', None
        is returned.

        Optional `strict' argument when false adds a bunch of commonly found,
        but non-standard types.
        """
        extensions = self.guess_all_extensions(type, strict)
        if not extensions:
            return None
        # First registered extension wins.
        return extensions[0]

    def read(self, filename, strict=True):
        """
        Read a single mime.types-format file, specified by pathname.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        with open(filename, encoding='utf-8') as fp:
            self.readfp(fp, strict)

    def readfp(self, fp, strict=True):
        """
        Read a single mime.types-format file.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        while 1:
            line = fp.readline()
            if not line:
                break
            words = line.split()
            # Drop everything from the first '#'-prefixed word on (comments).
            for i in range(len(words)):
                if words[i][0] == '#':
                    del words[i:]
                    break
            if not words:
                continue
            # Line format: "<type> <ext> <ext> ..." (extensions without dots).
            type, suffixes = words[0], words[1:]
            for suff in suffixes:
                self.add_type(type, '.' + suff, strict)

    def read_windows_registry(self, strict=True):
        """
        Load the MIME types database from Windows registry.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        # Windows only
        if not _winreg:
            return

        def enum_types(mimedb):
            # Yield every subkey name of the given registry key until EnumKey
            # raises (which signals the end of the enumeration).
            i = 0
            while True:
                try:
                    ctype = _winreg.EnumKey(mimedb, i)
                except EnvironmentError:
                    break
                else:
                    yield ctype
                i += 1

        with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, '') as hkcr:
            for subkeyname in enum_types(hkcr):
                try:
                    with _winreg.OpenKey(hkcr, subkeyname) as subkey:
                        # Only check file extensions
                        if not subkeyname.startswith("."):
                            continue
                        # raises EnvironmentError if no 'Content Type' value
                        mimetype, datatype = _winreg.QueryValueEx(
                            subkey, 'Content Type')
                        if datatype != _winreg.REG_SZ:
                            continue
                        self.add_type(mimetype, subkeyname, strict)
                except EnvironmentError:
                    continue
def guess_type(url, strict=True):
    """Guess the MIME type of a file based on its URL.

    Returns a ``(type, encoding)`` pair. ``type`` is ``None`` when the
    suffix is missing or unknown, otherwise a ``type/subtype`` string
    suitable for a Content-Type header. ``encoding`` is ``None`` or the
    name of the program used to encode (e.g. ``compress`` or ``gzip``).
    Encoding suffixes are case sensitive; type suffixes are first tried
    case sensitively, then case insensitively. Composite suffixes such
    as ``.tgz`` are expanded via ``suffix_map``. When ``strict`` is
    false, commonly found non-standard types are also consulted.
    """
    # Ensure the shared module-level database exists, then delegate.
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
    """Guess all filename extensions for a given MIME type.

    Returns a list of extension strings, each including the leading
    dot. The extensions are those that ``guess_type()`` would map to
    ``type``; they are not guaranteed to have been seen in any data
    stream. When ``strict`` is false, commonly found non-standard
    types are also consulted.
    """
    # Ensure the shared module-level database exists, then delegate.
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
    """Guess a single filename extension for a given MIME type.

    Returns one extension string including the leading dot, or ``None``
    when no extension can be guessed for ``type``. The extension is the
    first of those that ``guess_type()`` would map to ``type``. When
    ``strict`` is false, commonly found non-standard types are also
    consulted.
    """
    # Ensure the shared module-level database exists, then delegate.
    db = _db
    if db is None:
        init()
        db = _db
    return db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
    """Register a mapping between a MIME type and a filename extension.

    An already-known extension has its type replaced; an already-known
    type gets the extension appended to its list of extensions. With
    ``strict`` true the mapping goes into the table of standard types,
    otherwise into the non-standard one.
    """
    # Ensure the shared module-level database exists, then delegate.
    db = _db
    if db is None:
        init()
        db = _db
    return db.add_type(type, ext, strict)
def init(files=None):
    """(Re)build the module-level MIME tables and default database.

    With ``files`` omitted, reads the Windows registry (when available)
    and then every existing path in ``knownfiles``; otherwise reads the
    given files only.
    """
    global suffix_map, types_map, encodings_map, common_types
    global inited, _db
    # Flag initialization up front so MimeTypes.__init__() does not recurse.
    inited = True
    database = MimeTypes()
    if files is None:
        if _winreg:
            database.read_windows_registry()
        files = knownfiles
    for path in files:
        if os.path.isfile(path):
            database.read(path)
    encodings_map = database.encodings_map
    suffix_map = database.suffix_map
    types_map = database.types_map[True]
    common_types = database.types_map[False]
    # Publish the database only once it is fully populated.
    _db = database
def read_mime_types(file):
    """Parse one mime.types-style file.

    Returns the resulting dictionary mapping extensions to types, or
    ``None`` when the file cannot be opened.
    """
    try:
        fp = open(file)
    except OSError:
        # An unreadable or missing file is reported as "no data".
        return None
    with fp:
        table = MimeTypes()
        table.readfp(fp, True)
        return table.types_map[True]
def _default_mime_types():
    """Populate the module-level default MIME tables.

    Sets ``suffix_map`` (composite suffix expansion), ``encodings_map``
    (compression suffixes), ``types_map`` (standard types) and
    ``common_types`` (non-standard types matched only with strict=0).
    Called once at import time, and again by test code that needs to
    restore the defaults.
    """
    global suffix_map
    global encodings_map
    global types_map
    global common_types

    suffix_map = {
        '.svgz': '.svg.gz',
        '.tgz': '.tar.gz',
        '.taz': '.tar.gz',
        '.tz': '.tar.gz',
        '.tbz2': '.tar.bz2',
        '.txz': '.tar.xz',
    }

    encodings_map = {
        '.gz': 'gzip',
        '.Z': 'compress',
        '.bz2': 'bzip2',
        '.xz': 'xz',
    }

    # Before adding new types, make sure they are either registered with IANA,
    # at http://www.iana.org/assignments/media-types
    # or extensions, i.e. using the x- prefix

    # If you add to these, please keep them sorted!
    types_map = {
        '.a' : 'application/octet-stream',
        '.ai' : 'application/postscript',
        '.aif' : 'audio/x-aiff',
        '.aifc' : 'audio/x-aiff',
        '.aiff' : 'audio/x-aiff',
        '.au' : 'audio/basic',
        '.avi' : 'video/x-msvideo',
        '.bat' : 'text/plain',
        '.bcpio' : 'application/x-bcpio',
        '.bin' : 'application/octet-stream',
        '.bmp' : 'image/x-ms-bmp',
        '.c' : 'text/plain',
        # Previously duplicated: '.cdf' also mapped to 'application/x-cdf',
        # but in a dict literal the later key silently wins, so that entry
        # was dead code and has been removed.
        '.cdf' : 'application/x-netcdf',
        '.cpio' : 'application/x-cpio',
        '.csh' : 'application/x-csh',
        '.css' : 'text/css',
        '.dll' : 'application/octet-stream',
        '.doc' : 'application/msword',
        '.dot' : 'application/msword',
        '.dvi' : 'application/x-dvi',
        '.eml' : 'message/rfc822',
        '.eps' : 'application/postscript',
        '.etx' : 'text/x-setext',
        '.exe' : 'application/octet-stream',
        '.gif' : 'image/gif',
        '.gtar' : 'application/x-gtar',
        '.h' : 'text/plain',
        '.hdf' : 'application/x-hdf',
        '.htm' : 'text/html',
        '.html' : 'text/html',
        '.ico' : 'image/vnd.microsoft.icon',
        '.ief' : 'image/ief',
        '.jpe' : 'image/jpeg',
        '.jpeg' : 'image/jpeg',
        '.jpg' : 'image/jpeg',
        '.js' : 'application/javascript',
        '.ksh' : 'text/plain',
        '.latex' : 'application/x-latex',
        '.m1v' : 'video/mpeg',
        '.m3u' : 'application/vnd.apple.mpegurl',
        '.m3u8' : 'application/vnd.apple.mpegurl',
        '.man' : 'application/x-troff-man',
        '.me' : 'application/x-troff-me',
        '.mht' : 'message/rfc822',
        '.mhtml' : 'message/rfc822',
        '.mif' : 'application/x-mif',
        '.mov' : 'video/quicktime',
        '.movie' : 'video/x-sgi-movie',
        '.mp2' : 'audio/mpeg',
        '.mp3' : 'audio/mpeg',
        '.mp4' : 'video/mp4',
        '.mpa' : 'video/mpeg',
        '.mpe' : 'video/mpeg',
        '.mpeg' : 'video/mpeg',
        '.mpg' : 'video/mpeg',
        '.ms' : 'application/x-troff-ms',
        '.nc' : 'application/x-netcdf',
        '.nws' : 'message/rfc822',
        '.o' : 'application/octet-stream',
        '.obj' : 'application/octet-stream',
        '.oda' : 'application/oda',
        '.p12' : 'application/x-pkcs12',
        '.p7c' : 'application/pkcs7-mime',
        '.pbm' : 'image/x-portable-bitmap',
        '.pdf' : 'application/pdf',
        '.pfx' : 'application/x-pkcs12',
        '.pgm' : 'image/x-portable-graymap',
        '.pl' : 'text/plain',
        '.png' : 'image/png',
        '.pnm' : 'image/x-portable-anymap',
        '.pot' : 'application/vnd.ms-powerpoint',
        '.ppa' : 'application/vnd.ms-powerpoint',
        '.ppm' : 'image/x-portable-pixmap',
        '.pps' : 'application/vnd.ms-powerpoint',
        '.ppt' : 'application/vnd.ms-powerpoint',
        '.ps' : 'application/postscript',
        '.pwz' : 'application/vnd.ms-powerpoint',
        '.py' : 'text/x-python',
        '.pyc' : 'application/x-python-code',
        '.pyo' : 'application/x-python-code',
        '.qt' : 'video/quicktime',
        '.ra' : 'audio/x-pn-realaudio',
        '.ram' : 'application/x-pn-realaudio',
        '.ras' : 'image/x-cmu-raster',
        '.rdf' : 'application/xml',
        '.rgb' : 'image/x-rgb',
        '.roff' : 'application/x-troff',
        '.rtx' : 'text/richtext',
        '.sgm' : 'text/x-sgml',
        '.sgml' : 'text/x-sgml',
        '.sh' : 'application/x-sh',
        '.shar' : 'application/x-shar',
        '.snd' : 'audio/basic',
        '.so' : 'application/octet-stream',
        '.src' : 'application/x-wais-source',
        '.sv4cpio': 'application/x-sv4cpio',
        '.sv4crc' : 'application/x-sv4crc',
        '.svg' : 'image/svg+xml',
        '.swf' : 'application/x-shockwave-flash',
        '.t' : 'application/x-troff',
        '.tar' : 'application/x-tar',
        '.tcl' : 'application/x-tcl',
        '.tex' : 'application/x-tex',
        '.texi' : 'application/x-texinfo',
        '.texinfo': 'application/x-texinfo',
        '.tif' : 'image/tiff',
        '.tiff' : 'image/tiff',
        '.tr' : 'application/x-troff',
        '.tsv' : 'text/tab-separated-values',
        '.txt' : 'text/plain',
        '.ustar' : 'application/x-ustar',
        '.vcf' : 'text/x-vcard',
        '.wav' : 'audio/x-wav',
        '.wiz' : 'application/msword',
        '.wsdl' : 'application/xml',
        '.xbm' : 'image/x-xbitmap',
        '.xlb' : 'application/vnd.ms-excel',
        # Previously duplicated: '.xls' also mapped to 'application/excel';
        # the later key won, so that entry was dead code and has been removed.
        '.xls' : 'application/vnd.ms-excel',
        '.xml' : 'text/xml',
        '.xpdl' : 'application/xml',
        '.xpm' : 'image/x-xpixmap',
        '.xsl' : 'application/xml',
        '.xwd' : 'image/x-xwindowdump',
        '.zip' : 'application/zip',
    }

    # These are non-standard types, commonly found in the wild. They will
    # only match if strict=0 flag is given to the API methods.

    # Please sort these too
    common_types = {
        '.jpg' : 'image/jpg',
        '.mid' : 'audio/midi',
        '.midi': 'audio/midi',
        '.pct' : 'image/pict',
        '.pic' : 'image/pict',
        '.pict': 'image/pict',
        '.rtf' : 'application/rtf',
        '.xul' : 'text/xul'
    }


_default_mime_types()
# Command-line interface: guess types (or, with -e, extensions) for the
# arguments given on the command line.
if __name__ == '__main__':
    import getopt

    USAGE = """\
Usage: mimetypes.py [options] type

Options:
    --help / -h       -- print this message and exit
    --lenient / -l    -- additionally search of some common, but non-standard
                         types.
    --extension / -e  -- guess extension instead of type

More than one type argument may be given.
"""

    def usage(code, msg=''):
        # Print usage text (plus an optional error message) and exit.
        print(USAGE)
        if msg: print(msg)
        sys.exit(code)

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hle',
                                   ['help', 'lenient', 'extension'])
    except getopt.error as msg:
        usage(1, msg)

    strict = 1       # by default only consult the standard tables
    extension = 0    # 0: guess type from name; 1: guess extension from type
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-l', '--lenient'):
            strict = 0
        elif opt in ('-e', '--extension'):
            extension = 1
    for gtype in args:
        if extension:
            guess = guess_extension(gtype, strict)
            if not guess: print("I don't know anything about type", gtype)
            else: print(guess)
        else:
            guess, encoding = guess_type(gtype, strict)
            if not guess: print("I don't know anything about type", gtype)
            else: print('type:', guess, 'encoding:', encoding)
| lgpl-3.0 |
TheKysek/MiNode | minode/i2p/listener.py | 1 | 2081 | # -*- coding: utf-8 -*-
import logging
import socket
import threading
from connection import Connection
from i2p.util import receive_line
import shared
class I2PListener(threading.Thread):
    """Accepts inbound I2P streams through a SAM v3 bridge.

    Keeps one socket parked in STREAM ACCEPT state; each accepted stream
    is wrapped in a Connection and registered in shared.connections,
    then a fresh accept socket is opened.
    """

    def __init__(self, nick, host='127.0.0.1', port=7656):
        super().__init__(name='I2P Listener')
        self.host = host      # SAM bridge host
        self.port = port      # SAM bridge port
        self.nick = nick      # session nickname (bytes), used in STREAM ACCEPT

        self.s = None         # socket currently awaiting an inbound stream

        self.version_reply = []

        self.new_socket()

    def _receive_line(self):
        # Read one newline-terminated SAM reply from the bridge.
        line = receive_line(self.s)
        # logging.debug('I2PListener <- ' + str(line))
        return line

    def _send(self, command):
        # Send one raw SAM command (bytes) to the bridge.
        # logging.debug('I2PListener -> ' + str(command))
        self.s.sendall(command)

    def new_socket(self):
        # Handshake with the SAM bridge and park the socket in accept state.
        self.s = socket.create_connection((self.host, self.port))
        self._send(b'HELLO VERSION MIN=3.0 MAX=3.3\n')
        self.version_reply = self._receive_line().split()
        # NOTE(review): protocol validation via assert is stripped under
        # python -O; consider raising an explicit exception instead.
        assert b'RESULT=OK' in self.version_reply
        self._send(b'STREAM ACCEPT ID=' + self.nick + b'\n')
        reply = self._receive_line().split(b' ')
        assert b'RESULT=OK' in reply
        # Short timeout so run() can poll shared.shutting_down regularly.
        self.s.settimeout(1)

    def run(self):
        while not shared.shutting_down:
            try:
                # First line on an accepted stream is the peer's destination.
                destination = self._receive_line().split()[0]
                logging.info('Incoming I2P connection from: {}'.format(destination.decode()))

                # Reject the stream if we already have a connection (in either
                # direction) to this destination.
                hosts = set()
                for c in shared.connections.copy():
                    hosts.add(c.host)
                for d in shared.i2p_dialers.copy():
                    hosts.add(d.destination)
                if destination in hosts:
                    logging.debug('Rejecting duplicate I2P connection.')
                    self.s.close()
                else:
                    c = Connection(destination, 'i2p', self.s, 'i2p', True, destination)
                    c.start()
                    shared.connections.add(c)
                # Open a fresh accept socket for the next inbound stream.
                self.new_socket()
            except socket.timeout:
                # Timeout just means no stream arrived; loop and re-check the
                # shutdown flag.
                pass
        logging.debug('Shutting down I2P Listener')
| mit |
kmoocdev/edx-platform | lms/djangoapps/shoppingcart/migrations/0018_auto__add_donation.py | 120 | 15611 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Create the 'shoppingcart_donation' table for the new Donation model
    (multi-table child of OrderItem via a OneToOne primary-key link)."""
    # Adding model 'Donation'
    db.create_table('shoppingcart_donation', (
        ('orderitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['shoppingcart.OrderItem'], unique=True, primary_key=True)),
        ('donation_type', self.gf('django.db.models.fields.CharField')(default='general', max_length=32)),
        ('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
    ))
    # Emit South's post-create signal so dependent apps can react.
    db.send_create_signal('shoppingcart', ['Donation'])
def backwards(self, orm):
    """Reverse the migration by dropping the 'shoppingcart_donation' table."""
    # Deleting model 'Donation'
    db.delete_table('shoppingcart_donation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
nachandr/cfme_tests | cfme/tests/cloud/test_providers.py | 2 | 58392 | import os
import uuid
from urllib.parse import urljoin
import fauxfactory
import pytest
from wait_for import wait_for
from widgetastic.exceptions import MoveTargetOutOfBoundsException
from widgetastic.widget import Text
from wrapanapi import VmState
from cfme import test_requirements
from cfme.base.credential import Credential
from cfme.cloud.provider import CloudProvider
from cfme.cloud.provider.azure import AzureProvider
from cfme.cloud.provider.ec2 import EC2Endpoint
from cfme.cloud.provider.ec2 import EC2Provider
from cfme.cloud.provider.gce import GCEProvider
from cfme.cloud.provider.openstack import OpenStackProvider
from cfme.cloud.provider.openstack import RHOSEndpoint
from cfme.common.provider import prepare_endpoints
from cfme.common.provider_views import CloudProviderAddView
from cfme.common.provider_views import CloudProvidersView
from cfme.fixtures.provider import enable_provider_regions
from cfme.markers.env_markers.provider import ONE
from cfme.markers.env_markers.provider import SECOND
from cfme.utils import appliance
from cfme.utils import conf
from cfme.utils import ssh
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.conf import credentials
from cfme.utils.generators import random_vm_name
from cfme.utils.log_validator import LogValidator
from cfme.utils.providers import get_crud
from cfme.utils.providers import list_providers
from cfme.utils.providers import ProviderFilter
from cfme.utils.rest import assert_response
from cfme.utils.update import update
from cfme.utils.wait import wait_for_decorator
# Module-wide marker: parametrize every test in this module over all CloudProvider types.
pytestmark = [pytest.mark.provider([CloudProvider], scope="function")]
# path, where powershell scripts are located and where cfme image is downloaded,
# needed for test_create_azure_vm_from_azure_image
SPACE = '/mnt/space/'
@pytest.fixture(scope='function')
def enable_regions(provider):
    """Make sure the regions required by *provider* are enabled before the test runs."""
    enable_provider_regions(provider)
@pytest.fixture(scope='function')
def az_pwsh_vm(appliance):
    """
    azure_pwsh contains powershell and necessary scripts to upload vhd, create VM, get ip of the
    resource and delete the VM.
    Find the provider that contains that template.
    The example of the configuration can be found in data/az_pwsh_cloudinit.cfg
    """
    # Only providers whose yaml declares a 'powershell_vm' template qualify.
    filter_pwsh_template = ProviderFilter(required_fields=[['templates', 'powershell_vm']])
    providers = list_providers(filters=[filter_pwsh_template])
    if not providers:
        pytest.skip("There's no provider that contains a template with powershell")
    # If there's more than 1 provider that has the template, we select the first
    provider = providers[0]
    vm_name = random_vm_name(context="pwsh")
    pwsh_vm = provider.data.templates.powershell_vm.name
    collection = provider.appliance.provider_based_collection(provider)
    try:
        vm = collection.instantiate(vm_name, provider, pwsh_vm)
        vm.create_on_provider(allow_skip="default")
    except IndexError:
        # Local import to avoid a hard dependency at collection time.
        from cfme.exceptions import ItemNotFound
        raise ItemNotFound('VM with powershell not found!')
    vm.mgmt.ensure_state(VmState.RUNNING)
    # Block until the provider reports an IP address for the VM (up to 10 minutes).
    @wait_for_decorator(timeout="10m", delay=5)
    def get_ip_address():
        ip = vm.ip_address
        return ip is not None
    yield vm
    # Teardown: remove the VM from the provider after the test finishes.
    vm.cleanup_on_provider()
@pytest.fixture
def pwsh_ssh(az_pwsh_vm):
    """Provide vm_ssh_client for ssh operations in the test."""
    host_creds = credentials['host_default']
    connection_kwargs = {
        'hostname': az_pwsh_vm.ip_address,
        'username': host_creds['username'],
        'password': host_creds['password'],
    }
    # The client is closed automatically once the consuming test finishes.
    with ssh.SSHClient(**connection_kwargs) as vm_ssh_client:
        yield vm_ssh_client
@pytest.fixture
def connect_az_account(pwsh_ssh):
    """
    Connect to Azure account to run further scripts, see
    https://docs.microsoft.com/en-us/powershell/azure/authenticate-azureps
    """
    script = os.path.join(SPACE, 'connect_account.ps1')
    result = pwsh_ssh.run_command(f"pwsh {script}", timeout=180)
    assert result.success, "Failed to connect to Azure account"
@pytest.fixture(scope='function')
def cfme_vhd(appliance, pwsh_ssh):
    """Download and unpack the appliance's Azure VHD image onto the powershell VM.

    Skips when the Azure resource already reports an IP (taken by another VM)
    or when no image URL is configured in yaml for the appliance's stream.

    Yields:
        str: the .vhd file name; both the archive and the vhd are removed on teardown.
    """
    path_script = os.path.join(SPACE, 'get_ip.ps1')
    resource_ip = pwsh_ssh.run_command(
        fr'pwsh {path_script}| grep -oE "([0-9]{{1,3}}\.){{3}}[0-9]{{1,3}}"',
        timeout=60).output.strip()
    # BUG FIX: run_command output is always a str, never None, so the original
    # `is not None` comparison made this fixture skip unconditionally.  An empty
    # string means no IP was reported, i.e. the resource is free.
    if resource_ip:
        pytest.skip("The resource is taken by some other VM in Azure")
    stream = appliance.version.stream()
    try:
        # need to add the trailing slash for urljoin to work correctly
        url = '{}/'.format(conf.cfme_data['basic_info']['cfme_images_url'][stream])
    except KeyError:
        pytest.skip("Skipping since no such key found in yaml")
    # Scrape the image index page for the azure image archive name.
    image = pwsh_ssh.run_command(
        f"""wget -qO- {url} | grep -Po '(?<=href=")[^"]*' | grep azure""",
        timeout=30).output.strip()
    image_url = urljoin(url, image)
    pwsh_ssh.run_command(f"wget {image_url} -P {SPACE}",
                         timeout=180)
    # unpack the archive
    vhd = image.replace('zip', 'vhd')
    pwsh_ssh.run_command("unzip {} -d {}".format(os.path.join(SPACE, image), SPACE),
                         timeout=15 * 60)
    yield vhd
    # Teardown: delete both the downloaded archive and the extracted vhd.
    pwsh_ssh.run_command("rm -f {}".format(os.path.join(SPACE, image)), timeout=180)
    pwsh_ssh.run_command("rm -f {}".format(os.path.join(SPACE, vhd)), timeout=180)
@pytest.fixture(scope='function')
def upload_image_to_azure(cfme_vhd, pwsh_ssh):
    """Point upload_vhd.ps1 at the downloaded VHD and run it to upload the image."""
    script = os.path.join(SPACE, 'upload_vhd.ps1')
    # Rewrite the first line of the script so it references the current VHD blob.
    sed_command = (r"""sed -i '1s/.*/$BlobNameSource = "{vhd}"/' {script}"""
                   .format(script=script, vhd=cfme_vhd))
    pwsh_ssh.run_command(sed_command, timeout=30)
    pwsh_ssh.run_command(f"pwsh {script}", timeout=15 * 60)
@pytest.fixture(scope='function')
def vm_ip(cfme_vhd, pwsh_ssh):
    """Create an Azure VM from the uploaded CFME VHD and yield its public IP."""
    # Create VM in Azure
    path_script = os.path.join(SPACE, 'create_vm.ps1')
    # Patch the first three lines of create_vm.ps1 with the current blob and VM names.
    pwsh_ssh.run_command(
        r"""sed -i '1s/.*/$BlobNameSource = "{vhd}"/' {script} &&
        sed -i '2s/.*/$BlobNameDest = "{b_dest}"/' {script} &&
        sed -i '3s/.*/$VMName = "{name}"/' {script}""".format(
            script=path_script,
            vhd=cfme_vhd,
            b_dest=cfme_vhd.replace('azure', 'test'),
            name=cfme_vhd.replace('.x86_64.vhd', '-vm')),
        timeout=20)
    pwsh_ssh.run_command(f"pwsh {path_script}", timeout=600)
    # get the ip of the resource
    path_get_ip = os.path.join(SPACE, 'get_ip.ps1')
    ip = pwsh_ssh.run_command(
        fr'pwsh {path_get_ip}| grep -oE "([0-9]{{1,3}}\.){{3}}[0-9]{{1,3}}"',
        timeout=60).output.strip()
    yield ip
    # Delete the VM
    # NOTE(review): the teardown seds $VMName into line 1 of create_vm.ps1 (which
    # normally holds $BlobNameSource) and re-runs that same script; presumably a
    # dedicated delete script was intended here -- confirm against the ps1 sources.
    with pwsh_ssh:
        pwsh_ssh.run_command(
            r"""sed -i '1s/.*/$VMName = "{name}"/' {script}""".format(
                script=path_script,
                name=cfme_vhd.replace('.x86_64.vhd', '-vm')),
            timeout=20)
        pwsh_ssh.run_command(f"pwsh {path_script}", timeout=180)
@pytest.fixture
def instance_with_ssh_addition_template(appliance, provider):
    """Provision a cloud instance customized with the 'SSH key addition template'."""
    customize_values = {'customize': {'custom_template': {'name': "SSH key addition template"}}}
    instance = appliance.collections.cloud_instances.create(
        random_vm_name('prov'),
        provider,
        form_values=customize_values,
    )
    yield instance
    # Remove the instance once the consuming test is done with it.
    instance.delete()
@pytest.fixture
def stack_without_parameters(provider):
    """Create a stack from the parameter-less template and wait until it is active."""
    template_url = provider.data.provisioning.stack_provisioning.template_without_parameters
    stack = provider.mgmt.create_stack(
        name=fauxfactory.gen_alpha(10),
        template_url=template_url,
        capabilities=["CAPABILITY_IAM"],
    )
    # Stack creation is asynchronous; block until the provider reports it active.
    wait_for(lambda: stack.status_active, delay=15, timeout=900)
    yield stack
    stack.delete()
@pytest.fixture
def ec2_provider_with_sts_creds(appliance):
    """Instantiate (without creating) an EC2 provider that authenticates via STS assume-role.

    Yields the provider object configured from the 'ec2west' yaml key; the endpoint
    carries the assume-role ARN and its credentials. The provider is deleted on teardown.
    """
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(
        prov_class=EC2Provider, name=fauxfactory.gen_alphanumeric(5), key='ec2west'
    )
    # Resolve the credential alias from the provider yaml into username/password.
    assume_role_creds = prov.data.sts_assume_role.credentials
    creds = Credential(principal=credentials[assume_role_creds]['username'],
                       secret=credentials[assume_role_creds]['password'])
    endpoint = EC2Endpoint(
        assume_role_arn=prov.data.sts_assume_role.role_arn,
        credentials=creds)
    prov.endpoints = prepare_endpoints(endpoint)
    prov.region_name = prov.data.region_name
    yield prov
    prov.delete()
@pytest.fixture(params=["network_providers", "block_managers", "object_managers"])
def child_provider(request, appliance, provider):
    """Yield the first child provider (network/block/object manager) of the provider."""
    collection_name = request.param
    try:
        collection = getattr(appliance.collections, collection_name).filter(
            {"provider": provider})
    except AttributeError:
        # Older appliances may lack some of the parametrized collections.
        pytest.skip(
            'Appliance collections did not include parametrized child provider type ({})'
            .format(collection_name))
    yield collection.all()[0]
@pytest.mark.tier(3)
@test_requirements.discovery
def test_add_cancelled_validation_cloud(request, appliance):
    """Tests that the flash message is correct when add is cancelled.
    Polarion:
        assignee: pvala
        casecomponent: Cloud
        caseimportance: medium
        initialEstimate: 1/16h
    """
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(prov_class=EC2Provider)
    # Clean up in case the cancel unexpectedly left a provider behind.
    request.addfinalizer(prov.delete_if_exists)
    try:
        prov.create(cancel=True)
    except MoveTargetOutOfBoundsException:
        # TODO: Remove once fixed 1475303
        prov.create(cancel=True)
    view = prov.browser.create_view(CloudProvidersView)
    view.flash.assert_success_message('Add of Cloud Provider was cancelled by the user')
@pytest.mark.tier(3)
@test_requirements.discovery
def test_cloud_provider_add_with_bad_credentials(
    request, provider, has_no_providers, enable_regions, appliance
):
    """ Tests provider add with bad credentials
    Metadata:
        test_flag: crud
    Polarion:
        assignee: pvala
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/6h
    """
    default_credentials = provider.default_endpoint.credentials
    # default settings
    flash = 'Login failed due to a bad username or password.'
    default_credentials.principal = "bad"
    default_credentials.secret = 'notyourday'
    # Each provider type reports invalid credentials with a different message,
    # and some need differently-shaped bogus credentials to reach validation.
    if provider.one_of(AzureProvider):
        flash = (
            "Credential validation was not successful: Incorrect credentials - "
            "check your Azure Client ID and Client Key"
        )
        default_credentials.principal = str(uuid.uuid4())
        default_credentials.secret = 'notyourday'
    elif provider.one_of(GCEProvider):
        flash = 'Credential validation was not successful: Invalid Google JSON key'
        default_credentials.service_account = '{"test": "bad"}'
    elif provider.one_of(OpenStackProvider):
        # Drop non-default endpoints so only the default credentials are validated.
        for endp_name in list(provider.endpoints.keys()):
            if endp_name != 'default':
                del provider.endpoints[endp_name]
    @request.addfinalizer
    def clear_form():
        """Dismiss the add-provider form left open by the failed validation."""
        from cfme.common.provider_views import ProviderAddView
        view = appliance.browser.create_view(ProviderAddView)
        if view.is_displayed:
            view.cancel.click()
        assert not view.is_displayed
    with pytest.raises(Exception, match=flash):
        provider.create(validate_credentials=True)
@pytest.mark.tier(1)
@pytest.mark.smoke
@test_requirements.discovery
def test_cloud_provider_crud(provider, has_no_providers, enable_regions):
    """ Tests provider add with good credentials
    Metadata:
        test_flag: crud
    Polarion:
        assignee: pvala
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/3h
    """
    provider.create()
    provider.validate_stats(ui=True)
    # Update: rename to a random value, then rename back to the original.
    old_name = provider.name
    with update(provider):
        provider.name = str(uuid.uuid4())  # random uuid
    with update(provider):
        provider.name = old_name  # old name
    # Delete and confirm the provider is really gone.
    provider.delete()
    provider.wait_for_delete()
@pytest.mark.tier(3)
@test_requirements.discovery
def test_type_required_validation_cloud(request, appliance):
    """Test to validate type while adding a provider
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/10h
    """
    add_view = navigate_to(appliance.collections.cloud_providers, 'Add')
    add_view.fill({'name': 'foo'})
    # With no provider type chosen the Add button must stay disabled.
    assert not add_view.add.active
@pytest.mark.tier(3)
@test_requirements.discovery
def test_name_required_validation_cloud(request, appliance):
    """Tests to validate the name while adding a provider
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/15h
    """
    collection = appliance.collections.cloud_providers
    # Deliberately instantiate the provider without a name.
    prov = collection.instantiate(prov_class=EC2Provider,
                                  name=None,
                                  region='US East (Northern Virginia)')
    request.addfinalizer(prov.delete_if_exists)
    # Creation must fail while the add form stays open.
    with pytest.raises(AssertionError):
        prov.create()
    view = prov.create_view(CloudProviderAddView)
    assert view.name.help_block == "Required"
    assert not view.add.active
@pytest.mark.tier(3)
def test_region_required_validation(request, soft_assert, appliance):
    """Tests to validate the region while adding a provider
    Polarion:
        assignee: pvala
        caseimportance: low
        casecomponent: WebUI
        initialEstimate: 1/6h
    """
    collection = appliance.collections.cloud_providers
    # Deliberately instantiate the provider without a region.
    prov = collection.instantiate(prov_class=EC2Provider, name=fauxfactory.gen_alphanumeric(5),
                                  region=None)
    request.addfinalizer(prov.delete_if_exists)
    # Creation must fail while the add form stays open.
    with pytest.raises(AssertionError):
        prov.create()
    view = prov.create_view(CloudProviderAddView)
    soft_assert(view.region.help_block == "Required")
@pytest.mark.tier(3)
@test_requirements.discovery
def test_host_name_required_validation_cloud(request, appliance):
    """Test to validate the hostname while adding a provider
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/15h
    """
    # Endpoint with an IP but no hostname — hostname is the field under test.
    endpoint = RHOSEndpoint(hostname=None,
                            ip_address=fauxfactory.gen_ipaddr(prefix=[10]),
                            security_protocol=None)
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(prov_class=OpenStackProvider,
                                  name=fauxfactory.gen_alphanumeric(5),
                                  endpoints=endpoint)
    request.addfinalizer(prov.delete_if_exists)
    # It must raise an exception because it keeps on the form
    with pytest.raises(AssertionError):
        prov.create()
    endpoints = prov.create_view(prov.endpoints_form)
    assert endpoints.default.hostname.help_block == "Required"
@pytest.mark.tier(3)
@test_requirements.general_ui
def test_api_port_blank_validation(request, appliance):
    """Test to validate blank api port while adding a provider
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: low
        initialEstimate: 1/6h
    """
    # Endpoint with an explicitly empty api_port — the field under test.
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5),
                            ip_address=fauxfactory.gen_ipaddr(prefix=[10]),
                            api_port='',
                            security_protocol='Non-SSL')
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(prov_class=OpenStackProvider,
                                  name=fauxfactory.gen_alphanumeric(5),
                                  endpoints=endpoint)
    request.addfinalizer(prov.delete_if_exists)
    # It must raise an exception because it keeps on the form
    with pytest.raises(AssertionError):
        prov.create()
    endpoints = prov.create_view(prov.endpoints_form)
    assert endpoints.default.api_port.help_block == "Required"
@pytest.mark.tier(3)
@test_requirements.discovery
def test_name_max_character_validation_cloud(request, cloud_provider):
    """Test to validate that provider can have up to 255 characters in name
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/15h
    """
    request.addfinalizer(lambda: cloud_provider.delete_if_exists(cancel=False))
    # Rename the provider to a 255-character name and verify it still exists.
    with update(cloud_provider):
        cloud_provider.name = fauxfactory.gen_alphanumeric(255)
    assert cloud_provider.exists
@pytest.mark.tier(3)
def test_hostname_max_character_validation_cloud(appliance):
    """Test to validate max character for hostname field
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/15h
    """
    # 256-character hostname: one past the allowed maximum.
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(256),
                            api_port=None,
                            security_protocol=None)
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(prov_class=OpenStackProvider,
                                  name=fauxfactory.gen_alphanumeric(5),
                                  endpoints=endpoint)
    try:
        prov.create()
    except MoveTargetOutOfBoundsException:
        # TODO: Remove once fixed 1475303
        prov.create()
    except AssertionError:
        # The UI keeps the form open; the field value must be truncated to 255 chars.
        endpoints = prov.create_view(prov.endpoints_form)
        assert endpoints.default.hostname.value == prov.hostname[0:255]
@pytest.mark.tier(3)
@test_requirements.discovery
def test_api_port_max_character_validation_cloud(appliance):
    """Test to validate max character for api port field
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: high
        initialEstimate: 1/15h
    """
    # 16-character api port: one past the allowed maximum of 15.
    endpoint = RHOSEndpoint(hostname=fauxfactory.gen_alphanumeric(5),
                            api_port=fauxfactory.gen_alphanumeric(16),
                            security_protocol='Non-SSL')
    collection = appliance.collections.cloud_providers
    prov = collection.instantiate(prov_class=OpenStackProvider,
                                  name=fauxfactory.gen_alphanumeric(5),
                                  endpoints=endpoint)
    try:
        prov.create()
    except AssertionError:
        # The UI keeps the form open; the field value must be truncated to 15 chars.
        view = prov.create_view(prov.endpoints_form)
        text = view.default.api_port.value
        assert text == prov.default_endpoint.api_port[0:15]
@pytest.mark.tier(2)
@test_requirements.azure
@pytest.mark.provider([AzureProvider], scope="function")
def test_azure_subscription_required(request, provider):
    """
    Tests that provider can't be added w/o subscription
    Metadata:
        test_flag: crud
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        caseposneg: negative
        caseimportance: critical
        initialEstimate: 1/10h
        testSteps:
            1.Add Azure Provider w/0 subscription
            2.Validate
    """
    # Blank out the subscription so credential validation must fail.
    provider.subscription_id = ''
    request.addfinalizer(provider.delete_if_exists)
    with pytest.raises(AssertionError,
                       match='Credential validation was not successful: Incorrect credentials '
                             '- check your Azure Subscription ID'):
        provider.create()
@pytest.mark.tier(2)
@test_requirements.azure
@pytest.mark.provider([AzureProvider], scope="function", selector=ONE)
@pytest.mark.provider([AzureProvider], fixture_name="second_provider", selector=SECOND)
def test_azure_multiple_subscription(
    appliance, request, soft_assert, provider, second_provider, setup_provider
):
    """
    Verifies that different azure providers have different resources access
    Steps:
    1. Add all Azure providers
    2. Compare their VMs/Templates
    Metadata:
        test_flag: crud
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        initialEstimate: 1/4h
        caseimportance: critical
    """
    providers = [provider, second_provider]
    prov_inventory = []
    # NOTE: the loop variable deliberately reuses (shadows) the `provider` argument.
    for provider in providers:
        request.addfinalizer(provider.clear_providers)
        provider.create(check_existing=True)
        provider.validate_stats()
        # Record (name, vm count, template count) for later pairwise comparison.
        prov_inventory.append((provider.name,
                               provider.num_vm(),
                               provider.num_template()))
    # Every pair of subscriptions must see different VM and template counts.
    for index, prov_a in enumerate(prov_inventory[:-1]):
        for prov_b in prov_inventory[index + 1:]:
            soft_assert(prov_a[1] != prov_b[1], "Same num_vms for {} and {}".format(prov_a[0],
                                                                                    prov_b[0]))
            soft_assert(prov_a[2] != prov_b[2], "Same num_templates for {} and {}".format(prov_a[0],
                                                                                          prov_b[0]))
@pytest.mark.tier(3)
@test_requirements.azure
@pytest.mark.meta(automates=[1495318], blockers=[BZ(1756984)])
@pytest.mark.provider([AzureProvider], scope="function", selector=ONE)
def test_refresh_with_empty_iot_hub_azure(request, provider, setup_provider):
    """
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: low
        initialEstimate: 1/6h
        setup: prepare env
               create an IoT Hub in Azure (using free tier pricing is good enough):
               $ az iot hub create --name rmanes-iothub --resource-group iot_rg
        testSteps:
            1. refresh azure provider
        expectedResults:
            1. no errors found in logs
    Bugzilla:
        1495318
    """
    # Watch evm.log for any ERROR lines emitted during the refresh.
    result = LogValidator("/var/www/miq/vmdb/log/evm.log", failure_patterns=[r".*ERROR.*"])
    result.start_monitoring()
    azure = provider.mgmt
    # Ensure an IoT hub exists (create one if needed, removing it afterwards).
    if not azure.has_iothub():
        iothub_name = fauxfactory.gen_alpha(18, start="potatoiothub_")
        azure.create_iothub(iothub_name)
        request.addfinalizer(lambda: azure.delete_iothub(iothub_name))
        assert azure.has_iothub()
    provider.refresh_provider_relationships(wait=600)
    assert result.validate(wait="60s")
@test_requirements.azure
@pytest.mark.meta(automates=[1412363])
@pytest.mark.provider([AzureProvider], scope="function", selector=ONE)
@pytest.mark.tier(2)
def test_regions_gov_azure(provider):
    """
    This test verifies that Azure Government regions are not included in
    the default region list as most users will receive errors if they try
    to use them.
    Bugzilla:
        1412363
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: medium
        caseposneg: negative
        initialEstimate: 1/8h
        setup: Check the region list when adding a Azure Provider.
        startsin: 5.7
    """
    view = navigate_to(AzureProvider, "Add")
    # prefill the provider type to enable regions dropdown
    view.fill({'prov_type': provider.type.capitalize()})
    # no government regions should be available by default
    assert all('gov' not in option.text.lower() for option in view.region.all_options)
@test_requirements.general_ui
@pytest.mark.tier(3)
def test_openstack_provider_has_api_version(appliance):
    """Check whether the Keystone API version field is present for Openstack.
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    add_view = navigate_to(appliance.collections.cloud_providers, 'Add')
    # Choosing the OpenStack type must expose the Keystone API version selector.
    add_view.fill({"prov_type": "OpenStack"})
    assert add_view.api_version.is_displayed, "API version select is not visible"
def test_openstack_provider_has_dashboard(appliance, openstack_provider):
    """Check whether dashboard view is available for Openstack provider
    Bugzilla:
        1487142
    Polarion:
        assignee: pvala
        casecomponent: Cloud
        initialEstimate: 1/12h
        startsin: 5.10
    """
    details_view = navigate_to(openstack_provider, 'Details', use_resetter=False)
    # Switch the details page into its dashboard representation and verify it renders.
    details_view.toolbar.view_selector.select('Dashboard View')
    assert details_view.is_displayed
@test_requirements.ec2
@pytest.mark.tier(3)
@pytest.mark.provider([EC2Provider], scope="function")
def test_select_key_pair_none_while_provisioning(
    appliance, request, has_no_providers, provider
):
    """
    GH Issue: https://github.com/ManageIQ/manageiq/issues/10575
    Requirement: Have an ec2 provider with single key pair
                (For now available in South America (Sao Paulo) region)
    1. Compute -> Cloud -> Instances
    2. Click on Provision Instances in Toolbar
    3. Go to Properties
    4. Select None in Guest Access Key Pair
    5. None should be selected
    Polarion:
        assignee: mmojzis
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    if 'govcloud' in provider.data.tags:
        pytest.skip("providers with such tag aren't supported for some reason")
    # Force the region known to contain exactly one key pair.
    provider.region_name = 'South America (Sao Paulo)'
    request.addfinalizer(provider.delete_if_exists)
    provider.create()
    provider.validate()
    view = navigate_to(appliance.collections.cloud_instances, 'Provision', wait_for_view=0)
    view.image_table[0].click()
    view.form.continue_button.click()
    view.form.properties.guest_keypair.fill('<None>')
    # check drop down was updated with selected value
    assert view.form.properties.guest_keypair.read() == '<None>'
@pytest.mark.tier(3)
@test_requirements.azure
@pytest.mark.provider([AzureProvider])
def test_azure_instance_password_requirements(
    appliance, has_no_providers, setup_provider
):
    """
    Requirement: Have an Azure provider
    1. Compute -> Cloud -> Instances
    2. Click on Provision Instances in Toolbar
    3. Select template.
    4. Go to Customisation, fill password that doesn't match the criteria:
        * must be 12-72 characters
        * have 3 of the following - one lowercase character, one uppercase character,
        one number and one special character
    5. Error message should be displayed.
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    view = navigate_to(appliance.collections.cloud_instances, 'Provision')
    view.image_table[0].click()
    view.form.continue_button.click()
    message = (
        "'Customize/Password' must be correctly formatted. The password must be 12-72 characters, "
        "and have 3 of the following - one lowercase character, one uppercase character, "
        "one number and one special character.")
    view.form.customize.fill({
        "admin_username": "some_value",
    })
    # Each candidate password violates the complexity rule in a different way;
    # every submit must surface the validation flash message.
    for pw in ("abcdefghijkl_",
               "ABCDEFGHIJKL_",
               "ABCDEFGHIJKLa",
               "abcdefgh_1A"):
        view.form.customize.fill({"root_password": pw})
        view.form.submit_button.click()
        wait_for(lambda: message in view.flash.read(),
                 fail_condition=False, num_sec=10, delay=.1)
        view.flash.dismiss()
@pytest.mark.tier(3)
@pytest.mark.provider([EC2Provider])
def test_cloud_names_grid_floating_ips(appliance, setup_provider, soft_assert):
    """
    Requirement: Cloud provider with floating IPs
    Go to Network -> Floating IPs
    Change view to grid
    Test if names are displayed
    Polarion:
        assignee: pvala
        caseimportance: medium
        casecomponent: WebUI
        initialEstimate: 1/30h
    """
    floating_ips_collection = appliance.collections.network_floating_ips
    view = navigate_to(floating_ips_collection, "All")
    view.toolbar.view_selector.select('Grid View')
    # Every grid tile must carry a title element matching the IP address.
    for entity in view.entities.get_all():
        title = Text(
            view,
            f'//*[@id="miq-gtl-view"]//a[@title="{entity.data["address"]}"]'
        )
        soft_assert(title.is_displayed)
@test_requirements.general_ui
@pytest.mark.tier(3)
def test_display_network_topology(appliance, openstack_provider):
    """
    Bugzilla:
        1343553
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        caseimportance: medium
        initialEstimate: 1/8h
        testSteps:
            1. Add RHOS undercloud provider
            2. Make sure it has no floating IPs
            3. Go to Networks -> Topology
            4. Topology should be shown without errors.
    """
    floating_ips_collection = appliance.collections.network_floating_ips
    view = navigate_to(floating_ips_collection, "All")
    # NOTE(review): this skips when there are NO floating IPs, although the
    # docstring's step 2 says the provider should have none -- confirm which
    # precondition is actually intended.
    if not view.entities.get_all():
        pytest.skip("No Floating IPs needed for this test")
    topology_col = appliance.collections.network_topology_elements
    view = navigate_to(topology_col, 'All')
    assert view.is_displayed
@pytest.mark.provider([CloudProvider], scope='class')
class TestProvidersRESTAPI:
    # REST API coverage for cloud provider sub-resources (networks, security groups).
    @pytest.mark.tier(3)
    @pytest.mark.parametrize('from_detail', [True, False], ids=['from_detail', 'from_collection'])
    def test_cloud_networks_query(self, provider, appliance, from_detail, setup_provider):
        """Tests querying cloud providers and cloud_networks collection for network info.
        Metadata:
            test_flag: rest
        Polarion:
            assignee: pvala
            casecomponent: Cloud
            caseimportance: low
            initialEstimate: 1/3h
        """
        # Query either through the provider entity or the top-level collection.
        if from_detail:
            networks = provider.rest_api_entity.cloud_networks
        else:
            networks = appliance.rest_api.collections.cloud_networks
        assert_response(appliance)
        # Networks may appear only after a provider refresh; poll briefly.
        wait_for(
            lambda: len(networks) != 0,
            fail_func=provider.refresh_provider_relationships,
            timeout="40s",
            silent_failure=True,
        )
        assert len(networks) > 0, 'No cloud networks found'
        assert networks.name == 'cloud_networks'
        assert len(networks.all) == networks.subcount
        enabled_networks = 0
        networks.reload(expand=True)
        for network in networks:
            assert 'CloudNetwork' in network.type
            if network.enabled is True:
                enabled_networks += 1
        # At least one network is expected to be enabled.
        assert enabled_networks >= 1
    @pytest.mark.tier(3)
    def test_security_groups_query(self, provider, appliance, setup_provider):
        """Tests querying cloud networks subcollection for security groups info.
        Metadata:
            test_flag: rest
        Polarion:
            assignee: pvala
            casecomponent: Cloud
            caseimportance: low
            initialEstimate: 1/4h
        """
        # Networks may appear only after a provider refresh; poll briefly.
        wait_for(
            lambda: len(provider.rest_api_entity.cloud_networks) != 0,
            fail_func=provider.refresh_provider_relationships,
            timeout="40s",
            silent_failure=True,
        )
        try:
            network = provider.rest_api_entity.cloud_networks[0]
        except IndexError:
            pytest.fail(f'No networks found on cloud provider {provider}')
        network.reload(attributes='security_groups')
        security_groups = network.security_groups
        # "security_groups" needs to be present, even if it's just an empty list
        assert isinstance(security_groups, list)
        # if it's not empty, check type
        if security_groups:
            assert 'SecurityGroup' in security_groups[0]['type']
@test_requirements.tag
@pytest.mark.provider([CloudProvider], selector=ONE)
def test_tagvis_provision_fields(setup_provider, request, appliance, user_restricted, tag,
                                 soft_assert):
    """Test for network environment fields for restricted user
    Polarion:
        assignee: prichard
        casecomponent: Tagging
        caseimportance: medium
        initialEstimate: 1/3h
    """
    # Tag one image so the restricted user can see exactly that image.
    image = appliance.collections.cloud_images.all()[0]
    image.add_tag(tag)
    request.addfinalizer(lambda: image.remove_tag(tag))
    with user_restricted:
        view = navigate_to(appliance.collections.cloud_instances, 'Provision')
        soft_assert(len(view.image_table.read()) == 1)
        view.image_table.row(name=image.name).click()
        view.form.continue_button.click()
        environment_fields_check = [view.form.environment.cloud_tenant,
                                    view.form.environment.availability_zone,
                                    view.form.environment.cloud_network,
                                    view.form.environment.security_groups,
                                    view.form.environment.public_ip_address,
                                    view.form.properties.guest_keypair]
        # BUG FIX: the original passed a generator expression to soft_assert,
        # which is always truthy, so nothing was actually verified.  Evaluate
        # the condition for each select widget individually instead.
        for select in environment_fields_check:
            soft_assert(len(select) == 1)
@test_requirements.general_ui
@pytest.mark.tier(3)
@pytest.mark.provider([OpenStackProvider])
def test_domain_id_validation(request, provider):
    """ Test validating Keystone V3 needs domain_id
    prerequisites:
        * appliance
    Steps:
        * Navigate add Cloud provider and select OpenStack
        * Select Keystone V3 as API Version
        * Validate without Domain ID
    Polarion:
        assignee: pvala
        casecomponent: WebUI
        initialEstimate: 1/4h
    """
    provider.api_version = 'Keystone v3'
    provider.keystone_v3_domain_id = None
    request.addfinalizer(provider.delete_if_exists)
    # Creation must fail and the add form must stay open, since domain id is required.
    with pytest.raises(AssertionError):
        provider.create()
    add_view = provider.create_view(CloudProviderAddView)
    # ToDo: Assert proper flash message after BZ-1545520 fix.
    assert add_view.flash[0].type == 'error'
@test_requirements.azure
@pytest.mark.meta(automates=[1315945])
@pytest.mark.provider([AzureProvider], selector=ONE)
def test_vpc_env_selection(setup_provider, request, provider, appliance, provisioning):
    """
    Test selection of components in environment page of cloud instances
    with selected virtual private cloud
    Polarion:
        assignee: anikifor
        casecomponent: WebUI
        initialEstimate: 1/2h
        testSteps:
            1. Provision an Azure Instance from an Image.
            2. At the environment page, try to select components with vpc
        expectedResults:
            1. Instance provisioned and added successfully
            2. Items are selected successfully
    Bugzilla:
        1315945
    """
    vm_name = random_vm_name('prov-az')
    template = provisioning.get('image').get('name')
    vm = appliance.collections.cloud_instances.instantiate(name=vm_name,
                                                           provider=provider,
                                                           template_name=template)
    request.addfinalizer(vm.cleanup_on_provider)
    # default args select vpc
    data = vm.vm_default_args
    data['template_name'] = template
    data['provider_name'] = provider.name
    view = navigate_to(vm.parent, 'Provision')
    view.form.fill_with(data, on_change=view.form.submit_button)
    view.flash.assert_no_error()
    # make sure the request succeeds
    request_description = f'Provision from [{template}] to [{vm_name}]'
    provision_request = appliance.collections.requests.instantiate(request_description)
    provision_request.wait_for_request(method='ui', num_sec=15 * 60)
    assert provision_request.is_succeeded(method='ui'), "Provisioning failed: {}".format(
        provision_request.row.last_message.text)
@pytest.mark.manual
@test_requirements.azure
@pytest.mark.tier(1)
def test_sdn_nsg_arrays_refresh_azure():
    """
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: medium
        initialEstimate: 1/6h
        testSteps:
            1. Add Network Security group on Azure with coma separated port ranges
            `1023,1025` rule inbound/outbound ( ATM this feature is not allowed in
            East US region of Azure - try West/Central)
            2. Add such Azure Region into CFME
            3. Refresh provider
        expectedResults:
            1. The group is successfully added
            2. The region is successfully added
            3. Refreshed succesfully, there are no errors in the logs
    Bugzilla:
        1520196
    """
    # Manual test: the steps live in the Polarion docstring above; no automation body.
    pass
@pytest.mark.manual
@test_requirements.azure
@pytest.mark.tier(2)
def test_provider_flavors_azure():
    """
    Verify that the vm flavors in Azure are of the correct sizes and that
    the size display in CFME is accurate.
    Low priority as it is unlikely to change once set. Will want to check
    when azure adds new sizes. Only need to spot check a few values.
    For current size values, you can check here:
    https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: low
        initialEstimate: 1/8h
        startsin: 5.6
        testSteps:
            1. Add Azure provider
            2. Navigate to Flavours
        expectedResults:
            1. The provider is successfully added
            2. Flavours are the same as in MS documentation
    Bugzilla:
        1357086
    """
    # Manual test: the steps live in the Polarion docstring above; no automation body.
    pass
@pytest.mark.manual
@test_requirements.azure
@pytest.mark.tier(1)
def test_market_place_images_azure():
    """
    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: medium
        initialEstimate: 1/6h
        testSteps:
            1.Enable market place images
            2.Add Azure provider
            3.Refresh the provider
        expectedResults:
            1.
            2.
            3. Refresh is done fast (faster than 15 minutes)
    Bugzilla:
        1491330
    """
    # Manual test: the steps live in the Polarion docstring above; no automation body.
    pass
@pytest.mark.ignore_stream('5.11')
@test_requirements.azure
@pytest.mark.tier(1)
def test_create_azure_vm_from_azure_image(connect_az_account, cfme_vhd, upload_image_to_azure,
                                          vm_ip):
    """
    Provision a CFME appliance VM in Azure from an uploaded VHD image, then
    configure it over SSH and verify the web UI comes up.

    To run this test Azure account is required.
    Azure VM is provisioned from another VM using Powershell, that can be run on any provider.

    Polarion:
        assignee: anikifor
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/2h
        setup: # Virtual Machine Name - as it appears in Azure
               $VMName = "myVmName"
               $ResourceGroupName = "CFMEQE-Main"
               Break
               # Existing Azure Deployment Values - Video with instructions
               forthcoming.
               $AvailabilitySetName = "cfmeqe-as-free"
               $AzureLocation = "East US"
               $VMDeploymentSize= "Standard_A1"
               $StorageAccountName = "cfmeqestore"
               $BlobContainerName = "templates"
               $VHDName = "cfme-azure-56013.vhd"
               $VirtualNetworkName = "cfmeqe"
               $NetworkSecurityGroupName = "cfmeqe-nsg"
               $VirtualNetworkSubnetName = "default"
               $VirtualNetworkAddressPrefix = "10.0.0.0/16"
               $VirtualNetworkSubnetAddressPrefix = "10.0.0.0/24"
               # Create VM Components
               $StorageAccount = Get-AzureRmStorageAccount -ResourceGroupName
               $ResourceGroupName -Name $StorageAccountName
               $InterfaceName = $VMName
               $NetworkSecurityGroupID = Get-AzureRmNetworkSecurityGroup -Name
               $NetworkSecurityGroupName -ResourceGroupName $ResourceGroupName
               $PIp = New-AzureRmPublicIpAddress -Name $InterfaceName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -AllocationMethod Dynamic -Force
               $SubnetConfig = New-AzureRmVirtualNetworkSubnetConfig -Name
               $VirtualNetworkSubnetName -AddressPrefix
               $VirtualNetworkSubnetAddressPrefix
               $VNet = New-AzureRmVirtualNetwork -Name $VirtualNetworkName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -AddressPrefix $VirtualNetworkAddressPrefix -Subnet $SubnetConfig
               -Force
               $Interface = New-AzureRmNetworkInterface -Name $InterfaceName
               -ResourceGroupName $ResourceGroupName -Location $AzureLocation
               -SubnetId $VNet.Subnets[0].Id -PublicIpAddressId $PIp.Id -Force
               $AvailabilitySet = Get-AzureRmAvailabilitySet -ResourceGroupName
               $ResourceGroupName -Name $AvailabilitySetName
               $VirtualMachine = New-AzureRmVMConfig -VMName $VMName -VMSize
               $VMDeploymentSize -AvailabilitySetID $AvailabilitySet.Id
               $VirtualMachine = Add-AzureRmVMNetworkInterface -VM $VirtualMachine
               -Id $Interface.Id
               $OSDiskUri = $StorageAccount.PrimaryEndpoints.Blob.ToString() +
               $BlobContainerName + "/" + $VHDName
               $VirtualMachine = Set-AzureRmVMOSDisk -VM $VirtualMachine -Name
               $VMName -VhdUri $OSDiskUri -CreateOption attach -Linux
               # Create the Virtual Machine
               New-AzureRmVM -ResourceGroupName $ResourceGroupName -Location
               $AzureLocation -VM $VirtualMachine
        testSteps:
            1. Make the VM
            2. Config SSH support
            3. Config DNS is desired.
            4. SSH into new VM with Azure Public IP address and verify it has booted
               correctly.
            5. Use HTTP to DNS into the appliance web ui and make sure
               you can log in.
        startsin: 5.6
        teardown: When you're done, delete everything. Make sure at a minimum that the
                  VM is completely Stopped in Azure.
    """
    # Wrap the provisioned VM's IP in an IPAppliance so it can be configured
    # and its UI exercised below.
    app = appliance.IPAppliance.from_url(vm_ip)
    # Credentials for the provisioned VM from CFME image, this is different to the VM that runs
    # powershell scripts as Azure has specific requirements for login/password.
    # These credentials are used in the script create_vm.ps1 to provision the VM.
    username = credentials['azure_appliance']['username']
    password = credentials['azure_appliance']['password']
    with ssh.SSHClient(hostname=vm_ip,
                       username=username,
                       password=password) as app_ssh_client:
        # permit root login over ssh for future appliance configuration
        command = 'sed -i "s/.*PermitRootLogin.*/PermitRootLogin yes/g" /etc/ssh/sshd_config'
        # Azure images only allow the non-root user in, so each privileged step
        # is piped through sudo with the password on stdin.
        config = app_ssh_client.run_command(
            f'echo {password} | sudo -S {command}', ensure_user=True)
        assert config.success
        # restart sshd to apply configuration changes
        restart = app_ssh_client.run_command(
            f'echo {password} | sudo -S systemctl restart sshd', ensure_user=True)
        assert restart.success
        # unlock root password
        unlock = app_ssh_client.run_command(
            f'echo {password} | sudo -S passwd -u root', ensure_user=True)
        assert unlock.success
    # Run the standard appliance configuration and wait until the MIQ services
    # report ready before touching the UI.
    app.configure()
    app.wait_for_miq_ready()
    # Check we can login
    logged_in_page = app.server.login()
    assert logged_in_page.is_displayed
@test_requirements.ec2
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
def test_refresh_with_stack_without_parameters(
    provider, has_no_providers, request, stack_without_parameters
):
    """
    Refresh an EC2 provider that owns a CloudFormation stack which has no
    parameters; the refresh must complete without errors.

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/5h
        testSteps:
            1. Add cloudformation stack without parameters(https://s3-us-
               west-2.amazonaws.com/cloudformation-templates-us-
               west-2/Managed_EC2_Batch_Environment.template )
            2. Add ec2 provider with cloudformation stack without parameters
        expectedResults:
            1.
            2. Wait for refresh - it should be refreshed successfully without errors
    """
    # The stack_without_parameters fixture pre-creates the stack; adding the
    # provider afterwards makes the refresh pick it up.
    provider.create()
    # Clean the provider up even if validation below fails.
    request.addfinalizer(provider.delete_if_exists)
    provider.refresh_provider_relationships()
    # Compare UI-reported stats against the provider API to confirm a
    # complete, error-free refresh.
    provider.validate_stats(ui=True)
@test_requirements.cloud
@pytest.mark.long_running
@pytest.mark.ignore_stream("5.10", "5.11")
@pytest.mark.meta(automates=[1491330, 1612086])
@pytest.mark.provider([AzureProvider, EC2Provider], scope="function")
def test_public_images_enable_disable(setup_provider, request, appliance, provider):
    """
    Toggle public-image collection for a cloud provider and verify refreshes
    collect and then drop the public images.

    Bugzilla:
        1491330
        1612086

    The easiest way to simulate AWS API Limit for > 200 items is to enable
    and disable public images.
    So test for testing public images and for testing AWS API Limit is combined in this test.

    Polarion:
        assignee: mmojzis
        caseimportance: critical
        initialEstimate: 1 1/2h
        casecomponent: Cloud
        testSteps:
            1. Enable public images for ec2
            2. Add ec2 provider
            3. Wait for its refresh(It can take more than 30 minutes)
            4. Disable public images for ec2
            5. Wait for its refresh(It can take more than 30 minutes)
        expectedResults:
            1.
            2.
            3. Refresh should be successful and public images collected
            4.
            5. Refresh should be successful and public images uncollected
    """
    # if provider gets stuck loading images it could take more than two hours
    # to be in operating state, which can cause other tests to fail, so better
    # to delete the provider for safety.  Bound method passed directly -- no
    # need to wrap a no-argument call in a lambda.
    request.addfinalizer(provider.delete_if_exists)
    request.addfinalizer(lambda: appliance.set_public_images(provider, enabled=False))
    # Thresholds only need to separate the two states: public image sets are
    # huge (tens of thousands), private ones comparatively tiny.
    public_provider_images_min = 20000 if provider.one_of(AzureProvider) else 40000
    private_provider_images_max = 5000

    def _image_count():
        # Current "Images" relationship count from the provider details page.
        return int(provider.load_details(refresh=True)
                   .entities.summary("Relationships").get_text_of("Images"))

    # enable public images and wait for the (slow) refresh to collect them
    appliance.set_public_images(provider, enabled=True)
    provider.refresh_provider_relationships(method='ui')
    wait_for(lambda: _image_count() > public_provider_images_min,
             delay=120, timeout=3600 * 3)
    # disable public images and wait for them to be uncollected again
    appliance.set_public_images(provider, enabled=False)
    provider.refresh_provider_relationships(method='ui')
    wait_for(lambda: _image_count() < private_provider_images_max,
             delay=120, timeout=3600 * 3)
@test_requirements.ec2
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
def test_create_sns_topic(has_no_providers, provider, request):
    """
    Verify that adding an EC2 provider automatically creates the
    'AWSConfig_topic' SNS topic in the tested region.

    Requires: No SNS topic(AWS_Config) for tested region

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: medium
        initialEstimate: 1/6h
        startsin: 5.8
        testSteps:
            1. Add an ec2 provider with tested region
            2. Wait 3 minutes
        expectedResults:
            1.
            2. Check SNS topic exists for this region in AWS
    """
    # preparations for test: remove any pre-existing topic so that its later
    # presence can only have come from the provider-add flow
    request.addfinalizer(provider.delete_if_exists)
    topic = provider.mgmt.get_arn_if_topic_exists('AWSConfig_topic')
    if topic:
        provider.mgmt.delete_topic(topic)
    # SNS topic should be automatically created during provider creation in CFME
    provider.create()
    new_topic = wait_for(lambda: provider.mgmt.get_arn_if_topic_exists('AWSConfig_topic'), delay=15,
                         timeout=300)
    # set topic targets in the environment so refreshes run correctly
    # NOTE(review): wait_for typically returns a result wrapper, not the bare
    # ARN -- confirm set_sns_topic_target_for_all_cw_rules accepts it, or
    # whether new_topic.out should be passed instead.
    provider.mgmt.set_sns_topic_target_for_all_cw_rules(new_topic)
@test_requirements.ec2
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
def test_add_delete_add_provider(setup_provider, provider, request):
    """
    An EC2 provider must be re-addable after having been deleted.

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 1h
        caseimportance: critical
        testSteps:
            1. Add ec2 provider
            2. Delete ec2 provider
            3. Add ec2 provider
        expectedResults:
            1.
            2.
            3. Ec2 provider should be successfully added again without any issues
    """
    # setup_provider already added the provider (step 1); remove and re-add it.
    provider.delete()
    provider.create()
    # Ensure the re-added provider is cleaned up after the test.
    request.addfinalizer(provider.delete_if_exists)
    provider.refresh_provider_relationships()
    # A successful stats validation proves the second add refreshed cleanly.
    provider.validate_stats(ui=True)
@test_requirements.ec2
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
def test_deploy_instance_with_ssh_addition_template(setup_provider,
                                                    instance_with_ssh_addition_template):
    """
    Provision an EC2 instance using the SSH key addition customization
    template and verify it comes up.

    Requirement: EC2 provider

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: medium
        initialEstimate: 1/6h
        testSteps:
            1. Provision an instance
            2. Select Choose Automatically in Environment -> Placement
            3. Select SSH key addition template in Customize -> Customize Template
            4. Provision instance
        expectedResults:
            1.
            2.
            3.
            4. Instance should be provisioned without any errors
    """
    # The fixture performs the whole provisioning flow; only existence needs
    # to be asserted here.
    if not instance_with_ssh_addition_template.exists:
        pytest.fail('Instance with ssh addition template was not created successfully!')
@test_requirements.ec2
@pytest.mark.manual
def test_add_ec2_provider_with_instance_without_name():
    """
    Manual test: refreshing an EC2 provider that contains an unnamed
    instance must not raise errors.

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/6h
        testSteps:
            1. Add an ec2 provider with instance without name
            2. Wait for refresh
        expectedResults:
            1.
            2. Refresh should complete without errors
    """
    # Placeholder body: executed manually.
    pass
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
@test_requirements.ec2
def test_regions_up_to_date(provider):
    """
    The EC2 region list offered by CFME's Add Provider form must match the
    regions AWS itself reports.

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/3h
        testSteps:
            1. Compare regions in AWS Console with regions for EC2 in CFME
        expectedResults:
            1. There should be same regions in CFME as in AWS Console.
    """
    # Region display names straight from the AWS API.
    regions_provider = provider.mgmt.list_regions(verbose=True)
    view = navigate_to(CloudProvider, 'Add')
    view.prov_type.fill("Amazon EC2")
    # Skip the "<Choose>" placeholder (first entry) via a slice instead of
    # mutating the option list in place.
    regions_cfme_texts = [option.text for option in view.region.all_options[1:]]
    # fixing recent change in AWS naming from EU to Europe:
    regions_cfme_texts = [region.replace('EU', 'Europe') for region in regions_cfme_texts]
    regions_not_in_cfme = set(regions_provider) - set(regions_cfme_texts)
    extra_regions_in_cfme = set(regions_cfme_texts) - set(regions_provider)
    # Report drift in either direction; plain truthiness replaces len(...) > 0.
    if regions_not_in_cfme:
        pytest.fail(f"Regions {regions_not_in_cfme} are not in CFME!")
    if extra_regions_in_cfme:
        pytest.fail(f"Extra regions in CFME: {extra_regions_in_cfme}")
@test_requirements.ec2
@pytest.mark.manual
def test_add_ec2_provider_with_non_default_url_endpoint():
    """
    Manual test: an EC2 provider configured with a non-default URL endpoint
    must add and refresh cleanly.

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        caseimportance: high
        initialEstimate: 1/6h
        testSteps:
            1. Add an ec2 provider with non default url endpoint
            2. Wait for refresh
        expectedResults:
            1. Provider should be added with no issues
            2. Refresh should complete without errors
    """
    # Placeholder body: executed manually.
    pass
@test_requirements.ec2
@pytest.mark.ignore_stream("5.10")
def test_add_ec2_provider_with_sts_assume_role(appliance, ec2_provider_with_sts_creds):
    """
    Add an EC2 provider that authenticates via STS AssumeRole credentials.

    Requires:
        The requirement is only on EC2 side and needs to be added manually once.
        1. Role which has all the required permissions to manage CFME
        2. Edit Trust relationship policy for this role to:
           {
             "Version": "2012-10-17",
             "Statement": [
               {
                 "Effect": "Allow",
                 "Principal": {
                   "AWS": "arn:aws:iam::NNNNNNNNNNNN:root"
                 },
                 "Action": "sts:AssumeRole"
               }
             ]
           }
        3. Have policy with AssumeRole permission:
           {
             "Version": "2012-10-17",
             "Statement": [
               {
                 "Effect": "Allow",
                 "Action": "sts:AssumeRole",
                 "Resource": "arn:aws:iam::NNNNNNNNNNNN:role/RoleForCFME"
               }
             ]
           }
        4. Have an user with only attached policy created in last step

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 1/2h
        caseimportance: high
        testSteps:
            1. Go to Compute -> Cloud -> Providers
            2. Add EC2 Provider with these fields filled in:
        expectedResults:
            1.
            2. Provider should be successfully added.
    """
    # The fixture carries the STS credentials; creating and validating the
    # provider proves the AssumeRole authentication path works end-to-end.
    ec2_provider_with_sts_creds.create()
    ec2_provider_with_sts_creds.validate()
@test_requirements.ec2
@pytest.mark.meta(automates=[1658207])
@pytest.mark.provider([EC2Provider], scope="function", selector=ONE)
def test_add_second_provider(setup_provider, provider, request):
    """
    A second EC2 provider (same credentials, different name) must be
    addable alongside an existing one.

    Bugzilla: 1658207

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 1/3h
        caseimportance: high
        testSteps:
            1. Go to Compute -> Cloud -> Providers
            2. Add EC2 Provider
            3. Add another EC2 Provider
        expectedResults:
            1.
            2. Provider should be successfully added.
            3. Provider should be successfully added.
    """
    # Build a second CRUD object from the same YAML key so only the name
    # differs from the already-set-up provider.
    second_provider = get_crud(provider.key)
    second_provider.name = f"{provider.name}-2"
    second_provider.create()
    request.addfinalizer(second_provider.delete_if_exists)
    second_provider.refresh_provider_relationships()
    second_provider.validate_stats(ui=True)
    # Both providers must coexist after the second refresh.
    assert provider.exists and second_provider.exists
@test_requirements.ec2
@pytest.mark.meta(automates=[1710599, 1710623])
@pytest.mark.ignore_stream("5.10")  # BZ 1710623 was not merged into 5.10
def test_provider_compare_ec2_provider_and_backup_regions(appliance):
    """
    The AWS region lists offered by the Add Provider form, the scheduled
    AWS S3 database backup form and the immediate AWS S3 database backup
    form must all agree.

    Bugzilla:
        1710599
        1710623

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 1/6h
        caseimportance: medium
        testSteps:
            1. Go to Compute -> Cloud -> Providers -> Add a new Cloud Provider
            2. Select Provider: Amazon EC2 and list AWS Regions
            3. Go to Configuration -> Settings -> Schedules -> Add a new Schedule
            4. Select Action: Database Backup, Type: AWS S3 and list AWS Regions
            5. Go to Configuration -> Diagnostics -> Region -> Database
            6. Select Type: AWS S3 and list AWS Regions
        expectedResults:
            1.
            2.
            3.
            4.
            5.
            6. Compare all three lists. They should contain same regions.
    """
    def _region_texts(options):
        # Normalise one region dropdown: drop the "<Choose>" placeholder and
        # sort so the lists compare irrespective of display order.
        return sorted(option.text for option in options if option.text != "<Choose>")

    # Regions offered when adding an EC2 cloud provider.
    view = navigate_to(CloudProvider, 'Add')
    view.prov_type.fill("Amazon EC2")
    regions_provider_texts = _region_texts(view.region.all_options)
    # Regions offered for a scheduled AWS S3 database backup.
    view = navigate_to(appliance.collections.system_schedules, 'Add')
    view.form.action_type.fill("Database Backup")
    view.form.database_backup.backup_type.fill("AWS S3")
    regions_scheduled_backup_texts = _region_texts(
        view.form.database_backup.backup_settings.aws_region.all_options)
    # Regions offered for an immediate AWS S3 database backup.
    view = navigate_to(appliance.server.zone.region, 'Database')
    view.db_backup_settings.backup_type.fill("AWS S3")
    regions_immediate_backup_texts = _region_texts(
        view.db_backup_settings.backup_settings.aws_region.all_options)
    # All three pickers must expose the identical set of AWS regions.
    assert regions_provider_texts == regions_scheduled_backup_texts
    assert regions_provider_texts == regions_immediate_backup_texts
@test_requirements.cloud
@pytest.mark.meta(automates=[1632750], blockers=[BZ(1632750,
                  unblock=lambda child_provider: "object_managers"
                  in child_provider)])
@pytest.mark.uncollectif(lambda child_provider, provider:
                         (provider.one_of(EC2Provider) and (child_provider == "object_managers")) or
                         (provider.one_of(AzureProvider) and (child_provider !=
                          'network_providers')),
                         reason="Storage is not supported by AzureProvider "
                                "and Object Storage is not supported by EC2Provider")
@pytest.mark.provider([AzureProvider, EC2Provider, OpenStackProvider], scope="function")
def test_cloud_provider_dashboard_after_child_provider_remove(
        appliance, provider, request, setup_provider_funcscope, child_provider):
    """
    The cloud provider Dashboard view must still render after one of the
    provider's child managers has been deleted.

    Note: the original version applied @test_requirements.cloud twice
    (once above and once between the uncollectif and provider marks); the
    redundant second application has been removed.

    Bugzilla: 1632750

    Polarion:
        assignee: mmojzis
        casecomponent: Cloud
        initialEstimate: 1/6h
        caseimportance: high
        testSteps:
            1. Have a cloud provider added
            2. Delete one of its child managers
            3. Go to cloud provider Dashboard
        expectedResults:
            1.
            2.
            3. Dashboard should load without any issues
    """
    child_provider.delete(cancel=False)

    # Sometimes the provider was not deleted, so this prevents a provider
    # stripped of its child providers from leaking into subsequent tests.
    @request.addfinalizer
    def _wait_for_delete_provider():
        provider.delete()
        provider.wait_for_delete()

    # The dashboard must render cleanly without the removed child manager.
    view = navigate_to(provider, "Details")
    view.toolbar.view_selector.select('Dashboard View')
    view.wait_displayed()
    view.flash.assert_no_error()
| gpl-2.0 |
mstreatfield/rez | src/build_utils/distlib/version.py | 198 | 22996 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-386,
distribute-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
    """Raised when a string cannot be parsed under the active version scheme."""
    pass
class Version(object):
    """Abstract base class for version objects.

    A subclass supplies :meth:`parse`, which converts the version string
    into a non-empty tuple of comparable parts; every rich comparison and
    the hash are driven by that tuple.  Versions of different concrete
    types are never comparable (TypeError).
    """

    def __init__(self, s):
        # Normalise whitespace once, then hand off to the subclass parser.
        s = s.strip()
        self._string = s
        self._parts = self.parse(s)
        assert isinstance(self._parts, tuple)
        assert len(self._parts) > 0

    def parse(self, s):
        """Return the comparison-key tuple for *s* (subclass hook)."""
        raise NotImplementedError('please implement in a subclass')

    def _check_compatible(self, other):
        # Guard: comparisons are only meaningful within one concrete type.
        if type(self) != type(other):
            raise TypeError('cannot compare %r and %r' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        self._check_compatible(other)
        return self._parts < other._parts

    # The remaining orderings are derived from < and == so that the type
    # check lives in just two primitives.
    def __gt__(self, other):
        return not (self < other or self == other)

    def __le__(self, other):
        return self < other or self == other

    def __ge__(self, other):
        return self > other or self == other

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self._parts)

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string

    @property
    def is_prerelease(self):
        """True if this version denotes a pre-release (subclass hook)."""
        raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
    """Match a requirement string like "name (>= 1.0, < 2.0)" against
    candidate versions.

    Subclasses set :attr:`version_class` to the :class:`Version` subclass
    used to parse both the constraints and the candidates.
    """
    version_class = None
    # "name (constraint, constraint, ...)" -- group 3 holds the raw
    # constraint list, if any.
    dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
    # One constraint: an optional operator followed by a version string.
    comp_re = re.compile(r'^(<=|>=|<|>|!=|==|~=)?\s*([^\s,]+)$')
    # Purely numeric dotted versions (used to vet 'X.Y.*' prefixes).
    num_re = re.compile(r'^\d+(\.\d+)*$')
    # value is either a callable or the name of a method
    _operators = {
        '<': lambda v, c, p: v < c,
        '>': lambda v, c, p: v > c,
        '<=': lambda v, c, p: v == c or v < c,
        '>=': lambda v, c, p: v == c or v > c,
        '==': lambda v, c, p: v == c,
        # by default, compatible => >=.
        '~=': lambda v, c, p: v == c or v > c,
        '!=': lambda v, c, p: v != c,
    }
    def __init__(self, s):
        """Parse *s* into self.name/self.key plus a tuple of
        (operator, version-or-prefix, is_prefix) triples in self._parts.

        Raises ValueError on malformed input or if the subclass forgot to
        set version_class.
        """
        if self.version_class is None:
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        m = self.dist_re.match(s)
        if not m:
            raise ValueError('Not valid: %r' % s)
        groups = m.groups('')
        self.name = groups[0].strip()
        self.key = self.name.lower()    # for case-insensitive comparisons
        clist = []
        if groups[2]:
            constraints = [c.strip() for c in groups[2].split(',')]
            for c in constraints:
                m = self.comp_re.match(c)
                if not m:
                    raise ValueError('Invalid %r in %r' % (c, s))
                groups = m.groups()
                # A bare version with no operator means "compatible" (~=).
                op = groups[0] or '~='
                s = groups[1]
                if s.endswith('.*'):
                    if op not in ('==', '!='):
                        raise ValueError('\'.*\' not allowed for '
                                         '%r constraints' % op)
                    # Could be a partial version (e.g. for '2.*') which
                    # won't parse as a version, so keep it as a string
                    vn, prefix = s[:-2], True
                    if not self.num_re.match(vn):
                        # Just to check that vn is a valid version
                        self.version_class(vn)
                else:
                    # Should parse as a version, so we can create an
                    # instance for the comparison
                    vn, prefix = self.version_class(s), False
                clist.append((op, vn, prefix))
        self._parts = tuple(clist)
    def match(self, version):
        """
        Check if the provided version matches the constraints.

        :param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
        """
        if isinstance(version, string_types):
            version = self.version_class(version)
        for operator, constraint, prefix in self._parts:
            f = self._operators.get(operator)
            if isinstance(f, string_types):
                # The operator maps to a method name on the subclass.
                f = getattr(self, f)
            if not f:
                msg = ('%r not implemented '
                       'for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            if not f(version, constraint, prefix):
                return False
        return True
    @property
    def exact_version(self):
        # The pinned version when this matcher is a single '==' constraint,
        # otherwise None.
        result = None
        if len(self._parts) == 1 and self._parts[0][0] == '==':
            result = self._parts[0][1]
        return result
    def _check_compatible(self, other):
        # Matchers only compare against matchers of the same type for the
        # same distribution name.
        if type(self) != type(other) or self.name != other.name:
            raise TypeError('cannot compare %s and %s' % (self, other))
    def __eq__(self, other):
        self._check_compatible(other)
        return self.key == other.key and self._parts == other._parts
    def __ne__(self, other):
        return not self.__eq__(other)
    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.key) + hash(self._parts)
    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._string)
    def __str__(self):
        return self._string
PEP426_VERSION_RE = re.compile(r'^(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(-(\d+(\.\d+)?))?$')
def _pep426_key(s):
s = s.strip()
m = PEP426_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[0].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
pre = groups[3:5]
post = groups[6:8]
dev = groups[9:11]
local = groups[12]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
local = tuple([int(s) for s in local.split('.')])
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return nums, pre, post, dev, local
_normalized_key = _pep426_key
class NormalizedVersion(Version):
    """A rational version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4
        TODO: fill this out

    Bad:
        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def parse(self, s):
        result = _normalized_key(s)
        # _normalized_key loses trailing zeroes in the release
        # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
        # However, PEP 440 prefix matching needs it: for example,
        # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
        m = PEP426_VERSION_RE.match(s)      # must succeed
        groups = m.groups()
        # Keep the unstripped release numbers for prefix matching.
        self._release_clause = tuple(int(v) for v in groups[0].split('.'))
        return result
    # Tags whose presence in any part of the key marks a pre-release.
    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
    @property
    def is_prerelease(self):
        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
    """Matcher implementing PEP 440-style operator semantics for
    :class:`NormalizedVersion` candidates."""
    version_class = NormalizedVersion
    # value is either a callable or the name of a method
    _operators = {
        '~=': '_match_compatible',
        '<': '_match_lt',
        '>': '_match_gt',
        '<=': '_match_le',
        '>=': '_match_ge',
        '==': '_match_eq',
        '!=': '_match_ne',
    }
    def _adjust_local(self, version, constraint, prefix):
        """Strip *version*'s local segment ('-N[.N]') when the constraint
        carries none, so local tags don't influence the comparison."""
        if prefix:
            # Here the constraint is still a 'X.Y.*' prefix string.
            strip_local = '-' not in constraint and version._parts[-1]
        else:
            # both constraint and version are
            # NormalizedVersion instances.
            # If constraint does not have a local component,
            # ensure the version doesn't, either.
            strip_local = not constraint._parts[-1] and version._parts[-1]
        if strip_local:
            # The local segment is everything after the first '-'.
            s = version._string.split('-', 1)[0]
            version = self.version_class(s)
        return version, constraint
    def _match_lt(self, version, constraint, prefix):
        """'<': strictly lower, and not just a longer spelling of the
        constraint's release clause."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version >= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)
    def _match_gt(self, version, constraint, prefix):
        """'>': strictly greater, and not just a longer spelling of the
        constraint's release clause."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version <= constraint:
            return False
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)
    def _match_le(self, version, constraint, prefix):
        """'<=': plain ordered comparison after local-segment adjustment."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version <= constraint
    def _match_ge(self, version, constraint, prefix):
        """'>=': plain ordered comparison after local-segment adjustment."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version >= constraint
    def _match_eq(self, version, constraint, prefix):
        """'==': exact equality, or series membership for 'X.Y.*' forms."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version == constraint)
        else:
            result = _match_prefix(version, constraint)
        return result
    def _match_ne(self, version, constraint, prefix):
        """'!=': negation of the '==' logic, including 'X.Y.*' forms."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version != constraint)
        else:
            result = not _match_prefix(version, constraint)
        return result
    def _match_compatible(self, version, constraint, prefix):
        """'~=': at least the constraint, and inside the series obtained by
        dropping the constraint's final release component."""
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version == constraint:
            return True
        if version < constraint:
            return False
        release_clause = constraint._release_clause
        if len(release_clause) > 1:
            release_clause = release_clause[:-1]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
# Whole-string clean-up substitutions applied in order by
# _suggest_semantic_version before the numeric prefix is extracted.
_REPLACEMENTS = (
    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
    (re.compile('^[.-]'), ''),                      # remove leading puncts
    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading v(ersion)
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
    (re.compile(r'\b(pre-alpha|prealpha)\b'),
                'pre.alpha'),                       # standardise
    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
)

# Clean-ups applied only to the non-numeric suffix once it has been split
# off from the release numbers.
_SUFFIX_REPLACEMENTS = (
    (re.compile('^[:~._+-]+'), ''),                 # remove leading puncts
    (re.compile('[,*")([\]]'), ''),                 # remove unwanted chars
    (re.compile('[~:+_ -]'), '.'),                  # replace illegal chars
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\.$'), ''),                       # trailing '.'
)

# Leading dotted-number run, e.g. the '1.2.3' in '1.2.3rc4'.
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
    """
    Try to suggest a semantic form for a version for which
    _suggest_normalized_version couldn't come up with anything.

    Returns the suggested string, or None if the cleaned-up result still
    fails semver validation.
    """
    result = s.strip().lower()
    # Whole-string clean-ups (order matters; see _REPLACEMENTS).
    for pat, repl in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if not result:
        result = '0.0.0'
    # Now look for numeric prefix, and separate it out from
    # the rest.
    m = _NUMERIC_PREFIX.match(result)
    if not m:
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Pad to the major.minor.patch triple semver requires.
        while len(prefix) < 3:
            prefix.append(0)
        if len(prefix) == 3:
            suffix = result[m.end():]
        else:
            # Extra numeric components are folded into the suffix.
            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
            prefix = prefix[:3]
        prefix = '.'.join([str(i) for i in prefix])
        suffix = suffix.strip()
    if suffix:
        # massage the suffix.
        for pat, repl in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)
    if not suffix:
        result = prefix
    else:
        # 'dev' suffixes become a pre-release ('-'); anything else is
        # treated as build metadata ('+').
        sep = '-' if 'dev' in suffix else '+'
        result = prefix + sep + suffix
    if not is_semver(result):
        result = None
    return result
def _suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    try:
        _normalized_key(s)
        return s   # already rational
    except UnsupportedVersionError:
        pass
    rs = s.lower()
    # part of this could use maketrans
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)
    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)
    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]
    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)
    # The 'r' and the '-' tags are post release tags
    #   0.4a1.r10       ->  0.4a1.post10
    #   0.9.33-17222    ->  0.9.33.post17222
    #   0.9.33-r17222   ->  0.9.33.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
    # Clean 'r' instead of 'dev' usage:
    #   0.9.33+r17222   ->  0.9.33.dev17222
    #   1.0dev123       ->  1.0.dev123
    #   1.0.git123      ->  1.0.dev123
    #   1.0.bzr123      ->  1.0.dev123
    #   0.1a0dev.123    ->  0.1a0.dev123
    # PyPI stats:  ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    #   0.2.pre1        ->  0.2c1
    #   0.2-c1          ->  0.2c1
    #   1.0preview123   ->  1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)
    # Final sanity check: only return the suggestion if it now parses.
    try:
        _normalized_key(rs)
    except UnsupportedVersionError:
        rs = None
    return rs
#
# Legacy version processing (distribute-compatible)
#

# Splits a version string into alphabetic runs, digit runs and separators.
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
# Token canonicalisation used by _legacy_key: pre-release synonyms become
# 'c', 'dev' becomes '@' (sorts before any letter), and the '' / '.'
# separators are dropped via None.  A bare '-' maps to 'final-' (presumably
# so trailing dashes sort like final releases -- see the popping logic in
# _legacy_key).
_VERSION_REPLACE = {
    'pre': 'c',
    'preview': 'c',
    '-': 'final-',
    'rc': 'c',
    'dev': '@',
    '': None,
    '.': None,
}
def _legacy_key(s):
    """Return a sort key tuple for *s* using distribute/setuptools-style
    ("legacy") version ordering rules."""
    def get_parts(s):
        # Tokenise and canonicalise: numeric parts are zero-padded to 8
        # digits so plain string comparison behaves numerically; alphabetic
        # parts get a '*' prefix so they sort before the padded numbers.
        result = []
        for p in _VERSION_PART.split(s.lower()):
            p = _VERSION_REPLACE.get(p, p)
            if p:
                if '0' <= p[:1] <= '9':
                    p = p.zfill(8)
                else:
                    p = '*' + p
                result.append(p)
        # '*final' terminates the version proper.
        result.append('*final')
        return result
    result = []
    for p in get_parts(s):
        if p.startswith('*'):
            if p < '*final':
                # A pre-release tag cancels any preceding 'final-' markers.
                while result and result[-1] == '*final-':
                    result.pop()
            # Drop trailing zero components so '1.0a1' == '1.0.0a1'.
            while result and result[-1] == '00000000':
                result.pop()
        result.append(p)
    return tuple(result)
class LegacyVersion(Version):
    """Version with distribute/setuptools-compatible ("legacy") ordering."""

    def parse(self, s):
        # All the parsing work lives in the module-level key function.
        return _legacy_key(s)

    @property
    def is_prerelease(self):
        # _legacy_key marks alphabetic parts with a '*' prefix; any such
        # part that sorts before '*final' denotes a pre-release tag.
        return any(isinstance(part, string_types) and
                   part.startswith('*') and part < '*final'
                   for part in self._parts)
class LegacyMatcher(Matcher):
    version_class = LegacyVersion

    _operators = dict(Matcher._operators)
    _operators['~='] = '_match_compatible'

    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning/SyntaxWarning on modern Pythons).
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')

    def _match_compatible(self, version, constraint, prefix):
        """Implement '~=' (compatible release) for legacy versions.

        The candidate must compare >= the constraint and share the
        constraint's numeric prefix with the last numeric component
        dropped (e.g. '~= 1.4.5' accepts anything >= 1.4.5 that starts
        with '1.4').
        """
        if version < constraint:
            return False
        m = self.numeric_re.match(str(constraint))
        if not m:
            # No numeric prefix to compare against: accept, but warn.
            logger.warning('Cannot compute compatible match for version %s '
                           ' and constraint %s', version, constraint)
            return True
        s = m.groups()[0]
        if '.' in s:
            s = s.rsplit('.', 1)[0]
        return _match_prefix(version, s)
#
# Semantic versioning
#

_SEMVER_RE = re.compile(
    r'^(\d+)\.(\d+)\.(\d+)'             # major.minor.patch
    r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'     # optional pre-release tag
    r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$',  # optional build metadata
    re.I)


def is_semver(s):
    """Return the regex match for *s* if it is a valid semantic version,
    else None; usable directly as a boolean test."""
    return _SEMVER_RE.match(s)
def _semantic_key(s):
    """Return a three-part sort key ``(release, pre, build)`` for the
    semantic version *s*.

    Raises UnsupportedVersionError if *s* does not parse as semver.
    """
    def make_tuple(text, absent):
        # *absent* is a one-character sentinel used when the optional
        # part is missing, chosen so tuples order as intended ('|' for
        # a missing pre-release, '*' for missing build metadata).
        if text is None:
            return (absent,)
        parts = text[1:].split('.')
        # Ints and strings cannot be ordered against each other on
        # Python 3, so zero-fill numeric identifiers to simulate a
        # numeric comparison.
        return tuple(p.zfill(8) if p.isdigit() else p for p in parts)

    m = is_semver(s)
    if not m:
        raise UnsupportedVersionError(s)
    groups = m.groups()
    major, minor, patch = (int(g) for g in groups[:3])
    pre = make_tuple(groups[3], '|')
    build = make_tuple(groups[5], '*')
    return (major, minor, patch), pre, build
class SemanticVersion(Version):
    # Version subclass implementing semantic-versioning (semver) ordering.
    def parse(self, s):
        return _semantic_key(s)

    @property
    def is_prerelease(self):
        # _semantic_key uses '|' as the "no pre-release part" sentinel.
        return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
    # Matcher specialised for semver versions; inherits all operators.
    version_class = SemanticVersion
class VersionScheme(object):
    """Bundle a sort-key function, a Matcher class and an optional
    version-suggestion function into a named versioning scheme."""

    def __init__(self, key, matcher, suggester=None):
        self.key = key
        self.matcher = matcher
        self.suggester = suggester

    def is_valid_version(self, s):
        """Return True if *s* parses under this scheme's version class."""
        try:
            self.matcher.version_class(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_matcher(self, s):
        """Return True if *s* parses as a requirement/constraint string."""
        try:
            self.matcher(s)
        except UnsupportedVersionError:
            return False
        return True

    def is_valid_constraint_list(self, s):
        """
        Used for processing some metadata fields: validate a bare
        constraint list by wrapping it in a dummy requirement.
        """
        return self.is_valid_matcher('dummy_name (%s)' % s)

    def suggest(self, s):
        """Return a suggested valid version for *s*, or None when this
        scheme has no suggester."""
        if self.suggester is None:
            return None
        return self.suggester(s)
_SCHEMES = {
    'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
                                _suggest_normalized_version),
    # The legacy scheme accepts anything, so the suggestion is the input
    # itself.  The suggester is stored as a plain instance attribute and
    # invoked as ``self.suggester(s)`` with a single argument, so it must
    # not take a ``self`` parameter: the previous ``lambda self, s: s``
    # raised TypeError whenever suggest() was called on this scheme.
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
    'semantic': VersionScheme(_semantic_key, SemanticMatcher,
                              _suggest_semantic_version),
}

# 'default' is an alias for the normalized (PEP 440-style) scheme.
_SCHEMES['default'] = _SCHEMES['normalized']


def get_scheme(name):
    """Return the VersionScheme registered under *name*.

    Raises ValueError for an unknown scheme name.
    """
    if name not in _SCHEMES:
        raise ValueError('unknown scheme name: %r' % name)
    return _SCHEMES[name]
| gpl-3.0 |
Neamar/django | tests/forms_tests/tests/tests.py | 108 | 16238 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import (
CharField, FileField, Form, ModelChoiceField, ModelForm,
)
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from django.utils import six
from ..models import (
BoundaryModel, ChoiceFieldModel, ChoiceModel, ChoiceOptionModel, Defaults,
FileModel, Group, OptionalMultiChoiceModel,
)
class ChoiceFieldForm(ModelForm):
    # ModelForm over ChoiceFieldModel exposing every model field.
    class Meta:
        model = ChoiceFieldModel
        fields = '__all__'
class OptionalMultiChoiceModelForm(ModelForm):
    # ModelForm over OptionalMultiChoiceModel (has a blank=True m2m field).
    class Meta:
        model = OptionalMultiChoiceModel
        fields = '__all__'
class ChoiceFieldExclusionForm(ModelForm):
    # The declared form field deliberately shares its name with an
    # excluded model field; the CharField wins (exercised by the
    # ManyToManyExclusionTestCase below).
    multi_choice = CharField(max_length=50)

    class Meta:
        exclude = ['multi_choice']
        model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
    # Exercises a CharField with choices and a blank ('') empty value.
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice']
class EmptyIntegerLabelChoiceForm(ModelForm):
    # Exercises an IntegerField with choices whose empty value is None.
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice_integer']
class EmptyCharLabelNoneChoiceForm(ModelForm):
    # Exercises a nullable CharField with choices (empty value None).
    class Meta:
        model = ChoiceModel
        fields = ['name', 'choice_string_w_none']
class FileForm(Form):
    # Plain (non-model) form with a single required file upload.
    file1 = FileField()
class TestTicket12510(TestCase):
    ''' It is not necessary to generate choices for ModelChoiceField (regression test for #12510). '''
    def setUp(self):
        self.groups = [Group.objects.create(name=name) for name in 'abc']

    def test_choices_not_fetched_when_not_rendering(self):
        # only one query is required to pull the model from DB
        with self.assertNumQueries(1):
            # clean() performs the single pk lookup; constructing the
            # field itself must not evaluate the choices queryset.
            field = ModelChoiceField(Group.objects.order_by('-name'))
            self.assertEqual('a', field.clean(self.groups[0].pk).name)
class TestTicket14567(TestCase):
    """
    Check that the return values of ModelMultipleChoiceFields are QuerySets
    """
    def test_empty_queryset_return(self):
        "If a model's ManyToManyField has blank=True and is saved with no data, a queryset is returned."
        option = ChoiceOptionModel.objects.create(name='default')
        # An empty submitted value must still clean to an (empty) QuerySet.
        form = OptionalMultiChoiceModelForm({'multi_choice_optional': '', 'multi_choice': [option.pk]})
        self.assertTrue(form.is_valid())
        # Check that the empty value is a QuerySet
        self.assertIsInstance(form.cleaned_data['multi_choice_optional'], models.query.QuerySet)
        # While we're at it, test whether a QuerySet is returned if there *is* a value.
        self.assertIsInstance(form.cleaned_data['multi_choice'], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
    # Regression tests for form fields whose model default is callable
    # and returns instances/querysets (refs #10792, #13769, #7287).
    def test_no_empty_option(self):
        "If a model's ForeignKey has blank=False and a default, no empty option is created (Refs #10792)."
        option = ChoiceOptionModel.objects.create(name='default')

        choices = list(ChoiceFieldForm().fields['choice'].choices)
        self.assertEqual(len(choices), 1)
        self.assertEqual(choices[0], (option.pk, six.text_type(option)))

    def test_callable_initial_value(self):
        "The initial value for a callable default returning a queryset is the pk (refs #13769)"
        ChoiceOptionModel.objects.create(id=1, name='default')
        ChoiceOptionModel.objects.create(id=2, name='option 2')
        ChoiceOptionModel.objects.create(id=3, name='option 3')
        # Each field's hidden "initial-*" input must carry the pk of the
        # default instance, and that option must render selected.
        self.assertHTMLEqual(ChoiceFieldForm().as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="1" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="1" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="1" id="initial-id_multi_choice_0" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1" selected="selected">ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="1" id="initial-id_multi_choice_int_0" /></p>""")

    def test_initial_instance_value(self):
        "Initial instances for model fields may also be instances (refs #7287)"
        ChoiceOptionModel.objects.create(id=1, name='default')
        obj2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        obj3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        # initial accepts model instances (and querysets for m2m fields),
        # not just pks; multiple initial values render multiple hidden
        # "initial-*" inputs.
        self.assertHTMLEqual(ChoiceFieldForm(initial={
            'choice': obj2,
            'choice_int': obj2,
            'multi_choice': [obj2, obj3],
            'multi_choice_int': ChoiceOptionModel.objects.exclude(name="default"),
        }).as_p(), """<p><label for="id_choice">Choice:</label> <select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice" value="2" id="initial-id_choice" /></p>
<p><label for="id_choice_int">Choice int:</label> <select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select><input type="hidden" name="initial-choice_int" value="2" id="initial-id_choice_int" /></p>
<p><label for="id_multi_choice">Multi choice:</label> <select multiple="multiple" name="multi_choice" id="id_multi_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice" value="2" id="initial-id_multi_choice_0" />
<input type="hidden" name="initial-multi_choice" value="3" id="initial-id_multi_choice_1" /></p>
<p><label for="id_multi_choice_int">Multi choice int:</label> <select multiple="multiple" name="multi_choice_int" id="id_multi_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected="selected">ChoiceOption 2</option>
<option value="3" selected="selected">ChoiceOption 3</option>
</select><input type="hidden" name="initial-multi_choice_int" value="2" id="initial-id_multi_choice_int_0" />
<input type="hidden" name="initial-multi_choice_int" value="3" id="initial-id_multi_choice_int_1" /></p>""")
class FormsModelTestCase(TestCase):
    # Assorted ModelForm regression tests: unicode uploads, numeric
    # boundary values, and model-default handling.
    def test_unicode_filename(self):
        # FileModel with unicode filename and data #########################
        f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
        self.assertTrue(f.is_valid())
        self.assertIn('file1', f.cleaned_data)
        m = FileModel.objects.create(file=f.cleaned_data['file1'])
        # The non-ASCII name survives storage (spelled with \u escapes here).
        self.assertEqual(m.file.name, 'tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt')
        m.delete()

    def test_boundary_conditions(self):
        # Boundary conditions on a PostitiveIntegerField #########################
        class BoundaryForm(ModelForm):
            class Meta:
                model = BoundaryModel
                fields = '__all__'

        f = BoundaryForm({'positive_integer': 100})
        self.assertTrue(f.is_valid())
        f = BoundaryForm({'positive_integer': 0})
        self.assertTrue(f.is_valid())
        f = BoundaryForm({'positive_integer': -100})
        self.assertFalse(f.is_valid())

    def test_formfield_initial(self):
        # Formfield initial values ########
        # If the model has default values for some fields, they are used as the formfield
        # initial values.
        class DefaultsForm(ModelForm):
            class Meta:
                model = Defaults
                fields = '__all__'

        self.assertEqual(DefaultsForm().fields['name'].initial, 'class default value')
        self.assertEqual(DefaultsForm().fields['def_date'].initial, datetime.date(1980, 1, 1))
        self.assertEqual(DefaultsForm().fields['value'].initial, 42)
        # A callable default must be re-evaluated for every form instance.
        r1 = DefaultsForm()['callable_default'].as_widget()
        r2 = DefaultsForm()['callable_default'].as_widget()
        self.assertNotEqual(r1, r2)

        # In a ModelForm that is passed an instance, the initial values come from the
        # instance's values, not the model's defaults.
        foo_instance = Defaults(name='instance value', def_date=datetime.date(1969, 4, 4), value=12)
        instance_form = DefaultsForm(instance=foo_instance)
        self.assertEqual(instance_form.initial['name'], 'instance value')
        self.assertEqual(instance_form.initial['def_date'], datetime.date(1969, 4, 4))
        self.assertEqual(instance_form.initial['value'], 12)

        from django.forms import CharField

        class ExcludingForm(ModelForm):
            name = CharField(max_length=255)

            class Meta:
                model = Defaults
                exclude = ['name', 'callable_default']

        f = ExcludingForm({'name': 'Hello', 'value': 99, 'def_date': datetime.date(1999, 3, 2)})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data['name'], 'Hello')
        obj = f.save()
        # Excluded fields keep the model default even though matching
        # POST data was submitted.
        self.assertEqual(obj.name, 'class default value')
        self.assertEqual(obj.value, 99)
        self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
    def test_invalid_loading_order(self):
        """
        Test for issue 10405
        """
        # Building a ModelForm while the "B" forward reference is still
        # unresolved must raise ValueError.
        class A(models.Model):
            ref = models.ForeignKey("B", models.CASCADE)

        class Meta:
            model = A
            fields = '__all__'

        self.assertRaises(ValueError, ModelFormMetaclass, str('Form'), (ModelForm,), {'Meta': Meta})

        class B(models.Model):
            pass

    def test_valid_loading_order(self):
        """
        Test for issue 10405
        """
        # Once the referenced model exists, the same construction succeeds.
        class C(models.Model):
            ref = models.ForeignKey("D", models.CASCADE)

        class D(models.Model):
            pass

        class Meta:
            model = C
            fields = '__all__'

        self.assertTrue(issubclass(ModelFormMetaclass(str('Form'), (ModelForm,), {'Meta': Meta}), ModelForm))
class ManyToManyExclusionTestCase(TestCase):
    def test_m2m_field_exclusion(self):
        # Issue 12337. save_instance should honor the passed-in exclude keyword.
        opt1 = ChoiceOptionModel.objects.create(id=1, name='default')
        opt2 = ChoiceOptionModel.objects.create(id=2, name='option 2')
        opt3 = ChoiceOptionModel.objects.create(id=3, name='option 3')
        initial = {
            'choice': opt1,
            'choice_int': opt1,
        }
        # 'multi_choice' here is the overriding CharField of
        # ChoiceFieldExclusionForm, so string data is valid for it.
        data = {
            'choice': opt2.pk,
            'choice_int': opt2.pk,
            'multi_choice': 'string data!',
            'multi_choice_int': [opt1.pk],
        }
        instance = ChoiceFieldModel.objects.create(**initial)
        instance.multi_choice = instance.multi_choice_int = [opt2, opt3]
        form = ChoiceFieldExclusionForm(data=data, instance=instance)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['multi_choice'], data['multi_choice'])
        form.save()
        self.assertEqual(form.instance.choice.pk, data['choice'])
        self.assertEqual(form.instance.choice_int.pk, data['choice_int'])
        # The excluded m2m keeps the pre-existing relations; the
        # non-excluded one is updated from the submitted data.
        self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
        self.assertEqual([obj.pk for obj in form.instance.multi_choice_int.all()], data['multi_choice_int'])
class EmptyLabelTestCase(TestCase):
    # The ChoiceModel fields declare a custom empty label
    # ("No Preference"); these tests cover rendering, saving and
    # get_FOO_display for blank char and null integer choices.
    def test_empty_field_char(self):
        f = EmptyCharLabelChoiceForm()
        self.assertHTMLEqual(f.as_p(),
                             """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice">Choice:</label> <select id="id_choice" name="choice">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>""")

    def test_empty_field_char_none(self):
        f = EmptyCharLabelNoneChoiceForm()
        self.assertHTMLEqual(f.as_p(),
                             """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label> <select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected="selected">No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>""")

    def test_save_empty_label_forms(self):
        # Test that saving a form with a blank choice results in the expected
        # value being stored in the database.
        tests = [
            (EmptyCharLabelNoneChoiceForm, 'choice_string_w_none', None),
            (EmptyIntegerLabelChoiceForm, 'choice_integer', None),
            (EmptyCharLabelChoiceForm, 'choice', ''),
        ]
        for form, key, expected in tests:
            f = form({'name': 'some-key', key: ''})
            self.assertTrue(f.is_valid())
            m = f.save()
            self.assertEqual(expected, getattr(m, key))
            # The empty label is also what get_FOO_display() reports.
            self.assertEqual('No Preference',
                             getattr(m, 'get_{}_display'.format(key))())

    def test_empty_field_integer(self):
        f = EmptyIntegerLabelChoiceForm()
        self.assertHTMLEqual(f.as_p(),
                             """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" /></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>""")

    def test_get_display_value_on_none(self):
        m = ChoiceModel.objects.create(name='test', choice='', choice_integer=None)
        self.assertIsNone(m.choice_integer)
        self.assertEqual('No Preference', m.get_choice_integer_display())

    def test_html_rendering_of_prepopulated_models(self):
        # A None value pre-selects the empty-label option ...
        none_model = ChoiceModel(name='none-test', choice_integer=None)
        f = EmptyIntegerLabelChoiceForm(instance=none_model)
        self.assertHTMLEqual(f.as_p(),
                             """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" value="none-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="" selected="selected">No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>""")

        # ... while a concrete value selects the matching option instead.
        foo_model = ChoiceModel(name='foo-test', choice_integer=1)
        f = EmptyIntegerLabelChoiceForm(instance=foo_model)
        self.assertHTMLEqual(f.as_p(),
                             """<p><label for="id_name">Name:</label> <input id="id_name" maxlength="10" name="name" type="text" value="foo-test"/></p>
<p><label for="id_choice_integer">Choice integer:</label> <select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected="selected">Foo</option>
<option value="2">Bar</option>
</select></p>""")
| bsd-3-clause |
Phrozyn/MozDef | tests/mozdef_util/query_models/test_less_than_match.py | 2 | 1534 | from positive_test_suite import PositiveTestSuite
from negative_test_suite import NegativeTestSuite
from mozdef_util.query_models import LessThanMatch
class TestLessThanMatchPositiveTestSuite(PositiveTestSuite):
    """Events whose 'utctimestamp' lies strictly before the boundary
    must be matched by LessThanMatch."""

    def query_tests(self):
        boundary_date = "2016-08-12T21:07:12.316450+00:00"
        earlier_events = [
            {'utctimestamp': '2015-08-12T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-02-12T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-08-11T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-08-12T20:07:12.316450+00:00'},
        ]
        return {LessThanMatch('utctimestamp', boundary_date): earlier_events}
class TestLessThanMatchNegativeTestSuite(NegativeTestSuite):
    """Events at or after the boundary (including the exact boundary
    value itself) must NOT be matched by LessThanMatch."""

    def query_tests(self):
        boundary_date = "2016-08-12T21:07:12.316450+00:00"
        later_or_equal_events = [
            {'utctimestamp': '2017-08-12T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-09-12T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-08-14T21:07:12.316450+00:00'},
            {'utctimestamp': '2016-08-12T23:07:12.316450+00:00'},
            {'utctimestamp': '2016-08-12T21:08:12.316450+00:00'},
            {'utctimestamp': '2016-08-12T21:07:13.316450+00:00'},
            {'utctimestamp': '2016-08-12T21:07:12.416450+00:00'},
            {'utctimestamp': '2016-08-12T21:07:12.316450+00:00'},
        ]
        return {LessThanMatch('utctimestamp', boundary_date):
                later_or_equal_events}
| mpl-2.0 |
dagdaggo/coala | coalib/parsing/CliParsing.py | 9 | 4694 | import os
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from coalib.parsing.DefaultArgParser import default_arg_parser
from coalib.parsing.LineParser import LineParser
from coalib.settings.Section import Section, append_to_sections
def parse_cli(arg_list=None,
              origin=os.getcwd(),
              arg_parser=None,
              key_value_delimiters=('=', ':'),
              comment_seperators=(),
              key_delimiters=(',',),
              section_override_delimiters=(".",)):
    """
    Parses the CLI arguments and creates sections out of them.

    :param arg_list:                    The CLI argument list.
    :param origin:                      Directory used to interpret
                                        relative paths given as argument.
    :param arg_parser:                  Instance of ArgParser used to
                                        parse none-setting arguments.
    :param key_value_delimiters:        Delimiters separating key and
                                        value in setting arguments.
    :param comment_seperators:          Allowed prefixes for comments.
    :param key_delimiters:              Delimiter separating multiple
                                        keys of one setting argument.
    :param section_override_delimiters: Delimiter between section and key
                                        name (e.g. the '.' in
                                        sect.key = value).
    :return:                            A dictionary holding section
                                        names as keys and the sections
                                        themselves as value.
    """
    # Note: arg_list can also be []. Hence we cannot use
    # `arg_list = arg_list or default_list`
    if arg_list is None:
        arg_list = sys.argv[1:]
    if arg_parser is None:
        arg_parser = default_arg_parser()
    origin += os.path.sep
    sections = OrderedDict(default=Section('Default'))
    line_parser = LineParser(key_value_delimiters,
                             comment_seperators,
                             key_delimiters,
                             {},
                             section_override_delimiters)

    parsed_args = sorted(vars(arg_parser.parse_args(arg_list)).items())
    for arg_key, arg_value in parsed_args:
        if arg_key == 'settings' and arg_value is not None:
            parse_custom_settings(sections,
                                  arg_value,
                                  origin,
                                  line_parser)
            continue
        if isinstance(arg_value, list):
            # Merge list-valued arguments into one comma separated string.
            arg_value = ",".join(str(item) for item in arg_value)
        append_to_sections(sections,
                           arg_key,
                           arg_value,
                           origin,
                           from_cli=True)

    return sections
def parse_custom_settings(sections,
                          custom_settings_list,
                          origin,
                          line_parser):
    """
    Parses the custom settings given to coala via ``-S something=value``.

    :param sections:             The Section dictionary to add to (mutated).
    :param custom_settings_list: The list of setting definition strings.
    :param origin:               The originating directory.
    :param line_parser:          The LineParser to use.
    """
    for definition in custom_settings_list:
        _, key_tuples, value, _ = line_parser.parse(definition)
        # Each tuple is (section_name, key); section_name may be None for
        # the default section.
        for section_name, key in key_tuples:
            append_to_sections(sections,
                               key=key,
                               value=value,
                               origin=origin,
                               section_name=section_name,
                               from_cli=True)
def check_conflicts(sections):
    """
    Checks whether any conflicting arguments were passed.

    :param sections:    Dictionary of section name -> section, each section
                        iterable over its setting keys.
    :return:            True if no conflicts occur.
    :raises SystemExit: If there are conflicting arguments (exit code: 2).
    """
    conflicts = {'no_config': {'save', 'find_config'}}
    for section in sections:
        present_keys = set(sections[section])
        for key in present_keys & conflicts.keys():
            clashing = present_keys & conflicts[key]
            if clashing:
                ArgumentParser().exit(2,
                                      key + " cannot be given at the same "
                                      "time with " + ', '.join(clashing))
    return True
| agpl-3.0 |
lopezezequiel/qmsg | qmsg/settingstravis.py | 1 | 3839 | """
Django settings for qmsg project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a hard-coded key is tolerable only because this settings
# module is used for Travis CI runs; never reuse it in production.
SECRET_KEY = '4xnr-ieh9ewxt12oqytn26gtk4y+v87iogbrcy8%3me^txk#sd'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# 'testserver' is the host Django's test client uses.
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'testserver']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'apirestv1'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is disabled below — presumably to
    # simplify exercising the REST API under CI; confirm before any
    # production deployment.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'qmsg.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_ROOT, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'qmsg.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Connection details come from environment variables (set by the CI job).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ['DATABASE_NAME'],
        'USER': os.environ['DATABASE_USER'],
        'PASSWORD': os.environ['DATABASE_PASSWORD'],
        'HOST': os.environ['DATABASE_HOST'],
        'PORT': os.environ['DATABASE_PORT'],
        'TEST': {
            'NAME': os.environ['TEST_DATABASE_NAME'],
        },
    },
    'sqlite3': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test_db',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'es-AR'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| gpl-3.0 |
gradel/mezzanine-blocks | setup.py | 2 | 1214 | from setuptools import setup, find_packages
NAME = 'mezzanine-blocks'
VERSION = '0.9.4'

# Short summary shown as the PyPI description.
DESCRIPTION = """
A fork of https://github.com/molokov/mezzanine-blocks.git to make it work with Django 1.9
A mezzanine flavored fork of django-flatblocks.
The goal of this project is to be able to easily create custom blocks of
text/HTML in the template, and can be editable via admin.
"""

# Read the README with an explicit encoding and close the handle, instead
# of leaking a file object via a bare open().read().
with open('README.md', encoding='utf-8') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name=NAME,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    version=VERSION,
    author='Daverio Antony',
    author_email='contact@circonflex.fr',
    url='https://github.com/Cajoline/mezzanine-blocks',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # The old distutils 'requires' keyword is metadata-only and ignored
    # by pip; 'install_requires' actually enforces the dependency.
    install_requires=['mezzanine'],
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.9',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ]
)
| bsd-2-clause |
loungin/git-repo | subcmds/prune.py | 90 | 1792 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from color import Coloring
from command import PagedCommand
class Prune(PagedCommand):
    common = True
    helpSummary = "Prune (delete) already merged topics"
    helpUsage = """
%prog [<project>...]
"""

    def Execute(self, opt, args):
        """Collect prunable branches from all selected projects and print
        a report of the branches that are still pending."""
        branches = []
        for project in self.GetProjects(args):
            branches.extend(project.PruneHeads())
        if not branches:
            return

        class Report(Coloring):
            def __init__(self, config):
                Coloring.__init__(self, config, 'status')
                self.project = self.printer('header', attr='bold')

        out = Report(branches[0].project.config)
        out.project('Pending Branches')
        out.nl()

        project = None
        for branch in branches:
            # Emit a project header whenever the owning project changes.
            if project != branch.project:
                project = branch.project
                out.nl()
                out.project('project %s/' % project.relpath)
                out.nl()

            commits = branch.commits
            current_marker = '*' if branch.name == project.CurrentBranch else ' '
            plural = 's' if len(commits) != 1 else ' '
            print('%s %-33s (%2d commit%s, %s)' % (
                current_marker,
                branch.name,
                len(commits),
                plural,
                branch.date))
| apache-2.0 |
yamahata/neutron | neutron/debug/shell.py | 7 | 3478 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux import interface
from neutron.common import legacy
from neutron.debug.debug_agent import NeutronDebugAgent
from neutron.openstack.common import importutils
from neutronclient.common import exceptions as exc
from neutronclient.common import utils
from neutronclient.shell import env, NeutronShell, NEUTRON_API_VERSION
# CLI sub-command name -> command class, for the 2.0 API.  Classes are
# resolved lazily by dotted path so the commands module is only imported
# when the shell starts.
COMMAND_V2 = {
    'probe-create': utils.import_class(
        'neutron.debug.commands.CreateProbe'),
    'probe-delete': utils.import_class(
        'neutron.debug.commands.DeleteProbe'),
    'probe-list': utils.import_class(
        'neutron.debug.commands.ListProbe'),
    'probe-clear': utils.import_class(
        'neutron.debug.commands.ClearProbe'),
    'probe-exec': utils.import_class(
        'neutron.debug.commands.ExecProbe'),
    'ping-all': utils.import_class(
        'neutron.debug.commands.PingAll'),
    #TODO(nati) ping, netcat , nmap, bench
}

# Debug command tables keyed by API version; only v2.0 is defined.
COMMANDS = {'2.0': COMMAND_V2}
class NeutronDebugShell(NeutronShell):
    # NeutronShell extended with the probe/debug commands defined above.
    def __init__(self, api_version):
        super(NeutronDebugShell, self).__init__(api_version)
        # Register every debug command for the requested API version.
        for k, v in COMMANDS[api_version].items():
            self.command_manager.add_command(k, v)

    def build_option_parser(self, description, version):
        parser = super(NeutronDebugShell, self).build_option_parser(
            description, version)
        # Fall back to the legacy QUANTUM_* variable for compatibility
        # with pre-rename deployments.
        default = (
            env('NEUTRON_TEST_CONFIG_FILE') or env('QUANTUM_TEST_CONFIG_FILE')
        )
        parser.add_argument(
            '--config-file',
            default=default,
            help=_('Config file for interface driver '
                   '(You may also use l3_agent.ini)'))
        return parser

    def initialize_app(self, argv):
        super(NeutronDebugShell, self).initialize_app(argv)
        if not self.options.config_file:
            raise exc.CommandError(
                _("You must provide a config file for bridge -"
                  " either --config-file or env[NEUTRON_TEST_CONFIG_FILE]"))
        client = self.client_manager.neutron
        # Register every option group the debug agent and interface
        # driver expect *before* the config file is parsed below; the
        # registration order matters to oslo.config.
        cfg.CONF.register_opts(interface.OPTS)
        cfg.CONF.register_opts(NeutronDebugAgent.OPTS)
        config.register_interface_driver_opts_helper(cfg.CONF)
        config.register_use_namespaces_opts_helper(cfg.CONF)
        config.register_root_helper(cfg.CONF)
        cfg.CONF(['--config-file', self.options.config_file])
        config.setup_logging(cfg.CONF)
        legacy.modernize_quantum_config(cfg.CONF)
        # Instantiate the configured interface driver and hand it to the
        # debug agent used by the probe commands.
        driver = importutils.import_object(cfg.CONF.interface_driver, cfg.CONF)
        self.debug_agent = NeutronDebugAgent(cfg.CONF, client, driver)
def main(argv=None):
    """Entry point: run the debug shell with *argv* (or sys.argv)."""
    # Explicit None check: a caller passing an explicitly empty argument
    # list should get empty args; the previous `argv or sys.argv[1:]`
    # conflated [] with None and fell back to sys.argv.
    if argv is None:
        argv = sys.argv[1:]
    return NeutronDebugShell(NEUTRON_API_VERSION).run(argv)
| apache-2.0 |
HeeroLuca/ONEPONE | scripts/build-all.py | 1474 | 10189 | #! /usr/bin/env python
# Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import re
import shutil
import sys
version = 'build-all.py, version 0.01'

# Output tree for all per-target builds, relative to the kernel source dir.
build_dir = '../all-kernels'

# Default make targets; replaced by --oldconfig / --make-target in main().
make_command = ["vmlinux", "modules", "dtbs"]

# NOTE(review): this aliases (does not copy) os.environ, so the updates
# below also mutate the real process environment.
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'KCONFIG_NOTIMESTAMP': 'true' })
make_env.setdefault('CROSS_COMPILE', 'arm-none-linux-gnueabi-')

# Parsed command-line options; populated in main().
all_options = {}
def error(msg):
    """Report a non-fatal error on stderr; execution continues."""
    message = "error: %s\n" % msg
    sys.stderr.write(message)
def fail(msg):
    """Fail with a user-printed message"""
    # Print via error() for consistent formatting, then abort the build.
    error(msg)
    sys.exit(1)
def check_kernel():
    """Sanity-check that the current directory looks like an MSM kernel tree."""
    required = ('MAINTAINERS', 'arch/arm/mach-msm/Kconfig')
    if not all(os.path.isfile(path) for path in required):
        fail("This doesn't seem to be an MSM kernel dir")
def check_build():
    """Ensure that the build directory is present, creating it if needed."""
    # Bugfix: 'errno' was referenced below but never imported at module
    # level, so any mkdir failure raised NameError instead of being
    # handled.  Import it locally to keep the change self-contained.
    import errno
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            # A concurrent creation of the directory is fine; anything
            # else (permissions, bad path component) is a real failure.
            if exc.errno != errno.EEXIST:
                raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
    """Get the full list of defconfigs appropriate for this tree.

    Returns a dict mapping target name -> defconfig path.
    """
    patterns = (
        r'[fm]sm[0-9]*_defconfig',
        r'apq*_defconfig',
        r'qsd*_defconfig',
        r'msmkrypton*_defconfig',
    )
    found = {}
    for pattern in patterns:
        for path in glob.glob('arch/arm/configs/' + pattern):
            # Strip the trailing "_defconfig" (10 chars) to get the target.
            found[os.path.basename(path)[:-10]] = path
    return found
class Builder:
    """Runs one make invocation, teeing its output into a log file.

    In --verbose mode the child's output is echoed to stdout as well;
    otherwise one dot per output line is printed as a progress indicator,
    64 dots per row.
    """

    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        """Run *args* as a subprocess; return its exit status."""
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        # Read from the raw fd instead of iterating proc.stdout so the
        # output is seen incrementally rather than block-buffered.
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                # One dot per newline in this chunk; wrap every 64 dots.
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                    sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result
# Targets that failed to build; only populated when --keep-going is set.
failed_targets = []

def build(target):
    """Configure and build one *target* in its own directory under build_dir.

    Copies the target's defconfig to <dest>/.config, runs
    'make <target>_defconfig', then builds each requested make target one
    at a time (skipped entirely under --updateconfigs).  With --configs or
    --updateconfigs the regenerated defconfig is copied back into the tree.
    """
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    staging_dir = 'install_staging'
    modi_dir = '%s' % staging_dir
    hdri_dir = '%s/usr' % staging_dir
    # Start from a clean staging area so stale modules/headers never leak
    # into this build's install step.
    shutil.rmtree(os.path.join(dest_dir, staging_dir), ignore_errors=True)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()

    if not all_options.updateconfigs:
        # Build targets can be dependent upon the completion of previous
        # build targets, so build them one at a time.
        cmd_line = ['make',
            'INSTALL_HDR_PATH=%s' % hdri_dir,
            'INSTALL_MOD_PATH=%s' % modi_dir,
            'O=%s' % dest_dir]
        build_targets = []
        for c in make_command:
            # Option-looking entries (-j4, --foo) stay on the command
            # line; everything else is a make target built separately.
            if re.match(r'^-{1,2}\w', c):
                cmd_line.append(c)
            else:
                build_targets.append(c)
        for t in build_targets:
            # NOTE(review): this local 'build' shadows the function name.
            build = Builder(log_name)
            result = build.run(cmd_line + [t])
            if result != 0:
                if all_options.keep_going:
                    failed_targets.append(target)
                    fail_or_error = error
                else:
                    fail_or_error = fail
                fail_or_error("Failed to build %s, see %s" %
                              (target, build.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
    """Build each target in *targets*; fail at the end if any broke.

    *allconf* maps target name -> defconfig path (from scan_configs());
    it is consulted only when --updateconfigs appends an option first.
    """
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    # failed_targets is only non-empty under --keep-going; report them all.
    if failed_targets:
        fail('\n '.join(["Failed targets:"] +
            [target for target in failed_targets]))
def main():
    """Parse options and dispatch: list targets, or build all/perf/noperf
    or an explicit list of targets."""
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
    %prog [options] all -- Build all targets
    %prog [options] target target ... -- List specific targets
    %prog [options] perf -- Build all perf targets
    %prog [options] noperf -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))
    (options, args) = parser.parse_args()
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print " %s" % target
        sys.exit(0)

    # --oldconfig replaces the make targets outright; --make-target
    # replaces them with the user-supplied list.
    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    # -j / -l are forwarded to make by appending to the target list; build()
    # recognizes option-shaped entries and keeps them on the command line.
    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        # Explicit target names: validate each against the scanned configs.
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
| gpl-2.0 |
dhruvagarwal/django | django/contrib/gis/geos/prepared.py | 328 | 2445 | from .base import GEOSBase
from .error import GEOSException
from .libgeos import geos_version_info
from .prototypes import prepared as capi
class PreparedGeometry(GEOSBase):
    """
    A geometry that is prepared for performing certain operations.
    At the moment this includes the contains covers, and intersects
    operations.
    """
    ptr_type = capi.PREPGEOM_PTR

    def __init__(self, geom):
        # Keeping a reference to the original geometry object to prevent it
        # from being garbage collected which could then crash the prepared one
        # See #21662
        self._base_geom = geom
        from .geometry import GEOSGeometry
        if not isinstance(geom, GEOSGeometry):
            raise TypeError
        self.ptr = capi.geos_prepare(geom.ptr)

    def __del__(self):
        # 'capi' can already be torn down during interpreter shutdown.
        if self._ptr and capi:
            capi.prepared_destroy(self._ptr)

    def _require_geos_33(self, op):
        """Raise GEOSException unless GEOS is at least 3.3.0.

        Shared guard for the predicates added in GEOS 3.3; *op* is the
        operation name used in the error message.
        NOTE(review): this is a lexicographic string comparison (as in the
        original code); it would misorder versions like '3.10.0'.
        """
        if geos_version_info()['version'] < '3.3.0':
            raise GEOSException(
                "%s on prepared geometries requires GEOS >= 3.3.0" % op)

    def contains(self, other):
        return capi.prepared_contains(self.ptr, other.ptr)

    def contains_properly(self, other):
        return capi.prepared_contains_properly(self.ptr, other.ptr)

    def covers(self, other):
        return capi.prepared_covers(self.ptr, other.ptr)

    def intersects(self, other):
        return capi.prepared_intersects(self.ptr, other.ptr)

    # Added in GEOS 3.3:
    def crosses(self, other):
        self._require_geos_33("crosses")
        return capi.prepared_crosses(self.ptr, other.ptr)

    def disjoint(self, other):
        self._require_geos_33("disjoint")
        return capi.prepared_disjoint(self.ptr, other.ptr)

    def overlaps(self, other):
        self._require_geos_33("overlaps")
        return capi.prepared_overlaps(self.ptr, other.ptr)

    def touches(self, other):
        self._require_geos_33("touches")
        return capi.prepared_touches(self.ptr, other.ptr)

    def within(self, other):
        self._require_geos_33("within")
        return capi.prepared_within(self.ptr, other.ptr)
| bsd-3-clause |
sallaire/Sick-Beard | sickbeard/searchBacklog.py | 47 | 7629 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import threading
import sickbeard
from sickbeard import db, scheduler
from sickbeard import search_queue
from sickbeard import logger
from sickbeard import ui
#from sickbeard.common import *
class BacklogSearchScheduler(scheduler.Scheduler):
    """Scheduler wrapper for the backlog search action."""

    def forceSearch(self):
        """Make the next cycle run immediately by resetting the bookkeeping."""
        self.action._set_lastBacklog(1)
        self.lastRun = datetime.datetime.fromordinal(1)

    def nextRun(self):
        """Return the date the next backlog search is due."""
        last = self.action._lastBacklog
        if last <= 1:
            return datetime.date.today()
        return datetime.date.fromordinal(last + self.action.cycleTime)
class BacklogSearcher:
    """Finds missed episodes and queues backlog searches for them.

    A full backlog runs at most once per cycleTime days; in between, only
    the last week of episodes is re-checked.
    """

    def __init__(self):
        self._lastBacklog = self._get_lastBacklog()
        self.cycleTime = 7  # days between full backlog runs
        self.lock = threading.Lock()
        self.amActive = False
        self.amPaused = False
        self.amWaiting = False
        self._resetPI()

    def _resetPI(self):
        # Reset the progress-indicator state.
        self.percentDone = 0
        self.currentSearchInfo = {'title': 'Initializing'}

    def getProgressIndicator(self):
        """Return a ProgressIndicator while a search is active, else None."""
        if self.amActive:
            return ui.ProgressIndicator(self.percentDone, self.currentSearchInfo)
        else:
            return None

    def am_running(self):
        logger.log(u"amWaiting: "+str(self.amWaiting)+", amActive: "+str(self.amActive), logger.DEBUG)
        return (not self.amWaiting) and self.amActive

    def searchBacklog(self, which_shows=None):
        """Queue backlog searches for *which_shows* (default: all shows)."""
        if which_shows:
            show_list = which_shows
        else:
            show_list = sickbeard.showList

        if self.amActive == True:
            logger.log(u"Backlog is still running, not starting it again", logger.DEBUG)
            return

        self._get_lastBacklog()

        curDate = datetime.date.today().toordinal()
        fromDate = datetime.date.fromordinal(1)

        # If a full cycle isn't due yet (and no explicit shows were given),
        # only look back one week.
        if not which_shows and not curDate - self._lastBacklog >= self.cycleTime:
            logger.log(u"Running limited backlog on recently missed episodes only")
            fromDate = datetime.date.today() - datetime.timedelta(days=7)

        self.amActive = True
        self.amPaused = False

        #myDB = db.DBConnection()
        #numSeasonResults = myDB.select("SELECT DISTINCT(season), showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 AND ep.airdate > ?", [fromDate.toordinal()])

        # get separate lists of the season/date shows
        #season_shows = [x for x in show_list if not x.air_by_date]
        air_by_date_shows = [x for x in show_list if x.air_by_date]

        # figure out how many segments of air by date shows we're going to do
        air_by_date_segments = []
        for cur_id in [x.tvdbid for x in air_by_date_shows]:
            air_by_date_segments += self._get_air_by_date_segments(cur_id, fromDate)

        logger.log(u"Air-by-date segments: "+str(air_by_date_segments), logger.DEBUG)

        #totalSeasons = float(len(numSeasonResults) + len(air_by_date_segments))
        #numSeasonsDone = 0.0

        # go through non air-by-date shows and see if they need any episodes
        for curShow in show_list:

            if curShow.paused:
                continue

            # Air-by-date shows are segmented by month; others by season.
            if curShow.air_by_date:
                segments = [x[1] for x in self._get_air_by_date_segments(curShow.tvdbid, fromDate)]
            else:
                segments = self._get_season_segments(curShow.tvdbid, fromDate)

            for cur_segment in segments:
                self.currentSearchInfo = {'title': curShow.name + " Season "+str(cur_segment)}

                backlog_queue_item = search_queue.BacklogQueueItem(curShow, cur_segment)
                if not backlog_queue_item.wantSeason:
                    logger.log(u"Nothing in season "+str(cur_segment)+" needs to be downloaded, skipping this season", logger.DEBUG)
                else:
                    sickbeard.searchQueueScheduler.action.add_item(backlog_queue_item) #@UndefinedVariable

        # don't consider this an actual backlog search if we only did recent eps
        # or if we only did certain shows
        if fromDate == datetime.date.fromordinal(1) and not which_shows:
            self._set_lastBacklog(curDate)

        self.amActive = False
        self._resetPI()

    def _get_lastBacklog(self):
        """Load the ordinal date of the last full backlog run from the DB."""
        logger.log(u"Retrieving the last check time from the DB", logger.DEBUG)

        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT * FROM info")

        if len(sqlResults) == 0:
            lastBacklog = 1
        elif sqlResults[0]["last_backlog"] == None or sqlResults[0]["last_backlog"] == "":
            lastBacklog = 1
        else:
            lastBacklog = int(sqlResults[0]["last_backlog"])

        self._lastBacklog = lastBacklog
        return self._lastBacklog

    def _get_season_segments(self, tvdb_id, fromDate):
        """Return the distinct season numbers with episodes after fromDate."""
        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT DISTINCT(season) as season FROM tv_episodes WHERE showid = ? AND season > 0 and airdate > ?", [tvdb_id, fromDate.toordinal()])
        return [int(x["season"]) for x in sqlResults]

    def _get_air_by_date_segments(self, tvdb_id, fromDate):
        """Return unique (tvdb_id, 'YYYY-MM') tuples for an air-by-date show."""
        # query the DB for all dates for this show
        myDB = db.DBConnection()
        num_air_by_date_results = myDB.select("SELECT airdate, showid FROM tv_episodes ep, tv_shows show WHERE season != 0 AND ep.showid = show.tvdb_id AND show.paused = 0 ANd ep.airdate > ? AND ep.showid = ?",
            [fromDate.toordinal(), tvdb_id])

        # break them apart into month/year strings
        air_by_date_segments = []
        for cur_result in num_air_by_date_results:
            cur_date = datetime.date.fromordinal(int(cur_result["airdate"]))
            cur_date_str = str(cur_date)[:7]
            cur_tvdb_id = int(cur_result["showid"])

            cur_result_tuple = (cur_tvdb_id, cur_date_str)
            if cur_result_tuple not in air_by_date_segments:
                air_by_date_segments.append(cur_result_tuple)

        return air_by_date_segments

    def _set_lastBacklog(self, when):
        """Persist *when* (an ordinal date) as the last full backlog run."""
        logger.log(u"Setting the last backlog in the DB to " + str(when), logger.DEBUG)

        myDB = db.DBConnection()
        sqlResults = myDB.select("SELECT * FROM info")

        if len(sqlResults) == 0:
            myDB.action("INSERT INTO info (last_backlog, last_TVDB) VALUES (?,?)", [str(when), 0])
        else:
            myDB.action("UPDATE info SET last_backlog=" + str(when))

    def run(self):
        # Always clear the active flag if the search dies, so a crashed run
        # doesn't block every future backlog.
        try:
            self.searchBacklog()
        except:
            self.amActive = False
            raise
| gpl-3.0 |
PanDAWMS/pilot | NordugridATLASSiteInformation.py | 3 | 1933 | # Class definition:
# NordugridATLASSiteInformation
# This class is the Nordugrid-ATLAS site information class inheriting from ATLASSiteInformation
# Instances are generated with SiteInformationFactory via pUtil::getSiteInformation()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# import relevant python/pilot modules
import os
import commands
import SiteMover
from SiteInformation import SiteInformation # Main site information class
from ATLASSiteInformation import ATLASSiteInformation # Main site information class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from PilotErrors import PilotErrors # Error codes
class NordugridATLASSiteInformation(ATLASSiteInformation):
    """Site information for Nordugrid-ATLAS; implemented as a singleton."""

    # private data members (name-mangled class attributes)
    __experiment = "Nordugrid-ATLAS"  # experiment label returned to callers
    __instance = None                 # cached singleton instance

    # Required methods
    def __init__(self):
        """ Default initialization """
        # Nothing to do: all state lives on the class; __new__ handles
        # instance creation.
        pass

    def __new__(cls, *args, **kwargs):
        """ Override the __new__ method to make the class a singleton """
        # NOTE(review): super() is anchored at ATLASSiteInformation, which
        # skips ATLASSiteInformation.__new__ in the MRO -- presumably
        # deliberate (to bypass the parent's own singleton logic); confirm
        # against the parent class before changing.
        if not cls.__instance:
            cls.__instance = super(ATLASSiteInformation, cls).__new__(cls, *args, **kwargs)
        return cls.__instance

    def getExperiment(self):
        """ Return a string with the experiment name """
        return self.__experiment
if __name__ == "__main__":
os.environ['PilotHomeDir'] = os.getcwd()
si = NordugridATLASSiteInformation()
tolog("Experiment: %s" % (si.getExperiment()))
cloud = "CERN"
queuename = si.getTier1Queue(cloud)
if queuename != "":
tolog("Cloud %s has Tier-1 queue %s" % (cloud, queuename))
else:
tolog("Failed to find a Tier-1 queue name for cloud %s" % (cloud))
| apache-2.0 |
xuvw/viewfinder | backend/watchdog/test/watchdog_test.py | 13 | 4678 | # Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Viewfinder watchdog tests.
"""
__author__ = 'matt@emailscrubbed.com (Matt Tracy)'
import logging
import time
from functools import partial
from viewfinder.backend.base import util, testing
from viewfinder.backend.watchdog import watchdog
from viewfinder.backend.watchdog.scenario import ScenarioDevice, Scenario
from viewfinder.backend.base.testing import async_test, async_test_timeout
class LogCatcher(logging.Handler):
    """Log handler that collects INFO+ records and fires *max_callback*
    once exactly *max_records* records have been seen.

    Also usable as a context manager that installs itself on the root
    logger on entry and removes itself on exit.
    """

    def __init__(self, max_records, max_callback):
        super(LogCatcher, self).__init__(logging.INFO)
        self.saved_records = []
        self.max_records = max_records
        self.max_callback = max_callback

    def emit(self, record):
        self.saved_records.append(record)
        # Fire exactly once, the first time the threshold is reached;
        # later records are still collected.
        if len(self.saved_records) == self.max_records:
            self.max_callback()

    def __enter__(self):
        logging.getLogger().addHandler(self)
        # Bugfix: return self so 'with LogCatcher(...) as catcher:' binds
        # the handler instead of None (backward compatible -- existing
        # callers use a bare 'with catcher:').
        return self

    def __exit__(self, t, v, tb):
        logging.getLogger().removeHandler(self)
class WatchdogTestCase(testing.BaseTestCase, testing.LogMatchTestCase):
    """Tests for the watchdog scenario loop and alerting."""

    def testScenario(self):
        """Test that a scenario properly handles log messages."""
        fake_device = object()
        def _ScenarioOne(device, logger, callback):
            # The scenario must receive the device it was started with.
            self.assertTrue(device is fake_device)
            logger.info('Info message')
            callback()

        scenario = Scenario('My Scenario', _ScenarioOne, 0.5)
        # Stop the IO loop after 5 records have been captured.
        catcher = LogCatcher(5, self.stop)
        with catcher:
            scenario.StartLoop(fake_device)
            self.wait(timeout=10)
        self.assertEqual(5, len(catcher.saved_records))
        for r in catcher.saved_records:
            self.assertEqual(r.scenario, 'My Scenario')
            self.assertEqual(r.levelno, logging.INFO)

    def testScenarioError(self):
        """Test that a scenario properly handles thrown exceptions."""
        fake_device = object()
        def _ScenarioTwo(device, logger, callback):
            raise ValueError('Value Error')

        scenario = Scenario('My Scenario', _ScenarioTwo, 0.5)
        catcher = LogCatcher(5, self.stop)
        with catcher:
            scenario.StartLoop(fake_device)
            self.wait(timeout=10)
        self.assertEqual(5, len(catcher.saved_records))
        for r in catcher.saved_records:
            self.assertEqual(r.scenario, 'My Scenario')
            # Exceptions raised by the scenario surface as ERROR records.
            self.assertEqual(r.levelno, logging.ERROR)

    def testServiceHealthMessage(self):
        """Test that the formatted message for service health alerts matches the expected format."""
        expected = '(2 Alerts): Alert description.(2 machines), Cluster alert.(Cluster)'
        report = {'status': 'ALERT',
                  'alerts': [
                      {'name': 'Alert1', 'count': 2, 'cluster': False, 'description': 'Alert description.'},
                      {'name': 'Alert2', 'count': 1, 'cluster': True, 'description': 'Cluster alert.'},
                  ]
                  }
        self.assertEqual(expected, watchdog._FormatServiceHealthReport(report))

    def testWatchdog(self):
        """End-to-end: each scenario's records trigger (or don't trigger)
        alerts according to their severity."""
        alerts = {'crit': [], 'err': [], 'warn': [], 'info': []}
        called = {'crit': 0, 'err': 0, 'warn': 0, 'info': 0}
        def _CriticalScenario(device, logger, callback):
            called['crit'] += 1
            logger.critical('Critical error')
            callback()

        def _ErrorScenario(device, logger, callback):
            called['err'] += 1
            logger.error('Error')
            callback()

        def _WarningScenario(device, logger, callback):
            called['warn'] += 1
            logger.warning('Warning')
            callback()

        def _InfoScenario(device, logger, callback):
            called['info'] += 1
            logger.info('Info')
            callback()

        def _AlertHook(scenario, message):
            # Capture alerts per scenario name instead of sending them.
            alerts[scenario.name].append(message)

        wd = watchdog.Watchdog(object(), [Scenario('crit', _CriticalScenario, 0.5),
                                          Scenario('err', _ErrorScenario, 0.5),
                                          Scenario('warn', _WarningScenario, 0.5),
                                          Scenario('info', _InfoScenario, 0.5)
                                          ])
        wd._alert_hook = _AlertHook
        catcher = LogCatcher(23, self.stop)
        with catcher:
            wd.Run(self.stop)
            self.wait()

        # critical/error always alert; warnings alert after a threshold
        # (hence the -2); info never alerts.
        self.assertTrue(called['crit'] > 0)
        self.assertTrue(len(alerts['crit']) == called['crit'])
        self.assertTrue(all([m == '[crit] Critical error' for m in alerts['crit']]))
        self.assertTrue(called['err'] > 0)
        self.assertTrue(len(alerts['err']) == called['err'])
        self.assertTrue(all([m == '[err] Error' for m in alerts['err']]))
        self.assertTrue(called['warn'] > 0)
        self.assertTrue(len(alerts['warn']) == called['warn'] - 2)
        self.assertTrue(all([m == '[warn] Warning' for m in alerts['warn']]))
        self.assertTrue(called['info'] > 0)
        self.assertTrue(len(alerts['info']) == 0)
| apache-2.0 |
StormTrooper/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x0c4.py | 253 | 5024 | data = (
# Transliteration table consumed by unidecode: entry i is the ASCII
# replacement for the code point whose low byte is i in this Unicode page
# (Hangul syllables in the 'ss*' series).
'sswals', # 0x00
'sswalt', # 0x01
'sswalp', # 0x02
'sswalh', # 0x03
'sswam', # 0x04
'sswab', # 0x05
'sswabs', # 0x06
'sswas', # 0x07
'sswass', # 0x08
'sswang', # 0x09
'sswaj', # 0x0a
'sswac', # 0x0b
'sswak', # 0x0c
'sswat', # 0x0d
'sswap', # 0x0e
'sswah', # 0x0f
'sswae', # 0x10
'sswaeg', # 0x11
'sswaegg', # 0x12
'sswaegs', # 0x13
'sswaen', # 0x14
'sswaenj', # 0x15
'sswaenh', # 0x16
'sswaed', # 0x17
'sswael', # 0x18
'sswaelg', # 0x19
'sswaelm', # 0x1a
'sswaelb', # 0x1b
'sswaels', # 0x1c
'sswaelt', # 0x1d
'sswaelp', # 0x1e
'sswaelh', # 0x1f
'sswaem', # 0x20
'sswaeb', # 0x21
'sswaebs', # 0x22
'sswaes', # 0x23
'sswaess', # 0x24
'sswaeng', # 0x25
'sswaej', # 0x26
'sswaec', # 0x27
'sswaek', # 0x28
'sswaet', # 0x29
'sswaep', # 0x2a
'sswaeh', # 0x2b
'ssoe', # 0x2c
'ssoeg', # 0x2d
'ssoegg', # 0x2e
'ssoegs', # 0x2f
'ssoen', # 0x30
'ssoenj', # 0x31
'ssoenh', # 0x32
'ssoed', # 0x33
'ssoel', # 0x34
'ssoelg', # 0x35
'ssoelm', # 0x36
'ssoelb', # 0x37
'ssoels', # 0x38
'ssoelt', # 0x39
'ssoelp', # 0x3a
'ssoelh', # 0x3b
'ssoem', # 0x3c
'ssoeb', # 0x3d
'ssoebs', # 0x3e
'ssoes', # 0x3f
'ssoess', # 0x40
'ssoeng', # 0x41
'ssoej', # 0x42
'ssoec', # 0x43
'ssoek', # 0x44
'ssoet', # 0x45
'ssoep', # 0x46
'ssoeh', # 0x47
'ssyo', # 0x48
'ssyog', # 0x49
'ssyogg', # 0x4a
'ssyogs', # 0x4b
'ssyon', # 0x4c
'ssyonj', # 0x4d
'ssyonh', # 0x4e
'ssyod', # 0x4f
'ssyol', # 0x50
'ssyolg', # 0x51
'ssyolm', # 0x52
'ssyolb', # 0x53
'ssyols', # 0x54
'ssyolt', # 0x55
'ssyolp', # 0x56
'ssyolh', # 0x57
'ssyom', # 0x58
'ssyob', # 0x59
'ssyobs', # 0x5a
'ssyos', # 0x5b
'ssyoss', # 0x5c
'ssyong', # 0x5d
'ssyoj', # 0x5e
'ssyoc', # 0x5f
'ssyok', # 0x60
'ssyot', # 0x61
'ssyop', # 0x62
'ssyoh', # 0x63
'ssu', # 0x64
'ssug', # 0x65
'ssugg', # 0x66
'ssugs', # 0x67
'ssun', # 0x68
'ssunj', # 0x69
'ssunh', # 0x6a
'ssud', # 0x6b
'ssul', # 0x6c
'ssulg', # 0x6d
'ssulm', # 0x6e
'ssulb', # 0x6f
'ssuls', # 0x70
'ssult', # 0x71
'ssulp', # 0x72
'ssulh', # 0x73
'ssum', # 0x74
'ssub', # 0x75
'ssubs', # 0x76
'ssus', # 0x77
'ssuss', # 0x78
'ssung', # 0x79
'ssuj', # 0x7a
'ssuc', # 0x7b
'ssuk', # 0x7c
'ssut', # 0x7d
'ssup', # 0x7e
'ssuh', # 0x7f
'ssweo', # 0x80
'ssweog', # 0x81
'ssweogg', # 0x82
'ssweogs', # 0x83
'ssweon', # 0x84
'ssweonj', # 0x85
'ssweonh', # 0x86
'ssweod', # 0x87
'ssweol', # 0x88
'ssweolg', # 0x89
'ssweolm', # 0x8a
'ssweolb', # 0x8b
'ssweols', # 0x8c
'ssweolt', # 0x8d
'ssweolp', # 0x8e
'ssweolh', # 0x8f
'ssweom', # 0x90
'ssweob', # 0x91
'ssweobs', # 0x92
'ssweos', # 0x93
'ssweoss', # 0x94
'ssweong', # 0x95
'ssweoj', # 0x96
'ssweoc', # 0x97
'ssweok', # 0x98
'ssweot', # 0x99
'ssweop', # 0x9a
'ssweoh', # 0x9b
'sswe', # 0x9c
'ssweg', # 0x9d
'sswegg', # 0x9e
'sswegs', # 0x9f
'sswen', # 0xa0
'sswenj', # 0xa1
'sswenh', # 0xa2
'sswed', # 0xa3
'sswel', # 0xa4
'sswelg', # 0xa5
'sswelm', # 0xa6
'sswelb', # 0xa7
'sswels', # 0xa8
'sswelt', # 0xa9
'sswelp', # 0xaa
'sswelh', # 0xab
'sswem', # 0xac
'ssweb', # 0xad
'sswebs', # 0xae
'sswes', # 0xaf
'sswess', # 0xb0
'ssweng', # 0xb1
'sswej', # 0xb2
'sswec', # 0xb3
'sswek', # 0xb4
'sswet', # 0xb5
'sswep', # 0xb6
'ssweh', # 0xb7
'sswi', # 0xb8
'sswig', # 0xb9
'sswigg', # 0xba
'sswigs', # 0xbb
'sswin', # 0xbc
'sswinj', # 0xbd
'sswinh', # 0xbe
'sswid', # 0xbf
'sswil', # 0xc0
'sswilg', # 0xc1
'sswilm', # 0xc2
'sswilb', # 0xc3
'sswils', # 0xc4
'sswilt', # 0xc5
'sswilp', # 0xc6
'sswilh', # 0xc7
'sswim', # 0xc8
'sswib', # 0xc9
'sswibs', # 0xca
'sswis', # 0xcb
'sswiss', # 0xcc
'sswing', # 0xcd
'sswij', # 0xce
'sswic', # 0xcf
'sswik', # 0xd0
'sswit', # 0xd1
'sswip', # 0xd2
'sswih', # 0xd3
'ssyu', # 0xd4
'ssyug', # 0xd5
'ssyugg', # 0xd6
'ssyugs', # 0xd7
'ssyun', # 0xd8
'ssyunj', # 0xd9
'ssyunh', # 0xda
'ssyud', # 0xdb
'ssyul', # 0xdc
'ssyulg', # 0xdd
'ssyulm', # 0xde
'ssyulb', # 0xdf
'ssyuls', # 0xe0
'ssyult', # 0xe1
'ssyulp', # 0xe2
'ssyulh', # 0xe3
'ssyum', # 0xe4
'ssyub', # 0xe5
'ssyubs', # 0xe6
'ssyus', # 0xe7
'ssyuss', # 0xe8
'ssyung', # 0xe9
'ssyuj', # 0xea
'ssyuc', # 0xeb
'ssyuk', # 0xec
'ssyut', # 0xed
'ssyup', # 0xee
'ssyuh', # 0xef
'sseu', # 0xf0
'sseug', # 0xf1
'sseugg', # 0xf2
'sseugs', # 0xf3
'sseun', # 0xf4
'sseunj', # 0xf5
'sseunh', # 0xf6
'sseud', # 0xf7
'sseul', # 0xf8
'sseulg', # 0xf9
'sseulm', # 0xfa
'sseulb', # 0xfb
'sseuls', # 0xfc
'sseult', # 0xfd
'sseulp', # 0xfe
'sseulh', # 0xff
)
| gpl-2.0 |
JCROM-Android/jcrom_external_chromium_org | ppapi/cpp/documentation/doxy_cleanup.py | 173 | 4455 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''
import optparse
import os
import re
import shutil
import string
import sys
try:
from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
print ("This tool requires the BeautifulSoup package "
"(see http://www.crummy.com/software/BeautifulSoup/).\n"
"Make sure that the file BeautifulSoup.py is either in this directory "
"or is available in your PYTHON_PATH")
raise
class HTMLFixer(object):
    '''This class cleans up the html strings as produced by Doxygen
    '''

    def __init__(self, html):
        self.soup = BeautifulSoup(html)

    def FixTableHeadings(self):
        '''Fixes the doxygen table headings.

        This includes:
          - Using bare <h2> title row instead of row embedded in <tr><td> in table
          - Putting the "name" attribute into the "id" attribute of the <tr> tag.
          - Splitting up tables into multiple separate tables if a table
            heading appears in the middle of a table.

        For example, this html:
         <table>
          <tr><td colspan="2"><h2><a name="pub-attribs"></a>
          Data Fields List</h2></td></tr>
          ...
         </table>

        would be converted to this:
         <h2>Data Fields List</h2>

         <table>
          ...
         </table>
        '''
        # Pass 1: turn each embedded <tr><td><h2><a name=...> heading row
        # into a bare <h2> tag, remembering them for pass 2.
        table_headers = []
        for tag in self.soup.findAll('tr'):
            if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
                #tag['id'] = tag.td.h2.a['name']
                tag.string = tag.td.h2.a.next
                tag.name = 'h2'
                table_headers.append(tag)

        # reverse the list so that earlier tags don't delete later tags
        table_headers.reverse()

        # Split up tables that have multiple table header (th) rows
        for tag in table_headers:
            print "Header tag: %s is %s" % (tag.name, tag.string.strip())
            # Is this a heading in the middle of a table?
            if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
                print "Splitting Table named %s" % tag.string.strip()
                table = tag.parent
                table_parent = table.parent
                table_index = table_parent.contents.index(table)
                # Move the heading and every row after it into a fresh
                # <table> inserted right after the original one.
                new_table = Tag(self.soup, name='table', attrs=table.attrs)
                table_parent.insert(table_index + 1, new_table)
                tag_index = table.contents.index(tag)
                for index, row in enumerate(table.contents[tag_index:]):
                    new_table.insert(index, row)
            # Now move the <h2> tag to be in front of the <table> tag
            assert tag.parent.name == 'table'
            table = tag.parent
            table_parent = table.parent
            table_index = table_parent.contents.index(table)
            table_parent.insert(table_index, tag)

    def RemoveTopHeadings(self):
        '''Removes <div> sections with a header, tabs, or navpath class attribute'''
        header_tags = self.soup.findAll(
            name='div',
            attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
        [tag.extract() for tag in header_tags]

    def FixAll(self):
        self.FixTableHeadings()
        self.RemoveTopHeadings()

    def __str__(self):
        # Serialized (fixed) document.
        return str(self.soup)
def main():
    '''Main entry for the doxy_cleanup utility

    doxy_cleanup takes a list of html files and modifies them in place.'''
    parser = optparse.OptionParser(usage='Usage: %prog [options] files...')
    parser.add_option('-m', '--move', dest='move', action='store_true',
            default=False, help='move html files to "original_html"')
    options, files = parser.parse_args()
    if not files:
        parser.print_usage()
        return 1
    for filename in files:
        try:
            with open(filename, 'r') as file:
                html = file.read()
            print "Processing %s" % filename
            fixer = HTMLFixer(html)
            fixer.FixAll()
            # Rewrite the file in place with the cleaned-up markup.
            with open(filename, 'w') as file:
                file.write(str(fixer))
            if options.move:
                # Keep a copy of the (already modified) file under a
                # sibling "original_html" directory.
                new_directory = os.path.join(
                    os.path.dirname(os.path.dirname(filename)), 'original_html')
                if not os.path.exists(new_directory):
                    os.mkdir(new_directory)
                shutil.move(filename, new_directory)
        except:
            # Bare except is deliberate here: report which file failed,
            # then re-raise so the traceback is preserved.
            print "Error while processing %s" % filename
            raise
    return 0

if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
sckott/ingee_app | build/markdown/build/lib/markdown/extensions/sane_lists.py | 25 | 1260 | #!/usr/bin/env python
"""
Sane List Extension for Python-Markdown
=======================================
Modify the behavior of Lists in Python-Markdown t act in a sane manor.
In standard Markdown sytex, the following would constitute a single
ordered list. However, with this extension, the output would include
two lists, the first an ordered list and the second and unordered list.
1. ordered
2. list
* unordered
* list
Copyright 2011 - [Waylan Limberg](http://achinghead.com)
"""
import re
import markdown
class SaneOListProcessor(markdown.blockprocessors.OListProcessor):
    """Ordered-list processor whose child pattern only matches numbered
    items ('1.'), so a bullet line never continues an ordered list."""
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.))[ ]+(.*)')
    SIBLING_TAGS = ['ol']
class SaneUListProcessor(markdown.blockprocessors.UListProcessor):
    """Unordered-list processor whose child pattern only matches bullet
    items (*, +, -), so a numbered line never continues a bullet list."""
    CHILD_RE = re.compile(r'^[ ]{0,3}(([*+-]))[ ]+(.*)')
    SIBLING_TAGS = ['ul']
class SaneListExtension(markdown.Extension):
    """ Add sane lists to Markdown. """

    def extendMarkdown(self, md, md_globals):
        """ Override existing Processors. """
        # Replace the stock list processors with the stricter variants.
        md.parser.blockprocessors['olist'] = SaneOListProcessor(md.parser)
        md.parser.blockprocessors['ulist'] = SaneUListProcessor(md.parser)
def makeExtension(configs=None):
    """Entry point used by Markdown to instantiate the extension.

    The default was previously a shared mutable dict ({}); use None as
    the sentinel and build a fresh dict per call instead.
    """
    if configs is None:
        configs = {}
    return SaneListExtension(configs=configs)
| mit |
Kami/sgrstats.com | sgrstats/news/models.py | 1 | 1151 | from django.db import models
from markupfield.fields import MarkupField
from django.contrib.auth.models import User
from taxonomy.models import TaxonomyMap
class News(models.Model):
    """A news article with markdown excerpt/body and taxonomy categories."""
    title = models.CharField(max_length = 250)
    title_slug = models.SlugField()
    date_published = models.DateTimeField(auto_now_add = True)
    author = models.ForeignKey(User)
    excerpt = MarkupField(default_markup_type = 'markdown')
    # body may be left blank for short announcements
    body = MarkupField(default_markup_type = 'markdown', blank = True)

    class Meta():
        # default pluralization would be "newss"
        verbose_name_plural = 'news'

    def __unicode__(self):
        return self.title

    @models.permalink
    def get_absolute_url(self):
        """Reverse the 'news_details' URL for this article."""
        return ('news_details', (), {'title_slug': self.title_slug})

    def get_categories(self):
        """Return the TaxonomyMap rows that categorize this article."""
        categories = TaxonomyMap.objects.filter(object_id = self.pk, content_type__model = 'news')
        return categories
def get_categories_flat(self):
categories = TaxonomyMap.objects.filter(object_id = self.pk, content_type__model = 'news').values_list('term__term', flat = True)
return categories | apache-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/django_extensions-0.5-py2.7.egg/django_extensions/management/commands/create_command.py | 30 | 3590 | import os
from django.core.management.base import CommandError, AppCommand, _make_writeable
from optparse import make_option
class Command(AppCommand):
    # Django management command: scaffold a management/commands directory
    # for an app by copying the bundled 'command_template'.
    option_list = AppCommand.option_list + (
        make_option('--name', '-n', action='store', dest='command_name', default='sample',
            help='The name to use for the management command'),
        make_option('--base', '-b', action='store', dest='base_command', default='Base',
            help='The base class used for implementation of this command. Should be one of Base, App, Label, or NoArgs'),
    )
    help = ("Creates a Django management command directory structure for the given app name"
        " in the current directory.")
    args = "[appname]"
    label = 'application name'
    requires_model_validation = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True
    def handle_app(self, app, **options):
        # Create <cwd>/<app_name>/ if needed, then copy the command template
        # into it, renaming 'sample' files after --name.
        directory = os.getcwd()
        app_name = app.__name__.split('.')[-2]  # drop trailing '.models'
        project_dir = os.path.join(directory, app_name)
        if not os.path.exists(project_dir):
            try:
                os.mkdir(project_dir)
            except OSError, e:
                # Python 2 except syntax; surface the OS error as a CLI error.
                raise CommandError(e)
        copy_template('command_template', project_dir, options.get('command_name'), '%sCommand' % options.get('base_command'))
def copy_template(template_name, copy_to, command_name, base_command):
"""copies the specified template directory to the copy_to location"""
import django_extensions
import re
import shutil
template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)
handle_method = "handle(self, *args, **options)"
if base_command == 'AppCommand':
handle_method = "handle_app(self, app, **options)"
elif base_command == 'LabelCommand':
handle_method = "handle_label(self, label, **options)"
elif base_command == 'NoArgsCommand':
handle_method = "handle_noargs(self, **options)"
# walks the template structure and copies it
for d, subdirs, files in os.walk(template_dir):
relative_dir = d[len(template_dir)+1:]
if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
os.mkdir(os.path.join(copy_to, relative_dir))
for i, subdir in enumerate(subdirs):
if subdir.startswith('.'):
del subdirs[i]
for f in files:
if f.endswith('.pyc') or f.startswith('.DS_Store'):
continue
path_old = os.path.join(d, f)
path_new = os.path.join(copy_to, relative_dir, f.replace('sample', command_name))
if os.path.exists(path_new):
path_new = os.path.join(copy_to, relative_dir, f)
if os.path.exists(path_new):
continue
path_new = path_new.rstrip(".tmpl")
fp_old = open(path_old, 'r')
fp_new = open(path_new, 'w')
fp_new.write(fp_old.read().replace('{{ command_name }}', command_name).replace('{{ base_command }}', base_command).replace('{{ handle_method }}', handle_method))
fp_old.close()
fp_new.close()
try:
shutil.copymode(path_old, path_new)
_make_writeable(path_new)
except OSError:
sys.stderr.write(style.NOTICE("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new))
| apache-2.0 |
zcchen/shadowsocks | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
libsodium = None
loaded = False
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
    """Locate libsodium, declare the ctypes prototypes, and allocate the
    shared output buffer.  Sets the module-level `loaded` flag so this
    runs at most once (invoked lazily from SodiumCrypto.__init__)."""
    global loaded, libsodium, buf
    libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
                                  'libsodium')
    if libsodium is None:
        raise Exception('libsodium not found')
    # crypto_stream_*_xor_ic(out, in, inlen, nonce, block_counter, key)
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    # Shared scratch buffer, grown on demand by SodiumCrypto.update().
    buf = create_string_buffer(buf_size)
    loaded = True
class SodiumCrypto(object):
    """Streaming salsa20/chacha20 cipher backed by libsodium.

    Because the libsodium xor_ic functions take a *block* counter while the
    stream position may stop mid-block, update() pads the input to the
    current block boundary and strips the padding from the output.
    """
    def __init__(self, cipher_name, key, iv, op):
        # `op` (encrypt/decrypt flag) is unused: stream-cipher XOR is
        # symmetric, so the same transform does both directions.
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0
    def update(self, data):
        """Encrypt/decrypt `data`, resuming at the current stream offset."""
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        if buf_size < padding + l:
            # Grow the shared buffer geometrically to amortize reallocations.
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        # Pass the whole-block counter; the padding accounts for the
        # intra-block offset.
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
ciphers = {
'salsa20': (32, 8, SodiumCrypto),
'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
    # Round-trip check: encrypt then decrypt with fresh cipher instances.
    cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)

def test_chacha20():
    # Same round-trip check for the chacha20 stream cipher.
    cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)

if __name__ == '__main__':
    test_chacha20()
    test_salsa20()
| apache-2.0 |
guessit-io/guessit | guessit/test/test_benchmark.py | 33 | 1336 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use,pointless-statement,missing-docstring,invalid-name,line-too-long
import time
import pytest
from ..api import guessit
def case1():
    # Single movie file name with language/codec tokens.
    return guessit('Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv')

def case2():
    # Path with directory component and non-ASCII characters in the name.
    return guessit('Movies/Fantastic Mr Fox/Fantastic.Mr.Fox.2009.DVDRip.{x264+LC-AAC.5.1}{Fr-Eng}{Sub.Fr-Eng}-™.[sharethefiles.com].mkv')

def case3():
    # TV episode path with SxxEyy-style numbering and subtitle tokens.
    return guessit('Series/dexter/Dexter.5x02.Hello,.Bandit.ENG.-.sub.FR.HDTV.XviD-AlFleNi-TeaM.[tvu.org.ru].avi')

def case4():
    # Movie path with a leading date-like prefix and release-group tags.
    return guessit('Movies/The Doors (1991)/09.03.08.The.Doors.(1991).BDRip.720p.AC3.X264-HiS@SiLUHD-English.[sharethefiles.com].mkv')
@pytest.mark.benchmark(
    group="Performance Tests",
    min_time=1,
    max_time=2,
    min_rounds=5,
    timer=time.time,
    disable_gc=True,
    warmup=False
)
# Permanently skipped: remove/flip this marker to run the benchmarks locally.
@pytest.mark.skipif(True, reason="Disabled")
class TestBenchmark(object):
    # pytest-benchmark suite timing guessit() on the four fixture names above.
    # Each test only asserts a truthy result; timing is the point.
    def test_case1(self, benchmark):
        ret = benchmark(case1)
        assert ret
    def test_case2(self, benchmark):
        ret = benchmark(case2)
        assert ret
    def test_case3(self, benchmark):
        ret = benchmark(case3)
        assert ret
    def test_case4(self, benchmark):
        ret = benchmark(case4)
        assert ret
| lgpl-3.0 |
btrdeveloper/btr | Classes/GUI.py | 5 | 3240 | __author__ = 'naetech'
import sys
import wx
import wx.html
import wx.html2
import wx.lib.wxpTag
from Compiler import Compiler
class GUI:
    # Thin wrapper around wxPython for the two windows this app shows.
    def show_error(self, title, message):
        # Run a modal HTML error dialog in its own wx app loop.
        # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython.
        app = wx.PySimpleApp()
        dlg = ErrorDialog(None, title, message)
        dlg.ShowModal()
        dlg.Destroy()
        app.MainLoop()
    def show_browser(self, maximized, fullscreen, width, height, port):
        # Open the embedded browser pointed at the local server and block
        # in the wx main loop until the window closes.
        app = wx.App()
        browser_window = WebBrowser(width, height, None, -1)
        browser_window.browser.LoadURL("http://localhost:%s" % (port))
        if maximized:
            browser_window.Maximize()
        browser_window.Show()
        if fullscreen:
            browser_window.ShowFullScreen(True)
        app.MainLoop()
# update the settings file to allow the user to change the full screen toggle
# update the settings file to allow the user to change the button to exit full screen
class WebBrowser(wx.Frame):
    """Frame embedding a wx.html2 WebView with F11/Escape full-screen keys."""
    def __init__(self, width, height, *args, **kwds):
        wx.Frame.__init__(self, *args, **kwds)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.browser = wx.html2.WebView.New(self)
        sizer.Add(self.browser, 1, wx.EXPAND, 10)
        self.SetSizer(sizer)
        self.SetSize((width, height))
        if Compiler.is_linux():
            # Window icon must be set explicitly on Linux builds.
            self.icon = wx.Icon("./icon.png", wx.BITMAP_TYPE_PNG)
            self.SetIcon(self.icon)
        self.Bind(wx.EVT_CLOSE, self.event_browser_closed)
        self.Bind(wx.EVT_CHAR_HOOK, self.handle_keystrokes)
        self.Bind(wx.html2.EVT_WEBVIEW_TITLE_CHANGED, self.title_changed, self.browser)
    def title_changed(self, event):
        # Mirror the page <title> onto the frame's title bar.
        self.SetTitle(event.GetString())
    def handle_keystrokes(self, event):
        # Escape leaves full screen; F11 toggles it; anything else passes on.
        key_code = event.GetKeyCode()
        if key_code == wx.WXK_ESCAPE:
            self.ShowFullScreen(False)
        elif key_code == wx.WXK_F11:
            if self.IsFullScreen():
                self.ShowFullScreen(False)
            else:
                self.ShowFullScreen(True)
        else:
            event.Skip()  # let other handlers see the key
    def event_browser_closed(self, event):
        self.Destroy()
class ErrorDialog(wx.Dialog):
    # Modal error dialog rendered as HTML with an embedded wx OK button.
    # The template takes two %s substitutions: the banner title and the
    # message body; "%%" in width escapes a literal percent sign.
    text = '''
<html>
<body bgcolor="#FFF">
<center><table bgcolor="#F2DEDE" width="100%%" cellspacing="0"
cellpadding="0" border="1" bordercolor="red">
<tr>
    <td align="center" color="red">
    <font color="red">
    <h1>%s</h1>
    </font>
    </td>
</tr>
</table>
<p cellspacing="0" cellpadding="5">
%s
</p>
<p>
<wxp module="wx" class="Button">
    <param name="label" value="OK">
    <param name="id" value="ID_OK">
</wxp>
</p>
</center>
</body>
</html>
'''
    def __init__(self, parent, title, error_msg):
        wx.Dialog.__init__(self, parent, -1, 'System Error',)
        html = wx.html.HtmlWindow(self, -1, size=(420, -1))
        if "gtk2" in wx.PlatformInfo:
            html.SetStandardFonts()
        py_version = sys.version.split()[0]  # NOTE(review): unused variable
        txt = self.text % (title, error_msg)
        html.SetPage(txt)
        btn = html.FindWindowById(wx.ID_OK)  # NOTE(review): unused variable
        # Size the window to the rendered HTML plus a small margin.
        ir = html.GetInternalRepresentation()
        html.SetSize( (ir.GetWidth()+25, ir.GetHeight()+25) )
        self.SetClientSize(html.GetSize())
self.CentreOnParent(wx.BOTH) | mit |
saurabh6790/ON-RISAPP | patches/june_2013/p05_remove_unused_doctypes.py | 30 | 1916 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
    """One-off patch: delete obsolete DocType definitions.

    Each name below is a DocType that has been removed from the codebase;
    dropping its definition record keeps stale metadata out of the site.
    """
    obsolete_doctypes = (
        "Announcement",
        "Authorization Rules",
        "Blog Subscriber",
        "Books",
        "Call Log",
        "Call Log Details",
        "Campaign Expense",
        "Communication Log",
        "Compaint Note",
        "Company Control",
        "Competitor",
        "Complaint Detail",
        "Desktop Item",
        "DocType Label",
        "Employee Training",
        "Featured Item",
        "GL Control",
        "GL Mapper",
        "GL Mapper Detail",
        "Holiday Block List",
        "Holiday Block List Allow",
        "Holiday Block List Date",
        "Home Control",
        "Home Settings",
        "Item Parameter",
        "Item Parameter Value",
        "Lead Email CC Detail",
        "Manage Account",
        "Market Segment",
        "Multi Ledger Report Detail",
        "Naming Series Options",
        "Navigation Control",
        "Online Contact",
        "Order Reconciliation",
        "Order Reconciliation Detail",
        "Other Income Detail",
        "Partner Target Detail",
        "Permission Control",
        "Permission Rules",
        "Print Style",
        "Product Catalogue Control",
        "Product Group",
        "Product Settings",
        "Products Settings",
        "Profile Control",
        "Project Activity",
        "Project Activity Update",
        "Project Control",
        "Project Cost Breakup",
        "Related Page",
        "RV Detail",
        "Sales Browser Control",
        "Sandbox",
        "Search Criteria",
        "Series Detail",
        "Shipping Address",
        "SMS Receiver",
        "State",
        "TC Detail",
        "Territory Target Detail",
        "Timesheet",
        "Timesheet Detail",
        "Top Bar Settings",
        "Training Session",
        "Training Session Details",
        "Transfer Ownership",
        "Trash Control",
        "Trend Analyzer Control",
        "Update Delivery Date",
        "User Setting-Profile",
        "User Setting-Role Permission",
        "User Setting-Role User",
        "User Settings",
        "Valuation Control",
        "Website Product Category",
        "Workflow Action Detail",
        "Workflow Engine",
        "Workflow Rule",
        "Workflow Rule Detail",
    )
    for doctype_name in obsolete_doctypes:
        webnotes.delete_doc("DocType", doctype_name)
MrNuggles/HeyBoet-Telegram-Bot | temboo/core/proxy.py | 4 | 4631 | ###############################################################################
#
# temboo.core.proxy.TembooProxy
# temboo.core.proxy.TembooProxifiedChoreography
#
# Classes to proxy choreo execution requests made from the JavaScript SDK
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
###############################################################################
import copy
import json
from temboo.core.exception import TembooError
from temboo.core.exception import TembooDisallowedInputError
from temboo.core.exception import TembooNotFoundError
class TembooProxy(object):
    '''Server-side proxy that executes choreo requests from the JS SDK.

    Holds a registry of named, pre-configured choreographies and runs
    incoming JSON requests against them, enforcing each choreo's
    per-input whitelist.
    '''

    def __init__(self):
        # name -> _TembooProxifiedChoreography registry
        self._choreos = {}

    def _get_choreo(self, name):
        '''Return the registered proxified choreo for ``name``.

        Raises TembooNotFoundError if nothing was registered under that
        name.
        '''
        # Make sure we know about the specified choreo
        if not name in self._choreos:
            raise TembooNotFoundError('Proxied Choreo not found: ' + name)
        return self._choreos[name]

    def add_choreo(self, name, choreo, defaultInputs=None, *allowedUserInputs):
        '''Register ``choreo`` under ``name``.

        defaultInputs -- optional dict of input name -> value applied to
            every execution.  (Previously defaulted to a shared mutable
            ``{}``; ``None`` behaves identically for callers.)
        allowedUserInputs -- input names end users may supply.

        Bug fixes vs. the original: the default-input loop iterated the
        *builtin* ``dict`` type (``for key in dict`` -- a TypeError at
        runtime) instead of ``defaultInputs``, and then called
        ``proxified.set_default_inputs(...)``, a method
        _TembooProxifiedChoreography does not define.  We copy the values
        from ``defaultInputs`` and assign ``_defaultInputs`` directly,
        matching set_default_inputs() below.
        '''
        proxified = _TembooProxifiedChoreography(choreo)
        if defaultInputs:
            # Grab a new input set and copy the defaults into it.
            inputs = choreo.new_input_set()
            for key in defaultInputs:
                inputs.set_input(key, defaultInputs[key])
            # Store on the proxified choreo.
            proxified._defaultInputs = inputs
        if 0 < len(allowedUserInputs):
            proxified.allow_user_inputs(list(allowedUserInputs))
        self._choreos[name] = proxified

    def allow_user_inputs(self, name, *allowedUserInputs):
        '''Whitelist inputs for the named choreo.

        Accepts either several input-name strings or a single list of
        names as the sole vararg.
        '''
        choreo = self._get_choreo(name)
        if 0 < len(allowedUserInputs):
            # NOTE(review): ``basestring`` is Python 2 only although the
            # module header claims 3.x support -- confirm target version.
            if isinstance(allowedUserInputs[0], basestring):
                # one or more input names as strings
                allowedUserInputs = list(allowedUserInputs)
            else:
                # a list of input names
                allowedUserInputs = allowedUserInputs[0]
            choreo.allow_user_inputs(allowedUserInputs)

    def execute(self, request, asJson=True):
        '''Execute a proxied choreo request.

        request -- a JSON string or dict carrying ``name``, ``version``
            and optional ``inputs`` / ``outputFilters``.
        asJson -- return a JSON string when True, else a dict.

        Errors are reported inside the returned payload instead of being
        raised, so the JS SDK always receives a well-formed response.
        '''
        try:
            if isinstance(request, basestring):
                request = json.loads(request)
            if not 'name' in request:
                raise TembooError('Missing choreo name')
            elif not 'version' in request:
                raise TembooError('Missing required JS SDK version')
            # Parse request
            choreo = self._get_choreo(request['name'])
            inputs = request['inputs'] if 'inputs' in request else {}
            outputFilters = request['outputFilters'] if 'outputFilters' in request else {}
            # Execute the proxified choreo
            result = choreo.execute(inputs, outputFilters, request['version'])
            # Build the formatted response
            response = {'success':'true', 'outputs':result.outputs}
            # Respond appropriately
            return json.dumps(response) if asJson else response
        except TembooDisallowedInputError as e:
            err = {'error':'true', 'type':e.type, 'message':e.args[0], 'inputName':e.input_name}
            return json.dumps(err) if asJson else err
        except TembooError as e:
            err = {'error':'true', 'type':e.type, 'message':e.args[0]}
            return json.dumps(err) if asJson else err
        except Exception as e:
            # Last-resort catch-all so no raw traceback leaks to the client.
            err = {'error':'true', 'type':'Server', 'nativeType':type(e).__name__, 'message':'An unknown error occurred'}
            return json.dumps(err) if asJson else err

    def set_default_inputs(self, name, defaultInputs):
        '''Replace the stored default input set for the named choreo.'''
        choreo = self._get_choreo(name)
        choreo._defaultInputs = defaultInputs
class _TembooProxifiedChoreography(object):
    """Wraps a choreo with default inputs and a whitelist of inputs that
    remote (JS SDK) users are allowed to override."""
    def __init__(self, choreo):
        self._allowedUserInputs = []          # input names users may set
        self._defaultInputs = choreo.new_input_set()  # baseline inputs
        self._choreo = choreo
    def allow_user_inputs(self, inputs):
        # Add input names to the whitelist, skipping duplicates.
        for name in inputs:
            if(not name in self._allowedUserInputs):
                self._allowedUserInputs.append(name)
    def execute(self, inputs, outputFilters, jsClientVersion):
        """Run the choreo with defaults + user inputs + output filters.

        Raises TembooDisallowedInputError for any input name not on the
        whitelist.
        """
        # Deep-copy so per-request inputs never mutate the stored defaults.
        fullInputs = copy.deepcopy(self._defaultInputs)
        # verify specified inputs are allowed
        for name in inputs:
            if(not name in self._allowedUserInputs):
                raise TembooDisallowedInputError('Illegal input specified', name)
            fullInputs._set_input(name, inputs[name]);
        # add output filters
        for name in outputFilters:
            fullInputs.add_output_filter(name, outputFilters[name]['path'], outputFilters[name]['variable'])
        # set the client SDK version
        self._choreo._set_js_client_version(jsClientVersion)
        return self._choreo.execute_with_results(fullInputs)
| gpl-3.0 |
icereval/osf.io | addons/onedrive/migrations/0002_auto_20171121_1426.py | 22 | 1426 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-21 20:26
from __future__ import unicode_literals
import datetime
import pytz
from django.db import migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
    # Auto-generated: adds created/modified timestamp fields to the
    # OneDrive addon's NodeSettings and UserSettings models.  The 1970
    # epoch default only backfills existing rows (preserve_default=False).
    dependencies = [
        ('addons_onedrive', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='nodesettings',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='nodesettings',
            name='modified',
            field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='usersettings',
            name='created',
            field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=pytz.utc), verbose_name='created'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='usersettings',
            name='modified',
            field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
        ),
    ]
| apache-2.0 |
companyService/crawler_code | company_service/settings.py | 2 | 1223 | # -*- coding: utf-8 -*-
# Scrapy settings for company_service project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
import os
BOT_NAME = 'company_service'
SPIDER_MODULES = ['company_service.spiders']
NEWSPIDER_MODULE = 'company_service.spiders'
ITEM_PIPELINES = {
'company_service.pipelines.CompanyServicePipeline': 300
}
DB_SERVER = 'MySQLdb'

# Connection parameters for the crawler's MySQL database.  Credentials are
# read from the environment so they are never committed to source control.
# (The original dict was missing the comma after the 'passwd' entry, which
# made this settings module fail to import with a SyntaxError.)
DB_CONNECT = {
    'db': 'contribute_crawler',
    'host': 'mysql',
    'user': os.getenv("DATABASE_USER"),
    'passwd': os.getenv("DATABASE_PASSWD"),
    'charset': 'utf8',
}
EXTENSIONS = {
'company_service.patch.PatchGenerator' : 300,
'company_service.close_sender.CloseSender' : 500,
}
STATSMAILER_RCPTS = [
'windworship2@163.com',
'258831720@qq.com'
]
MAIL_FROM = os.getenv("MAIL_USER")
MAIL_HOST = 'smtp-mail.outlook.com'
MAIL_USER = os.getenv("MAIL_USER")
MAIL_PASS = os.getenv("MAIL_PASSWD")
WEBSERVICE_HOST = '0.0.0.0'
WEBSERVICE_LOGFILE = 'web.log'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'company_service (+http://www.yourdomain.com)'
| mit |
ticklemepierce/osf.io | website/addons/box/tests/utils.py | 24 | 6468 | # -*- coding: utf-8 -*-
import mock
from contextlib import contextmanager
from modularodm import storage
from framework.mongo import set_up_storage
from website.addons.base.testing import OAuthAddonTestCaseMixin, AddonTestCase
from website.addons.box import MODELS
from website.addons.box.model import Box
from website.addons.box.tests.factories import BoxAccountFactory
def init_storage():
    # Bind every Box addon model to MongoDB-backed modular-odm storage
    # (must run before any model is queried in tests).
    set_up_storage(MODELS, storage_class=storage.MongoStorage)
class BoxAddonTestCase(OAuthAddonTestCaseMixin, AddonTestCase):
    """Base test case for the Box addon: wires up the provider, the external
    account factory, and a canned folder configuration."""
    ADDON_SHORT_NAME = 'box'
    ExternalAccountFactory = BoxAccountFactory
    Provider = Box
    def set_node_settings(self, settings):
        # Extend the generic setup with a fixed fake folder selection.
        super(BoxAddonTestCase, self).set_node_settings(settings)
        settings.folder_id = '1234567890'
        settings.folder_name = 'Foo'
mock_responses = {
'folder': {
'name': 'anything',
'item_collection': {
'entries': [
{
'name': 'anything', 'type': 'file', 'id': 'anything'
},
{
'name': 'anything', 'type': 'folder', 'id': 'anything'
},
{
'name': 'anything', 'type': 'anything', 'id': 'anything'
},
]
},
'path_collection': {
'entries': [
{
'name': 'anything', 'type': 'file', 'id': 'anything'
},
{
'name': 'anything', 'type': 'folder', 'id': 'anything'
},
{
'name': 'anything', 'type': 'anything', 'id': 'anything'
},
]
}
},
'put_file': {
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'box',
'size': '77 bytes',
'thumb_exists': False
},
'metadata_list': {
"size": "0 bytes",
"hash": "37eb1ba1849d4b0fb0b28caf7ef3af52",
"bytes": 0,
"thumb_exists": False,
"rev": "714f029684fe",
"modified": "Wed, 27 Apr 2011 22:18:51 +0000",
"path": "/Public",
"is_dir": True,
"icon": "folder_public",
"root": "box",
"contents": [
{
"size": "0 bytes",
"rev": "35c1f029684fe",
"thumb_exists": False,
"bytes": 0,
"modified": "Mon, 18 Jul 2011 20:13:43 +0000",
"client_mtime": "Wed, 20 Apr 2011 16:20:19 +0000",
"path": "/Public/latest.txt",
"is_dir": False,
"icon": "page_white_text",
"root": "box",
"mime_type": "text/plain",
"revision": 220191
},
{
u'bytes': 0,
u'icon': u'folder',
u'is_dir': True,
u'modified': u'Sat, 22 Mar 2014 05:40:29 +0000',
u'path': u'/datasets/New Folder',
u'rev': u'3fed51f002c12fc',
u'revision': 67032351,
u'root': u'box',
u'size': u'0 bytes',
u'thumb_exists': False
}
],
"revision": 29007
},
'metadata_single': {
u'id': 'id',
u'bytes': 74,
u'client_mtime': u'Mon, 13 Jan 2014 20:24:15 +0000',
u'icon': u'page_white',
u'is_dir': False,
u'mime_type': u'text/csv',
u'modified': u'Fri, 21 Mar 2014 05:46:36 +0000',
u'path': '/datasets/foo.txt',
u'rev': u'a2149fb64',
u'revision': 10,
u'root': u'app_folder',
u'size': u'74 bytes',
u'thumb_exists': False
},
'revisions': [{u'bytes': 0,
u'client_mtime': u'Wed, 31 Dec 1969 23:59:59 +0000',
u'icon': u'page_white_picture',
u'is_deleted': True,
u'is_dir': False,
u'mime_type': u'image/png',
u'modified': u'Tue, 25 Mar 2014 03:39:13 +0000',
u'path': u'/svs-v-barks.png',
u'rev': u'3fed741002c12fc',
u'revision': 67032897,
u'root': u'box',
u'size': u'0 bytes',
u'thumb_exists': True},
{u'bytes': 151164,
u'client_mtime': u'Sat, 13 Apr 2013 21:56:36 +0000',
u'icon': u'page_white_picture',
u'is_dir': False,
u'mime_type': u'image/png',
u'modified': u'Tue, 25 Mar 2014 01:45:51 +0000',
u'path': u'/svs-v-barks.png',
u'rev': u'3fed61a002c12fc',
u'revision': 67032602,
u'root': u'box',
u'size': u'147.6 KB',
u'thumb_exists': True}]
}
class MockBox(object):
    """Stand-in for the Box client that serves canned payloads from the
    module-level ``mock_responses`` dict instead of hitting the API."""
    def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
        return mock_responses['put_file']
    def metadata(self, path, list=True, file_limit=25000, hash=None, rev=None,
            include_deleted=False):
        # `list=True` mimics a folder listing; otherwise single-file metadata.
        if list:
            ret = mock_responses['metadata_list']
        else:
            ret = mock_responses['metadata_single']
        ret['path'] = path  # echo the requested path like the real API
        return ret
    # NOTE(review): defined without an explicit `self` -- it is absorbed
    # into *args when called as a method.
    def get_folder(*args, **kwargs):
        return mock_responses['folder']
    def get_file_and_metadata(*args, **kwargs):
        pass
    def file_delete(self, path):
        return mock_responses['metadata_single']
    def revisions(self, path):
        # Stamp the requested path onto every canned revision entry.
        ret = mock_responses['revisions']
        for each in ret:
            each['path'] = path
        return ret
    def get_user_info(self):
        return {'display_name': 'Mr. Box'}
@contextmanager
def patch_client(target, mock_client=None):
    """Patches a function that returns a BoxClient, returning an instance
    of MockBox instead.

    target -- dotted path of the client factory to patch.
    mock_client -- optional replacement object; defaults to a MockBox.

    Usage: ::

        with patch_client('website.addons.box.views.BoxClient') as client:
            # test view that uses the box client.
    """
    with mock.patch(target) as client_getter:
        client = mock_client or MockBox()
        # Any call to the patched factory now yields the mock client.
        client_getter.return_value = client
        yield client
| apache-2.0 |
cmc333333/idea-box | src/idea/migrations/0010_auto__add_field_banner_title__chg_field_banner_text__add_field_idea_ba.py | 6 | 8503 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add Banner.title, widen Banner.text to 2000 chars, and
        link Idea to an optional Banner."""
        # Adding field 'Banner.title'
        db.add_column(u'idea_banner', 'title',
                      self.gf('django.db.models.fields.CharField')(default='Default Title', max_length=50),
                      keep_default=False)
        # Changing field 'Banner.text'
        db.alter_column(u'idea_banner', 'text', self.gf('django.db.models.fields.CharField')(max_length=2000))
        # Adding field 'Idea.banner'
        db.add_column(u'idea_idea', 'banner',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['idea.Banner'], null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the added columns and restore Banner.text to 512
        chars (data beyond 512 chars would be truncated)."""
        # Deleting field 'Banner.title'
        db.delete_column(u'idea_banner', 'title')
        # Changing field 'Banner.text'
        db.alter_column(u'idea_banner', 'text', self.gf('django.db.models.fields.CharField')(max_length=512))
        # Deleting field 'Idea.banner'
        db.delete_column(u'idea_idea', 'banner_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'idea.banner': {
'Meta': {'object_name': 'Banner'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'idea.idea': {
'Meta': {'object_name': 'Idea'},
'banner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.Banner']", 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.State']"}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 28, 0, 0)'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'idea.state': {
'Meta': {'object_name': 'State'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'previous': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['idea.State']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'idea.vote': {
'Meta': {'object_name': 'Vote'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['idea.Idea']"}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 3, 28, 0, 0)'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.tagcategory': {
'Meta': {'object_name': 'TagCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
'create_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"}),
'tag_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['taggit.TagCategory']", 'null': 'True'}),
'tag_creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_related'", 'null': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['idea'] | cc0-1.0 |
trishnaguha/python-fedora | fedora/client/fasproxy.py | 5 | 8425 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2009 Red Hat, Inc.
# This file is part of python-fedora
#
# python-fedora is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# python-fedora is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with python-fedora; if not, see <http://www.gnu.org/licenses/>
#
'''Implement a class that sets up threadsafe communication with the Fedora
Account System
.. moduleauthor:: Ricky Zhou <ricky@fedoraproject.org>
.. moduleauthor:: Toshio Kuratomi <tkuratom@redhat.com>
.. versionadded:: 0.3.17
'''
from fedora.client import AuthError, AppError
from fedora.client.proxyclient import ProxyClient
from fedora import __version__
import logging
log = logging.getLogger(__name__)
class FasProxyClient(ProxyClient):
    '''A threadsafe client to the Fedora Account System.'''

    def __init__(self, base_url='https://admin.fedoraproject.org/accounts/',
                 *args, **kwargs):
        '''A threadsafe client to the Fedora Account System.

        This class is optimized to proxy multiple users to the account system.
        ProxyClient is designed to be threadsafe so that code can instantiate
        one instance of the class and use it for multiple requests for
        different users from different threads.

        If you want something that can manage a single user's connection to
        the Account System then use fedora.client.AccountSystem instead.

        :kwarg base_url: Base of every URL used to contact the server.
            Defaults to the Fedora Project FAS instance.
        :kwarg useragent: useragent string to use.  If not given, default to
            "FAS Proxy Client/VERSION"
        :kwarg session_name: name of the cookie to use with session handling
        :kwarg debug: If True, log debug information
        :kwarg insecure: If True, do not check server certificates against
            their CA's.  This means that man-in-the-middle attacks are
            possible against the `BaseClient`. You might turn this option on
            for testing against a local version of a server with a self-signed
            certificate but it should be off in production.
        '''
        if 'useragent' not in kwargs:
            kwargs['useragent'] = 'FAS Proxy Client/%s' % __version__
        if 'session_as_cookie' in kwargs and kwargs['session_as_cookie']:
            # No need to allow this in FasProxyClient as it's deprecated in
            # ProxyClient
            raise TypeError('FasProxyClient() got an unexpected keyword'
                            ' argument \'session_as_cookie\'')
        # Always hand sessions around as session ids, never as cookie objects
        kwargs['session_as_cookie'] = False
        super(FasProxyClient, self).__init__(base_url, *args, **kwargs)

    def login(self, username, password):
        '''Login to the Account System

        :arg username: username to send to FAS
        :arg password: Password to verify the username with
        :returns: a tuple of the session id FAS has associated with the user
            and the user's account information.  This is similar to what is
            returned by
            :meth:`fedora.client.proxyclient.ProxyClient.get_user_info`
        :raises AuthError: if the username and password do not work
        '''
        return self.send_request(
            '/login',
            auth_params={'username': username, 'password': password}
        )

    def logout(self, session_id):
        '''Logout of the Account System

        :arg session_id: a FAS session_id to remove from FAS
        '''
        self.send_request('/logout', auth_params={'session_id': session_id})

    def refresh_session(self, session_id):
        '''Try to refresh a session_id to prevent it from timing out

        :arg session_id: FAS session_id to refresh
        :returns: session_id that FAS has set now
        '''
        return self.send_request('', auth_params={'session_id': session_id})

    def verify_session(self, session_id):
        '''Verify that a session is active.

        :arg session_id: session_id to verify is currently associated with a
            logged in user
        :returns: True if the session_id is valid.  False otherwise.
        '''
        # Any non-auth error propagates to the caller unchanged; only a
        # failed authentication means "not a valid session".
        try:
            self.send_request('/home', auth_params={'session_id': session_id})
        except AuthError:
            return False
        return True

    def verify_password(self, username, password):
        '''Return whether the username and password pair are valid.

        :arg username: username to try authenticating
        :arg password: password for the user
        :returns: True if the username/password are valid.  False otherwise.
        '''
        # As above: only an authentication failure maps to False.
        try:
            self.send_request('/home',
                              auth_params={'username': username,
                                           'password': password})
        except AuthError:
            return False
        return True

    def get_user_info(self, auth_params):
        '''Retrieve information about a logged in user.

        :arg auth_params: Auth information for a particular user.  For
            instance, this can be a username/password pair or a session_id.
            Refer to
            :meth:`fedora.client.proxyclient.ProxyClient.send_request` for
            all the legal values for this.
        :returns: a tuple of session_id and information about the user.
        :raises AuthError: if the auth_params do not give access
        '''
        request = self.send_request('/user/view', auth_params=auth_params)
        return (request[0], request[1]['person'])

    def person_by_id(self, person_id, auth_params):
        '''Retrieve information about a particular person

        :arg auth_params: Auth information for a particular user.  For
            instance, this can be a username/password pair or a session_id.
            Refer to
            :meth:`fedora.client.proxyclient.ProxyClient.send_request` for
            all the legal values for this.
        :returns: a tuple of session_id and information about the user.
        :raises AppError: if the server returns an exception
        :raises AuthError: if the auth_params do not give access
        '''
        request = self.send_request('/json/person_by_id',
                                    req_params={'person_id': person_id},
                                    auth_params=auth_params)
        if request[1]['success']:
            # In a devel version of FAS, membership info was returned
            # separately
            # This has been corrected in a later version
            # Can remove this code at some point
            if 'approved' in request[1]:
                request[1]['person']['approved_memberships'] = \
                    request[1]['approved']
            if 'unapproved' in request[1]:
                request[1]['person']['unapproved_memberships'] = \
                    request[1]['unapproved']
            return (request[0], request[1]['person'])
        else:
            raise AppError(name='Generic AppError',
                           message=request[1]['tg_flash'])

    def group_list(self, auth_params):
        '''Retrieve a list of groups

        :arg auth_params: Auth information for a particular user.  For
            instance, this can be a username/password pair or a session_id.
            Refer to
            :meth:`fedora.client.proxyclient.ProxyClient.send_request` for
            all the legal values for this.
        :returns: a tuple of session_id and information about groups.  The
            groups information is in two fields:

            :groups: contains information about each group
            :memberships: contains information about which users are members
                of which groups
        :raises AuthError: if the auth_params do not give access
        '''
        request = self.send_request('/group/list', auth_params=auth_params)
        return request
| gpl-2.0 |
tectronics/pysal | pysal/spreg/summary_output.py | 5 | 33110 | """
Internal helper files for user output.
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, David C. Folch david.folch@asu.edu, Pedro V. Amaral pedro.amaral@asu.edu, Jing Yao jingyao@asu.edu"
import textwrap as TW
import numpy as np
import copy as COPY
import diagnostics as diagnostics
import diagnostics_tsls as diagnostics_tsls
import diagnostics_sp as diagnostics_sp
import pysal
import scipy
from scipy.sparse.csr import csr_matrix
__all__ = []
###############################################################################
############### Primary functions for running summary diagnostics #############
###############################################################################
"""
This section contains one function for each user level regression class. These
are called directly from the user class. Each one mixes and matches smaller
functions located later in this module.
"""
def OLS(reg, vm, w, nonspat_diag, spat_diag, moran, regimes=False):
    """Compute diagnostics for an OLS regression and build reg.summary."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag_ols(reg, reg.robust)
    if nonspat_diag:
        # compute diagnostics
        reg.sig2ML = reg.sig2n
        reg.f_stat = diagnostics.f_stat(reg)
        reg.logll = diagnostics.log_likelihood(reg)
        reg.aic = diagnostics.akaike(reg)
        reg.schwarz = diagnostics.schwarz(reg)
        reg.mulColli = diagnostics.condition_index(reg)
        reg.jarque_bera = diagnostics.jarque_bera(reg)
        reg.breusch_pagan = diagnostics.breusch_pagan(reg)
        reg.koenker_bassett = diagnostics.koenker_bassett(reg)
        reg.white = diagnostics.white(reg)
        # organize summary output
        reg.__summary['summary_nonspat_diag_1'] = summary_nonspat_diag_1(reg)
        reg.__summary['summary_nonspat_diag_2'] = summary_nonspat_diag_2(reg)
    if spat_diag:
        # compute diagnostics and organize summary output
        spat_diag_ols(reg, w, moran)
    if regimes:
        summary_regimes(reg)
    summary(reg=reg, vm=vm, instruments=False, nonspat_diag=nonspat_diag, spat_diag=spat_diag)

def OLS_multi(reg, multireg, vm, nonspat_diag, spat_diag, moran, regimes=False):
    """OLS diagnostics/summary for a multi-regime regression (one block per regime)."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag_ols(mreg, mreg.robust)
        if nonspat_diag:
            # compute diagnostics
            mreg.sig2ML = mreg.sig2n
            mreg.f_stat = diagnostics.f_stat(mreg)
            mreg.logll = diagnostics.log_likelihood(mreg)
            mreg.aic = diagnostics.akaike(mreg)
            mreg.schwarz = diagnostics.schwarz(mreg)
            mreg.mulColli = diagnostics.condition_index(mreg)
            mreg.jarque_bera = diagnostics.jarque_bera(mreg)
            mreg.breusch_pagan = diagnostics.breusch_pagan(mreg)
            mreg.koenker_bassett = diagnostics.koenker_bassett(mreg)
            mreg.white = diagnostics.white(mreg)
            # organize summary output
            mreg.__summary['summary_nonspat_diag_1'] = summary_nonspat_diag_1(mreg)
            mreg.__summary['summary_nonspat_diag_2'] = summary_nonspat_diag_2(mreg)
        if spat_diag:
            # compute diagnostics and organize summary output
            spat_diag_ols(mreg, mreg.w, moran)
        if regimes:
            # per-regime note only; the Chow test is reported once, globally
            summary_regimes(mreg, chow=False)
        multireg[m].__summary = mreg.__summary
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=False, nonspat_diag=nonspat_diag, spat_diag=spat_diag)

def TSLS(reg, vm, w, spat_diag, regimes=False):
    """Diagnostics and summary for a two-stage least squares regression."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, reg.robust)
    if spat_diag:
        # compute diagnostics and organize summary output
        spat_diag_instruments(reg, w)
    # build coefficients table body
    build_coefs_body_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=spat_diag)

def TSLS_multi(reg, multireg, vm, spat_diag, regimes=False):
    """TSLS diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, mreg.robust)
        if spat_diag:
            # compute diagnostics and organize summary output
            spat_diag_instruments(mreg, mreg.w)
        # build coefficients table body
        build_coefs_body_instruments(mreg)
        if regimes:
            summary_regimes(mreg, chow=False)
        multireg[m].__summary = mreg.__summary
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=spat_diag)

def GM_Lag(reg, vm, w, spat_diag, regimes=False):
    """Diagnostics and summary for a spatial lag (GM) regression."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag_lag(reg, reg.robust)
    if spat_diag:
        # compute diagnostics and organize summary output
        spat_diag_instruments(reg, w)
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=spat_diag)

def GM_Lag_multi(reg, multireg, vm, spat_diag, regimes=False):
    """Spatial lag diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag_lag(mreg, mreg.robust)
        if spat_diag:
            # compute diagnostics and organize summary output
            spat_diag_instruments(mreg, mreg.w)
        # build coefficients table body
        summary_coefs_yend(mreg, mreg.z_stat)
        summary_coefs_instruments(mreg)
        if regimes:
            summary_regimes(mreg, chow=False)
        multireg[m].__summary = mreg.__summary
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=spat_diag)
def GM_Error(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM error model."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, None)
    # build coefficients table body
    beta_position = summary_coefs_somex(reg, reg.z_stat)
    summary_coefs_lambda(reg, reg.z_stat)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Error_multi(reg, multireg, vm, regimes=False):
    """GM error diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, None)
        # build coefficients table body
        beta_position = summary_coefs_somex(mreg, mreg.z_stat)
        summary_coefs_lambda(mreg, mreg.z_stat)
        if regimes:
            summary_regimes(mreg, chow=False)
        multireg[m].__summary = mreg.__summary
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM error model with endogenous variables."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, None)
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error_multi(reg, multireg, vm, regimes=False):
    """GM endogenous error diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, None)
        # build coefficients table body
        summary_coefs_yend(mreg, mreg.z_stat, lambd=True)
        summary_coefs_lambda(mreg, mreg.z_stat)
        summary_coefs_instruments(mreg)
        if regimes:
            summary_regimes(mreg, chow=False)
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Error_Hom(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM error model with homoskedasticity."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, None)
    # build coefficients table body
    beta_position = summary_coefs_somex(reg, reg.z_stat)
    summary_coefs_lambda(reg, reg.z_stat)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Error_Hom_multi(reg, multireg, vm, regimes=False):
    """GM error (Hom) diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, None)
        # build coefficients table body
        beta_position = summary_coefs_somex(mreg, mreg.z_stat)
        summary_coefs_lambda(mreg, mreg.z_stat)
        if regimes:
            summary_regimes(mreg, chow=False)
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error_Hom(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM endogenous error model (Hom)."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, None)
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error_Hom_multi(reg, multireg, vm, regimes=False):
    """GM endogenous error (Hom) diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, None)
        # build coefficients table body
        summary_coefs_yend(mreg, mreg.z_stat, lambd=True)
        summary_coefs_lambda(mreg, mreg.z_stat)
        summary_coefs_instruments(mreg)
        if regimes:
            summary_regimes(mreg, chow=False)
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Error_Het(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM error model with heteroskedasticity."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, 'het')
    # build coefficients table body
    beta_position = summary_coefs_somex(reg, reg.z_stat)
    summary_coefs_lambda(reg, reg.z_stat)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Error_Het_multi(reg, multireg, vm, regimes=False):
    """GM error (Het) diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, 'het')
        # build coefficients table body
        beta_position = summary_coefs_somex(mreg, mreg.z_stat)
        summary_coefs_lambda(mreg, mreg.z_stat)
        if regimes:
            summary_regimes(mreg, chow=False)
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=False, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error_Het(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM endogenous error model (Het)."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, 'het')
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Endog_Error_Het_multi(reg, multireg, vm, regimes=False):
    """GM endogenous error (Het) diagnostics/summary for a multi-regime regression."""
    for m in multireg:
        mreg = multireg[m]
        mreg.__summary = {}
        # compute diagnostics and organize summary output
        beta_diag(mreg, 'het')
        # build coefficients table body
        summary_coefs_yend(mreg, mreg.z_stat, lambd=True)
        summary_coefs_lambda(mreg, mreg.z_stat)
        summary_coefs_instruments(mreg)
        if regimes:
            summary_regimes(mreg, chow=False)
    reg.__summary = {}
    summary_chow(reg)
    summary_warning(reg)
    summary_multi(reg=reg, multireg=multireg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)
def GM_Combo(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM combo (lag + error) model.

    Fix: summary_warning was previously called both before and inside the
    ``regimes`` branch, so the warning text appeared twice in the report
    when regimes were used.  It is now called exactly once, matching
    GM_Combo_Hom and GM_Combo_Het.
    """
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag_lag(reg, None)
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)
def GM_Combo_Hom(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM combo model with homoskedasticity."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag_lag(reg, None)
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)

def GM_Combo_Het(reg, vm, w, regimes=False):
    """Diagnostics and summary for the GM combo model with heteroskedasticity."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag_lag(reg, 'het')
    # build coefficients table body
    summary_coefs_yend(reg, reg.z_stat, lambd=True)
    summary_coefs_lambda(reg, reg.z_stat)
    summary_coefs_instruments(reg)
    if regimes:
        summary_regimes(reg)
    summary_warning(reg)
    summary(reg=reg, vm=vm, instruments=True, nonspat_diag=False, spat_diag=False)
def Probit(reg, vm, w, spat_diag):
    """Diagnostics and summary for a probit regression."""
    reg.__summary = {}
    # compute diagnostics and organize summary output
    beta_diag(reg, None)
    # organize summary output
    if spat_diag:
        reg.__summary['summary_spat_diag'] = summary_spat_diag_probit(reg)
    reg.__summary['summary_r2'] = "%-21s: %3.2f\n" % ('% correctly predicted',reg.predpc)
    reg.__summary['summary_r2'] += "%-21s: %3.4f\n" % ('Log-Likelihood',reg.logl)
    reg.__summary['summary_r2'] += "%-21s: %3.4f\n" % ('LR test',reg.LR[0])
    reg.__summary['summary_r2'] += "%-21s: %3.4f\n" % ('LR test (p-value)',reg.LR[1])
    if reg.warning:
        # optimizer did not converge cleanly; flag it in the report
        reg.__summary['summary_r2'] += "\nMaximum number of iterations exceeded or gradient and/or function calls not changing\n"
    # build coefficients table body
    beta_position = summary_coefs_allx(reg, reg.z_stat)
    reg.__summary['summary_other_mid']= summary_coefs_slopes(reg)
    summary(reg=reg, vm=vm, instruments=False, short_intro=True, spat_diag=spat_diag)
##############################################################################
##############################################################################
############### Helper functions for running summary diagnostics #############
##############################################################################
def beta_diag_ols(reg, robust):
    """Standard errors, t statistics and (adjusted) R2 for an OLS fit."""
    # compute diagnostics
    reg.std_err = diagnostics.se_betas(reg)
    reg.t_stat = diagnostics.t_stat(reg)
    reg.r2 = diagnostics.r2(reg)
    reg.ar2 = diagnostics.ar2(reg)
    # organize summary output
    reg.__summary['summary_std_err'] = robust
    reg.__summary['summary_zt'] = 't'
    reg.__summary['summary_r2'] = "%-20s:%12.6f\n%-20s:%12.4f\n" % ('R-squared',reg.r2,'Adjusted R-squared',reg.ar2)
    # build coefficients table body
    position = summary_coefs_allx(reg, reg.t_stat)

def beta_diag(reg, robust):
    """Standard errors, z statistics and pseudo R2 for a non-OLS fit."""
    # compute diagnostics
    reg.std_err = diagnostics.se_betas(reg)
    reg.z_stat = diagnostics.t_stat(reg, z_stat=True)
    reg.pr2 = diagnostics_tsls.pr2_aspatial(reg)
    # organize summary output
    reg.__summary['summary_std_err'] = robust
    reg.__summary['summary_zt'] = 'z'
    reg.__summary['summary_r2'] = "%-20s:%12.6f\n" % ('Pseudo R-squared',reg.pr2)

def beta_diag_lag(reg, robust):
    """Like beta_diag, plus the spatial pseudo R2 for lag models."""
    # compute diagnostics
    reg.std_err = diagnostics.se_betas(reg)
    reg.z_stat = diagnostics.t_stat(reg, z_stat=True)
    reg.pr2 = diagnostics_tsls.pr2_aspatial(reg)
    reg.pr2_e = diagnostics_tsls.pr2_spatial(reg)
    # organize summary output
    reg.__summary['summary_std_err'] = robust
    reg.__summary['summary_zt'] = 'z'
    reg.__summary['summary_r2'] = "%-20s: %5.4f\n" % ('Pseudo R-squared',reg.pr2)
    reg.__summary['summary_r2'] += "%-20s: %5.4f\n" % ('Spatial Pseudo R-squared',reg.pr2_e)

def build_coefs_body_instruments(reg):
    """Assemble the coefficient rows plus instrument listings for IV models."""
    beta_position = summary_coefs_allx(reg, reg.z_stat)
    summary_coefs_yend(reg, reg.z_stat)
    summary_coefs_instruments(reg)
def spat_diag_ols(reg, w, moran):
    """LM tests (and optionally Moran's I) for spatial dependence after OLS."""
    # compute diagnostics
    lm_tests = diagnostics_sp.LMtests(reg, w)
    reg.lm_error = lm_tests.lme
    reg.lm_lag = lm_tests.lml
    reg.rlm_error = lm_tests.rlme
    reg.rlm_lag = lm_tests.rlml
    reg.lm_sarma = lm_tests.sarma
    if moran:
        # Moran's I on the residuals, standardized (z=True)
        moran_res = diagnostics_sp.MoranRes(reg, w, z=True)
        reg.moran_res = moran_res.I, moran_res.zI, moran_res.p_norm
    # organize summary output
    reg.__summary['summary_spat_diag'] = summary_spat_diag_ols(reg, moran)

def spat_diag_instruments(reg, w):
    """Anselin-Kelejian test for spatial dependence in IV regressions."""
    # compute diagnostics
    cache = diagnostics_sp.spDcache(reg, w)
    mi, ak, ak_p = diagnostics_sp.akTest(reg, w, cache)
    reg.ak_test = ak, ak_p
    # organize summary output
    reg.__summary['summary_spat_diag'] = "%-27s %2d %12.6f %9.7f\n" % ("Anselin-Kelejian Test", 1, reg.ak_test[0], reg.ak_test[1])
def summary(reg, vm, instruments, short_intro=False, nonspat_diag=False, spat_diag=False, other_end=False):
    """Concatenate the pieces stored in reg.__summary into reg.summary.

    Fix: the original used bare ``try/except: pass`` around optional summary
    sections, which silently swallowed *any* error.  Optional sections are
    now included via explicit key-membership tests, which is equivalent for
    the only expected failure (the key being absent).
    """
    summary = summary_open()
    summary += summary_intro(reg, short_intro)
    summary += reg.__summary['summary_r2']
    if nonspat_diag:
        summary += reg.__summary['summary_nonspat_diag_1']
    summary += summary_coefs_intro(reg)
    summary += reg.__summary['summary_coefs']
    summary += "------------------------------------------------------------------------------------\n"
    if instruments:
        summary += reg.__summary['summary_coefs_instruments']
    # optional sections: only present for some model types
    if 'summary_other_mid' in reg.__summary:
        summary += reg.__summary['summary_other_mid']
    if 'summary_chow' in reg.__summary:
        summary += reg.__summary['summary_chow']
    if nonspat_diag:
        summary += reg.__summary['summary_nonspat_diag_2']
    if spat_diag:
        summary += summary_spat_diag_intro()
        summary += reg.__summary['summary_spat_diag']
    if vm:
        summary += summary_vm(reg, instruments)
    if other_end:
        summary += reg.__summary['summary_other_end']
    summary += summary_close()
    reg.summary = summary
def summary_multi(reg, multireg, vm, instruments, short_intro=False, nonspat_diag=False, spat_diag=False, other_end=False):
    """Concatenate one summary section per regime into reg.summary.

    Global (cross-regime) sections stored on ``reg`` are appended after the
    last regime's section.  Fix: bare ``try/except: pass`` around optional
    sections replaced with explicit key-membership tests, so real errors are
    no longer silently swallowed.
    """
    summary = summary_open(multi=True)
    for m in multireg:
        mreg = multireg[m]
        summary += "----------\n\n"
        summary += summary_intro(mreg, short_intro)
        summary += mreg.__summary['summary_r2']
        if nonspat_diag:
            summary += mreg.__summary['summary_nonspat_diag_1']
        summary += summary_coefs_intro(mreg)
        summary += mreg.__summary['summary_coefs']
        summary += "------------------------------------------------------------------------------------\n"
        if instruments:
            summary += mreg.__summary['summary_coefs_instruments']
        if 'summary_other_mid' in mreg.__summary:
            summary += mreg.__summary['summary_other_mid']
        if m == multireg.keys()[-1]:
            # after the last regime, append the global sections
            if 'summary_other_mid' in reg.__summary:
                summary += reg.__summary['summary_other_mid']
            if 'summary_chow' in reg.__summary:
                summary += reg.__summary['summary_chow']
        if nonspat_diag:
            summary += mreg.__summary['summary_nonspat_diag_2']
        if spat_diag:
            summary += summary_spat_diag_intro()
            summary += mreg.__summary['summary_spat_diag']
        if vm:
            summary += summary_vm(mreg, instruments)
        if other_end:
            summary += mreg.__summary['summary_other_end']
            if m == multireg.keys()[-1]:
                if 'summary_other_end' in reg.__summary:
                    summary += reg.__summary['summary_other_end']
    summary += summary_close()
    reg.summary = summary
##############################################################################
##############################################################################
############### Guts of the summary printout #################################
##############################################################################
"""
This section contains the pieces needed to put together the summary printout.
"""
def summary_open(multi=False):
    """Return the report header; multi-regime reports defer the separator line."""
    strSummary = ""
    strSummary += "REGRESSION\n"
    if not multi:
        strSummary += "----------\n"
    return strSummary

def summary_intro(reg, short):
    """Return the title block: data set, weights, dependent variable and sizes."""
    title = "SUMMARY OF OUTPUT: " + reg.title + "\n"
    strSummary = title
    strSummary += "-" * (len(title)-1) + "\n"
    strSummary += "%-20s:%12s\n" % ('Data set',reg.name_ds)
    if reg.name_w:
        strSummary += "%-20s:%12s\n" % ('Weights matrix',reg.name_w)
    strSummary += "%-20s:%12s %-22s:%12d\n" % ('Dependent Variable',reg.name_y,'Number of Observations',reg.n)
    if not short:
        strSummary += "%-20s:%12.4f %-22s:%12d\n" % ('Mean dependent var',reg.mean_y,'Number of Variables',reg.k)
        strSummary += "%-20s:%12.4f %-22s:%12d\n" % ('S.D. dependent var',reg.std_y,'Degrees of Freedom',reg.n-reg.k)
    strSummary += '\n'
    return strSummary
def summary_coefs_intro(reg):
    """Return the coefficients-table header, noting robust std. errors if used."""
    strSummary = "\n"
    if reg.__summary['summary_std_err']:
        if reg.__summary['summary_std_err'].lower() == 'white':
            strSummary += "White Standard Errors\n"
        elif reg.__summary['summary_std_err'].lower() == 'hac':
            strSummary += "HAC Standard Errors; Kernel Weights: " + reg.name_gwk +"\n"
        #elif reg.__summary['summary_std_err'].lower() == 'het':
            #strSummary += "Heteroskedastic Corrected Standard Errors\n"
    strSummary += "------------------------------------------------------------------------------------\n"
    strSummary += " Variable Coefficient Std.Error %1s-Statistic Probability\n" %(reg.__summary['summary_zt'])
    strSummary += "------------------------------------------------------------------------------------\n"
    return strSummary

def summary_coefs_allx(reg, zt_stat):
    """Write one table row per exogenous variable (constant first, rest sorted).

    Returns the index of the last variable written.
    """
    strSummary = ""
    # keep the constant at position 0, sort the remaining names alphabetically
    indices = [0]+(np.argsort(reg.name_x[1:])+1).tolist()
    for i in indices:
        strSummary += "%20s %12.7f %12.7f %12.7f %12.7f\n" \
            % (reg.name_x[i],reg.betas[i][0],reg.std_err[i],zt_stat[i][0],zt_stat[i][1])
    reg.__summary['summary_coefs'] = strSummary
    return i

def summary_coefs_somex(reg, zt_stat):
    """This is a special case needed for models that do not have inference on
    the lambda term
    """
    strSummary = ""
    # skip the last name (lambda); constant first, rest sorted alphabetically
    indices = [0]+(np.argsort(reg.name_x[1:-1])+1).tolist()
    for i in indices:
        strSummary += "%20s %12.7f %12.7f %12.7f %12.7f\n" \
            % (reg.name_x[i],reg.betas[i][0],reg.std_err[i],zt_stat[i][0],zt_stat[i][1])
    reg.__summary['summary_coefs'] = strSummary
    return i

def summary_coefs_yend(reg, zt_stat, lambd=False):
    """Write table rows for all variables in name_z (optionally skipping lambda)."""
    strSummary = ""
    if lambd:
        indices = [0]+(np.argsort(reg.name_z[1:-1])+1).tolist()
    else:
        indices = [0]+(np.argsort(reg.name_z[1:])+1).tolist()
    for i in indices:
        strSummary += "%20s %12.7f %12.7f %12.7f %12.7f\n" \
            % (reg.name_z[i],reg.betas[i][0],reg.std_err[i],zt_stat[i][0],zt_stat[i][1])
    reg.__summary['summary_coefs'] = strSummary
def summary_coefs_lambda(reg, zt_stat):
    """Append the row for the spatial parameter (last coefficient) to the table.

    If inference is available for it (as many z/t stats as betas) the full
    row is written; otherwise only the point estimate.  Fix: the bare
    ``except`` is narrowed to ``AttributeError``, the only expected failure
    (models without endogenous variables have no ``name_z``).
    """
    try:
        name_var = reg.name_z
    except AttributeError:
        name_var = reg.name_x
    if len(reg.betas) == len(zt_stat):
        reg.__summary['summary_coefs'] += "%20s %12.7f %12.7f %12.7f %12.7f\n" \
            % (name_var[-1],reg.betas[-1][0],reg.std_err[-1],zt_stat[-1][0],zt_stat[-1][1])
    else:
        reg.__summary['summary_coefs'] += "%20s %12.7f \n" % (name_var[-1],reg.betas[-1][0])
def summary_coefs_instruments(reg):
    """Build the 'Instrumented:'/'Instruments:' listing for the summary.

    The sorted variable names are joined after each label, wrapped at 76
    columns, and stored under reg.__summary['summary_coefs_instruments'].
    """
    def _labeled_list(label, names):
        # one wrapped line (or more) per label, newline-terminated
        wrapper = TW.TextWrapper(width=76, subsequent_indent=" ")
        return wrapper.fill(label + ", ".join(sorted(names))) + "\n"

    listing = _labeled_list("Instrumented: ", reg.name_yend)
    listing += _labeled_list("Instruments: ", reg.name_q)
    reg.__summary['summary_coefs_instruments'] = listing
def summary_regimes(reg, chow=True):
    """Append the regimes-variable note to the summary (plus the Chow test).

    Fix: the bare ``try/except`` used to detect a missing
    ``summary_other_mid`` entry is replaced with an explicit membership
    test, so unrelated errors are no longer swallowed.
    """
    note = "Regimes variable: %s\n" % reg.name_regimes
    if 'summary_other_mid' in reg.__summary:
        reg.__summary['summary_other_mid'] += note
    else:
        reg.__summary['summary_other_mid'] = note
    if chow:
        summary_chow(reg)
def summary_chow(reg):
    """Build the Chow-test table (per-variable tests plus the global test)."""
    reg.__summary['summary_chow'] = "\nREGIMES DIAGNOSTICS - CHOW TEST\n"
    reg.__summary['summary_chow'] += " VARIABLE DF VALUE PROB\n"
    if reg.cols2regi == 'all':
        names_chow = reg.name_x_r[1:]
    else:
        # keep only the variables that actually vary by regime
        names_chow = [reg.name_x_r[1:][i] for i in np.where(reg.cols2regi)[0]]
    if reg.constant_regi=='many':
        # constant varies by regime: report it first, rest sorted
        indices = [0]+(np.argsort(names_chow)+1).tolist()
        names_chow = ['CONSTANT']+names_chow
    else:
        indices = (np.argsort(names_chow)).tolist()
    for i in indices:
        reg.__summary['summary_chow'] += "%25s %2d %12.6f %9.7f\n" %(names_chow[i],reg.nr-1,reg.chow.regi[i,0],reg.chow.regi[i,1])
    reg.__summary['summary_chow'] += "%25s %2d %12.6f %9.7f\n" %('Global test',reg.kr*(reg.nr-1),reg.chow.joint[0],reg.chow.joint[1])
def summary_warning(reg):
    """Append reg.warning (if any) to the 'summary_other_mid' section.

    Fix: the original nested two bare ``try/except`` blocks.  The rewrite
    handles the two expected situations explicitly: no ``warning``
    attribute (or one that is not text) means there is nothing to report,
    and a missing dict entry means the section is created.
    """
    try:
        text = reg.warning + "\n"
    except (AttributeError, TypeError):
        # no warning recorded for this regression (or not string-like)
        return
    if 'summary_other_mid' in reg.__summary:
        reg.__summary['summary_other_mid'] += text
    else:
        reg.__summary['summary_other_mid'] = text
def summary_coefs_slopes(reg):
    """Return the marginal-effects table (probit models)."""
    strSummary = "\nMARGINAL EFFECTS\n"
    if reg.scalem == 'phimean':
        strSummary += "Method: Mean of individual marginal effects\n"
    elif reg.scalem == 'xmean':
        strSummary += "Method: Marginal effects at variables mean\n"
    strSummary += "------------------------------------------------------------------------------------\n"
    strSummary += " Variable Slope Std.Error %1s-Statistic Probability\n" %(reg.__summary['summary_zt'])
    strSummary += "------------------------------------------------------------------------------------\n"
    # slopes exclude the constant, hence the i+1 offset into name_x
    indices = np.argsort(reg.name_x[1:]).tolist()
    for i in indices:
        strSummary += "%20s %12.7f %12.7f %12.7f %12.7f\n" \
            % (reg.name_x[i+1],reg.slopes[i][0],reg.slopes_std_err[i],reg.slopes_z_stat[i][0],reg.slopes_z_stat[i][1])
    return strSummary+"\n\n"
def summary_r2(reg, ols, spatial_lag):
    """Return the goodness-of-fit lines for the summary header.

    OLS reports R-squared and adjusted R-squared; other estimators report the
    pseudo R-squared, plus the spatial pseudo R-squared for lag models when
    available.  Fix: ``!= None`` replaced by the idiomatic ``is not None``.
    """
    if ols:
        strSummary = "%-20s:%12.6f\n%-20s:%12.4f\n" % ('R-squared',reg.r2,'Adjusted R-squared',reg.ar2)
    else:
        strSummary = "%-20s:%12.6f\n" % ('Pseudo R-squared',reg.pr2)
        if spatial_lag:
            if reg.pr2_e is not None:
                strSummary += "%-20s:%12.6f\n" % ('Spatial Pseudo R-squared',reg.pr2_e)
    return strSummary
def summary_nonspat_diag_1(reg):
    """Return the first non-spatial diagnostics block (fit statistics)."""
    strSummary = ""
    strSummary += "%-20s:%12.3f %-22s:%12.4f\n" % ('Sum squared residual',reg.utu,'F-statistic',reg.f_stat[0])
    strSummary += "%-20s:%12.3f %-22s:%12.4g\n" % ('Sigma-square',reg.sig2,'Prob(F-statistic)',reg.f_stat[1])
    strSummary += "%-20s:%12.3f %-22s:%12.3f\n" % ('S.E. of regression',np.sqrt(reg.sig2),'Log likelihood',reg.logll)
    strSummary += "%-20s:%12.3f %-22s:%12.3f\n" % ('Sigma-square ML',reg.sig2ML,'Akaike info criterion',reg.aic)
    strSummary += "%-20s:%12.4f %-22s:%12.3f\n" % ('S.E of regression ML',np.sqrt(reg.sig2ML),'Schwarz criterion',reg.schwarz)
    return strSummary
def summary_nonspat_diag_2(reg):
strSummary = ""
strSummary += "\nREGRESSION DIAGNOSTICS\n"
if reg.mulColli:
strSummary += "MULTICOLLINEARITY CONDITION NUMBER %16.6f\n\n" % (reg.mulColli)
strSummary += "TEST ON NORMALITY OF ERRORS\n"
strSummary += "TEST DF VALUE PROB\n"
strSummary += "%-27s %2d %14.6f %9.7f\n\n" % ('Jarque-Bera',reg.jarque_bera['df'],reg.jarque_bera['jb'],reg.jarque_bera['pvalue'])
strSummary += "DIAGNOSTICS FOR HETEROSKEDASTICITY\n"
strSummary += "RANDOM COEFFICIENTS\n"
strSummary += "TEST DF VALUE PROB\n"
strSummary += "%-27s %2d %12.6f %9.7f\n" % ('Breusch-Pagan test',reg.breusch_pagan['df'],reg.breusch_pagan['bp'],reg.breusch_pagan['pvalue'])
strSummary += "%-27s %2d %12.6f %9.7f\n" % ('Koenker-Bassett test',reg.koenker_bassett['df'],reg.koenker_bassett['kb'],reg.koenker_bassett['pvalue'])
if reg.white:
strSummary += "\nSPECIFICATION ROBUST TEST\n"
if len(reg.white)>3:
strSummary += reg.white+'\n'
else:
strSummary += "TEST DF VALUE PROB\n"
strSummary += "%-27s %2d %12.6f %9.7f\n" %('White',reg.white['df'],reg.white['wh'],reg.white['pvalue'])
return strSummary
def summary_spat_diag_intro():
    """Return the header lines for the spatial-dependence diagnostics table."""
    header = ("\nDIAGNOSTICS FOR SPATIAL DEPENDENCE\n"
              "TEST                           MI/DF       VALUE           PROB\n")
    return header
def summary_spat_diag_ols(reg, moran):
    """Format the spatial-dependence diagnostics for an OLS regression.

    Always reports the Lagrange Multiplier family of tests; Moran's I on
    the residuals is included only when *moran* is true.
    """
    lm_fmt = "%-27s %2d %12.6f %9.7f\n"
    pieces = []
    if moran:
        pieces.append("%-27s %8.4f %8.6f %9.7f\n" % (
            "Moran's I (error)", reg.moran_res[0], reg.moran_res[1], reg.moran_res[2]))
    pieces.append(lm_fmt % ("Lagrange Multiplier (lag)", 1, reg.lm_lag[0], reg.lm_lag[1]))
    pieces.append(lm_fmt % ("Robust LM (lag)", 1, reg.rlm_lag[0], reg.rlm_lag[1]))
    pieces.append(lm_fmt % ("Lagrange Multiplier (error)", 1, reg.lm_error[0], reg.lm_error[1]))
    pieces.append(lm_fmt % ("Robust LM (error)", 1, reg.rlm_error[0], reg.rlm_error[1]))
    pieces.append("%-27s %2d %12.6f %9.7f\n\n" % (
        "Lagrange Multiplier (SARMA)", 2, reg.lm_sarma[0], reg.lm_sarma[1]))
    return "".join(pieces)
def summary_spat_diag_probit(reg):
    """Format the spatial-dependence diagnostics for a probit regression.

    Reports the Kelejian-Prucha, Pinkse and Pinkse-Slade error tests, each
    with 1 degree of freedom.
    """
    row = "%-27s %2d %12.6f %9.7f\n"
    body = row % ("Kelejian-Prucha (error)", 1, reg.KP_error[0], reg.KP_error[1])
    body += row % ("Pinkse (error)", 1, reg.Pinkse_error[0], reg.Pinkse_error[1])
    body += "%-27s %2d %12.6f %9.7f\n\n" % (
        "Pinkse-Slade (error)", 1, reg.PS_error[0], reg.PS_error[1])
    return body
def summary_vm(reg, instruments):
    """Format the coefficients variance matrix of *reg* as a table.

    The column headers come from reg.name_z when *instruments* is true
    (IV-style models) and from reg.name_x otherwise.
    """
    names = reg.name_z if instruments else reg.name_x
    parts = ["\n",
             "COEFFICIENTS VARIANCE MATRIX\n",
             "----------------------------\n"]
    parts.append("".join("%12s" % name for name in names))
    parts.append("\n")
    nrow = reg.vm.shape[0]
    ncol = reg.vm.shape[1]
    for row in range(nrow):
        parts.append("".join("%12.6f" % reg.vm[row][col] for col in range(ncol)))
        parts.append("\n")
    return "".join(parts)
def summary_pred(reg):
    """Format the per-observation table of observed, predicted and residual values.

    One row per observation (1-based index), using the observed values in
    reg.y, fitted values in reg.predy and residuals in reg.u.
    """
    lines = ["\n\n",
             "%16s%16s%16s%16s\n" % ('OBS', reg.name_y, 'PREDICTED', 'RESIDUAL')]
    for idx in range(reg.n):
        lines.append("%16d%16.5f%16.5f%16.5f\n" % (
            idx + 1, reg.y[idx][0], reg.predy[idx][0], reg.u[idx][0]))
    return "".join(lines)
def summary_close():
    """Return the footer line that terminates a regression summary report."""
    footer = "================================ END OF REPORT ====================================="
    return footer
##############################################################################
def _test():
    """Run this module's doctests (invoked when executed as a script)."""
    # Local import keeps doctest out of the module namespace in normal use.
    import doctest
    doctest.testmod()
if __name__ == '__main__':
_test()
| bsd-3-clause |
tigersirvine/occtigerscricket | django/contrib/gis/db/backends/mysql/operations.py | 92 | 2422 | from django.db.backends.mysql.base import DatabaseOperations
from django.contrib.gis.db.backends.adapter import WKTAdapter
from django.contrib.gis.db.backends.base import BaseSpatialOperations
class MySQLOperations(DatabaseOperations, BaseSpatialOperations):
    """GeoDjango database operations for the MySQL spatial backend.

    MySQL (of this era) only implements bounding-box (MBR) based spatial
    predicates, so every supported lookup below maps onto an ``MBR*``
    function rather than an exact-geometry operator.
    """
    compiler_module = 'django.contrib.gis.db.backends.mysql.compiler'
    mysql = True
    name = 'mysql'
    select = 'AsText(%s)'       # retrieve geometry columns as WKT
    from_wkb = 'GeomFromWKB'    # geometry constructor from WKB
    from_text = 'GeomFromText'  # geometry constructor from WKT

    Adapter = WKTAdapter
    Adaptor = Adapter  # Backwards-compatibility alias.

    # ORM lookup name -> MySQL MBR function. All comparisons operate on
    # minimum bounding rectangles, not exact geometries.
    geometry_functions = {
        'bbcontains': 'MBRContains',  # For consistency w/PostGIS API
        'bboverlaps': 'MBROverlaps',  # .. ..
        'contained': 'MBRWithin',  # .. ..
        'contains': 'MBRContains',
        'disjoint': 'MBRDisjoint',
        'equals': 'MBREqual',
        'exact': 'MBREqual',
        'intersects': 'MBRIntersects',
        'overlaps': 'MBROverlaps',
        'same_as': 'MBREqual',
        'touches': 'MBRTouches',
        'within': 'MBRWithin',
        }

    # Every geometry lookup plus 'isnull' is a valid GIS term.
    # NOTE: `dict.keys() + [...]` relies on Python 2 returning a list.
    gis_terms = dict([(term, None) for term in geometry_functions.keys() + ['isnull']])

    def geo_db_type(self, f):
        # MySQL uses the geometry type name directly as the column type.
        return f.geom_type

    def get_geom_placeholder(self, value, srid):
        """
        The placeholder here has to include MySQL's WKT constructor. Because
        MySQL does not support spatial transformations, there is no need to
        modify the placeholder based on the contents of the given value.
        """
        if hasattr(value, 'expression'):
            # Expression (F()-style) values resolve to a column reference.
            placeholder = '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
        else:
            placeholder = '%s(%%s)' % self.from_text
        return placeholder

    def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
        """Return the SQL fragment implementing a spatial lookup.

        Raises TypeError for lookup types this backend does not support.
        """
        alias, col, db_type = lvalue
        # Fully qualified, quoted geometry column reference.
        geo_col = '%s.%s' % (qn(alias), qn(col))
        lookup_info = self.geometry_functions.get(lookup_type, False)
        if lookup_info:
            return "%s(%s, %s)" % (lookup_info, geo_col,
                                   self.get_geom_placeholder(value, field.srid))
        # TODO: Is this really necessary? MySQL can't handle NULL geometries
        # in its spatial indexes anyways.
        if lookup_type == 'isnull':
            return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
        raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
| bsd-3-clause |
loseblue/vim-ycm-windows-64 | third_party/requests/requests/packages/urllib3/poolmanager.py | 550 | 8977 | # urllib3/poolmanager.py
# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
try: # Python 3
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from ._collections import RecentlyUsedContainer
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connectionpool import port_by_scheme
from .request import RequestMethods
from .util import parse_url
__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
pool_classes_by_scheme = {
'http': HTTPConnectionPool,
'https': HTTPSConnectionPool,
}
log = logging.getLogger(__name__)
SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
'ssl_version')
class PoolManager(RequestMethods):
    """
    Allows for arbitrary requests while transparently keeping track of
    necessary connection pools for you.

    :param num_pools:
        Number of connection pools to cache before discarding the least
        recently used pool.

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.

    :param \**connection_pool_kw:
        Additional parameters are used to create fresh
        :class:`urllib3.connectionpool.ConnectionPool` instances.

    Example: ::

        >>> manager = PoolManager(num_pools=2)
        >>> r = manager.request('GET', 'http://google.com/')
        >>> r = manager.request('GET', 'http://google.com/mail')
        >>> r = manager.request('GET', 'http://yahoo.com/')
        >>> len(manager.pools)
        2
    """

    # Overridden by ProxyManager; None means direct connections.
    proxy = None

    def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
        RequestMethods.__init__(self, headers)
        self.connection_pool_kw = connection_pool_kw
        # LRU container of pools; evicted pools are closed via dispose_func.
        self.pools = RecentlyUsedContainer(num_pools,
                                           dispose_func=lambda p: p.close())

    def _new_pool(self, scheme, host, port):
        """
        Create a new :class:`ConnectionPool` based on host, port and scheme.

        This method is used to actually create the connection pools handed out
        by :meth:`connection_from_url` and companion methods. It is intended
        to be overridden for customization.
        """
        pool_cls = pool_classes_by_scheme[scheme]
        kwargs = self.connection_pool_kw
        if scheme == 'http':
            # Plain HTTP pools must not receive TLS-only keyword arguments.
            kwargs = self.connection_pool_kw.copy()
            for kw in SSL_KEYWORDS:
                kwargs.pop(kw, None)

        return pool_cls(host, port, **kwargs)

    def clear(self):
        """
        Empty our store of pools and direct them all to close.

        This will not affect in-flight connections, but they will not be
        re-used after completion.
        """
        self.pools.clear()

    def connection_from_host(self, host, port=None, scheme='http'):
        """
        Get a :class:`ConnectionPool` based on the host, port, and scheme.

        If ``port`` isn't given, it will be derived from the ``scheme`` using
        ``urllib3.connectionpool.port_by_scheme``.
        """
        scheme = scheme or 'http'
        port = port or port_by_scheme.get(scheme, 80)
        pool_key = (scheme, host, port)
        # Lock so concurrent callers cannot create duplicate pools for the
        # same (scheme, host, port) key.
        with self.pools.lock:
            # If the scheme, host, or port doesn't match existing open
            # connections, open a new ConnectionPool.
            pool = self.pools.get(pool_key)
            if pool:
                return pool
            # Make a fresh ConnectionPool of the desired type
            pool = self._new_pool(scheme, host, port)
            self.pools[pool_key] = pool
        return pool

    def connection_from_url(self, url):
        """
        Similar to :func:`urllib3.connectionpool.connection_from_url` but
        doesn't pass any additional parameters to the
        :class:`urllib3.connectionpool.ConnectionPool` constructor.

        Additional parameters are taken from the :class:`.PoolManager`
        constructor.
        """
        u = parse_url(url)
        return self.connection_from_host(u.host, port=u.port, scheme=u.scheme)

    def urlopen(self, method, url, redirect=True, **kw):
        """
        Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
        with custom cross-host redirect logic and only sends the request-uri
        portion of the ``url``.

        The given ``url`` parameter must be absolute, such that an appropriate
        :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
        """
        u = parse_url(url)
        conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
        # Redirects are handled here (cross-host aware), not inside the pool.
        kw['assert_same_host'] = False
        kw['redirect'] = False
        if 'headers' not in kw:
            kw['headers'] = self.headers
        if self.proxy is not None and u.scheme == "http":
            # Proxied plain-HTTP requests carry the absolute URL in the
            # request line; everything else uses just the request-uri.
            response = conn.urlopen(method, url, **kw)
        else:
            response = conn.urlopen(method, u.request_uri, **kw)
        redirect_location = redirect and response.get_redirect_location()
        if not redirect_location:
            return response
        # Support relative URLs for redirecting.
        redirect_location = urljoin(url, redirect_location)
        # RFC 2616, Section 10.3.4
        if response.status == 303:
            method = 'GET'
        log.info("Redirecting %s -> %s" % (url, redirect_location))
        kw['retries'] = kw.get('retries', 3) - 1  # Persist retries countdown
        kw['redirect'] = redirect
        # Recurse to follow the redirect; retries bound the recursion depth.
        return self.urlopen(method, redirect_location, **kw)
class ProxyManager(PoolManager):
    """
    Behaves just like :class:`PoolManager`, but sends all requests through
    the defined proxy, using the CONNECT method for HTTPS URLs.

    :param proxy_url:
        The URL of the proxy to be used.

    :param proxy_headers:
        A dictionary contaning headers that will be sent to the proxy. In case
        of HTTP they are being sent with each request, while in the
        HTTPS/CONNECT case they are sent only once. Could be used for proxy
        authentication.

    Example:
        >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
        >>> r1 = proxy.request('GET', 'http://google.com/')
        >>> r2 = proxy.request('GET', 'http://httpbin.org/')
        >>> len(proxy.pools)
        1
        >>> r3 = proxy.request('GET', 'https://httpbin.org/')
        >>> r4 = proxy.request('GET', 'https://twitter.com/')
        >>> len(proxy.pools)
        3
    """

    def __init__(self, proxy_url, num_pools=10, headers=None,
                 proxy_headers=None, **connection_pool_kw):
        # Accept an existing connection pool as a convenient way to name
        # the proxy endpoint.
        if isinstance(proxy_url, HTTPConnectionPool):
            proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
                                        proxy_url.port)
        proxy = parse_url(proxy_url)
        if not proxy.port:
            # Derive the default port from the proxy's scheme.
            port = port_by_scheme.get(proxy.scheme, 80)
            proxy = proxy._replace(port=port)
        self.proxy = proxy
        self.proxy_headers = proxy_headers or {}
        assert self.proxy.scheme in ("http", "https"), \
            'Not supported proxy scheme %s' % self.proxy.scheme
        # Thread the proxy settings into every pool this manager creates.
        connection_pool_kw['_proxy'] = self.proxy
        connection_pool_kw['_proxy_headers'] = self.proxy_headers
        super(ProxyManager, self).__init__(
            num_pools, headers, **connection_pool_kw)

    def connection_from_host(self, host, port=None, scheme='http'):
        if scheme == "https":
            # HTTPS tunnels (CONNECT) need one pool per target host.
            return super(ProxyManager, self).connection_from_host(
                host, port, scheme)
        # Plain HTTP is sent straight to the proxy, so a single pool keyed
        # on the proxy endpoint suffices for all hosts.
        return super(ProxyManager, self).connection_from_host(
            self.proxy.host, self.proxy.port, self.proxy.scheme)

    def _set_proxy_headers(self, url, headers=None):
        """
        Sets headers needed by proxies: specifically, the Accept and Host
        headers. Only sets headers not provided by the user.
        """
        headers_ = {'Accept': '*/*'}
        netloc = parse_url(url).netloc
        if netloc:
            headers_['Host'] = netloc
        if headers:
            # User-supplied headers win over the defaults above.
            headers_.update(headers)
        return headers_

    def urlopen(self, method, url, redirect=True, **kw):
        "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
        u = parse_url(url)
        if u.scheme == "http":
            # For proxied HTTPS requests, httplib sets the necessary headers
            # on the CONNECT to the proxy. For HTTP, we'll definitely
            # need to set 'Host' at the very least.
            kw['headers'] = self._set_proxy_headers(url, kw.get('headers',
                                                                self.headers))
        return super(ProxyManager, self).urlopen(method, url, redirect, **kw)
def proxy_from_url(url, **kw):
    """Convenience constructor: build a :class:`ProxyManager` for *url*.

    Extra keyword arguments are forwarded to the ProxyManager constructor.
    """
    return ProxyManager(proxy_url=url, **kw)
| gpl-3.0 |
wdzhou/mantid | scripts/SANS/sans/state/data.py | 3 | 7257 | # pylint: disable=too-few-public-methods
"""State about the actual data which is to be reduced."""
from __future__ import (absolute_import, division, print_function)
import json
import copy
from sans.state.state_base import (StateBase, StringParameter, PositiveIntegerParameter, BoolParameter,
ClassTypeParameter, rename_descriptor_names)
from sans.common.enums import (SANSInstrument, SANSFacility)
import sans.common.constants
from sans.state.state_functions import (is_pure_none_or_not_none, validation_message)
from sans.common.file_information import SANSFileInformationFactory
from sans.state.automatic_setters import automatic_setters
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
@rename_descriptor_names
class StateData(StateBase):
    """Describes the data files of a SANS reduction.

    Holds the sample and can run specifications (scatter, transmission,
    direct, each with an optional period), an optional calibration file,
    and instrument/facility details derived from the sample scatter file.
    """
    ALL_PERIODS = sans.common.constants.ALL_PERIODS

    # --- Sample runs (scatter is mandatory; transmission/direct are a pair)
    sample_scatter = StringParameter()
    sample_scatter_period = PositiveIntegerParameter()
    sample_transmission = StringParameter()
    sample_transmission_period = PositiveIntegerParameter()
    sample_direct = StringParameter()
    sample_direct_period = PositiveIntegerParameter()

    # --- Can runs (all optional, but transmission/direct require scatter)
    can_scatter = StringParameter()
    can_scatter_period = PositiveIntegerParameter()
    can_transmission = StringParameter()
    can_transmission_period = PositiveIntegerParameter()
    can_direct = StringParameter()
    can_direct_period = PositiveIntegerParameter()

    calibration = StringParameter()

    # Derived from the sample scatter file (see set_information_from_file).
    sample_scatter_run_number = PositiveIntegerParameter()
    sample_scatter_is_multi_period = BoolParameter()

    instrument = ClassTypeParameter(SANSInstrument)
    facility = ClassTypeParameter(SANSFacility)

    idf_file_path = StringParameter()
    ipf_file_path = StringParameter()

    def __init__(self):
        super(StateData, self).__init__()

        # Setup default values for periods
        self.sample_scatter_period = StateData.ALL_PERIODS
        self.sample_transmission_period = StateData.ALL_PERIODS
        self.sample_direct_period = StateData.ALL_PERIODS

        self.can_scatter_period = StateData.ALL_PERIODS
        self.can_transmission_period = StateData.ALL_PERIODS
        self.can_direct_period = StateData.ALL_PERIODS

        # This should be reset by the builder. Setting this to NoInstrument ensure that we will trip early on,
        # in case this is not set, for example by not using the builders.
        self.instrument = SANSInstrument.NoInstrument
        self.facility = SANSFacility.NoFacility

    def validate(self):
        """Check cross-field consistency; raises ValueError with a JSON
        description of every violation found."""
        is_invalid = dict()

        # A sample scatter must be specified
        if not self.sample_scatter:
            entry = validation_message("Sample scatter was not specified.",
                                       "Make sure that the sample scatter file is specified.",
                                       {"sample_scatter": self.sample_scatter})
            is_invalid.update(entry)

        # If the sample transmission/direct was specified, then a sample direct/transmission is required
        if not is_pure_none_or_not_none([self.sample_transmission, self.sample_direct]):
            entry = validation_message("If the sample transmission is specified then, the direct run needs to be "
                                       "specified too.",
                                       "Make sure that the transmission and direct runs are both specified (or none).",
                                       {"sample_transmission": self.sample_transmission,
                                        "sample_direct": self.sample_direct})
            is_invalid.update(entry)

        # If the can transmission/direct was specified, then this requires the can scatter
        if (self.can_direct or self.can_transmission) and (not self.can_scatter):
            entry = validation_message("If the can transmission is specified then the can scatter run needs to be "
                                       "specified too.",
                                       "Make sure that the can scatter file is set.",
                                       {"can_scatter": self.can_scatter,
                                        "can_transmission": self.can_transmission,
                                        "can_direct": self.can_direct})
            is_invalid.update(entry)

        # If a can transmission/direct was specified, then the other can entries need to be specified as well.
        if self.can_scatter and not is_pure_none_or_not_none([self.can_transmission, self.can_direct]):
            entry = validation_message("Inconsistent can transmission setting.",
                                       "Make sure that the can transmission and can direct runs are set (or none of"
                                       " them).",
                                       {"can_transmission": self.can_transmission,
                                        "can_direct": self.can_direct})
            is_invalid.update(entry)

        if is_invalid:
            raise ValueError("StateData: The provided inputs are illegal. "
                             "Please see: {0}".format(json.dumps(is_invalid)))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
def set_information_from_file(data_info):
    """Populate *data_info* with details read from its sample scatter file.

    Fills in instrument, facility, run number, the multi-period flag and
    the IDF/IPF file paths by interrogating the file through the
    SANSFileInformationFactory.
    """
    file_name = data_info.sample_scatter
    file_information_factory = SANSFileInformationFactory()
    file_information = file_information_factory.create_sans_file_information(file_name)
    instrument = file_information.get_instrument()
    facility = file_information.get_facility()
    run_number = file_information.get_run_number()
    data_info.instrument = instrument
    data_info.facility = facility
    data_info.sample_scatter_run_number = run_number
    # More than one period marks the run as multi-period.
    data_info.sample_scatter_is_multi_period = file_information.get_number_of_periods() > 1
    data_info.idf_file_path = file_information.get_idf_file_path()
    data_info.ipf_file_path = file_information.get_ipf_file_path()
class StateDataBuilder(object):
    """Builder for :class:`StateData` objects.

    Setter methods are generated automatically by the automatic_setters
    decorator from the StateData parameter descriptors.
    """
    @automatic_setters(StateData)
    def __init__(self):
        super(StateDataBuilder, self).__init__()
        self.state = StateData()

    def build(self):
        """Validate, enrich from the scatter file and return a copy of the state."""
        # Make sure that the product is in a valid state, ie not incomplete
        self.state.validate()

        # There are some elements which need to be read from the file. This is currently:
        # 1. instrument
        # 2. sample_scatter_run_number
        set_information_from_file(self.state)

        # Copy so later builder mutations do not affect the returned product.
        return copy.copy(self.state)
# ------------------------------------------
# Factory method for StateDataBuilder
# ------------------------------------------
def get_data_builder(facility):
    """Factory for the facility-specific StateDataBuilder.

    Only the ISIS facility is currently supported; any other facility
    raises NotImplementedError.
    """
    if facility is SANSFacility.ISIS:
        return StateDataBuilder()
    raise NotImplementedError("StateDataBuilder: The selected facility {0} does not seem"
                              " to exist".format(str(facility)))
| gpl-3.0 |
naemono/pyrax | tests/unit/test_autoscale.py | 11 | 69602 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
import pyrax.autoscale
from pyrax.autoscale import AutoScaleClient
from pyrax.autoscale import AutoScalePolicy
from pyrax.autoscale import AutoScaleWebhook
from pyrax.autoscale import ScalingGroup
from pyrax.autoscale import ScalingGroupManager
import pyrax.exceptions as exc
import pyrax.utils as utils
from pyrax import fakes
class AutoscaleTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
    # No extra construction work; defer entirely to unittest.TestCase.
    super(AutoscaleTest, self).__init__(*args, **kwargs)
def setUp(self):
    # Fresh fake identity and scaling group for every test so that state
    # never leaks between test methods.
    self.identity = fakes.FakeIdentity()
    self.scaling_group = fakes.FakeScalingGroup(self.identity)
def tearDown(self):
    # Nothing to clean up; the fixtures are plain in-memory fakes.
    pass
def test_make_policies(self):
    """_make_policies should build one policy object per scalingPolicies entry."""
    sg = self.scaling_group
    p1 = utils.random_unicode()
    p2 = utils.random_unicode()
    sg.scalingPolicies = [{"name": p1}, {"name": p2}]
    sg._make_policies()
    self.assertEqual(len(sg.policies), 2)
    polnames = [pol.name for pol in sg.policies]
    # assertIn replaces the deprecated TestCase.assert_ alias and produces
    # a clearer failure message on mismatch.
    self.assertIn(p1, polnames)
    self.assertIn(p2, polnames)
def test_get_state(self):
    # get_state() should simply delegate to the manager, passing itself.
    sg = self.scaling_group
    mgr = sg.manager
    mgr.get_state = Mock()
    sg.get_state()
    mgr.get_state.assert_called_once_with(sg)
def test_pause(self):
sg = self.scaling_group
mgr = sg.manager
mgr.pause = Mock()
sg.pause()
mgr.pause.assert_called_once_with(sg)
def test_resume(self):
sg = self.scaling_group
mgr = sg.manager
mgr.resume = Mock()
sg.resume()
mgr.resume.assert_called_once_with(sg)
def test_update(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update = Mock()
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
sg.update(name=name, cooldown=cooldown, min_entities=min_entities,
max_entities=max_entities, metadata=metadata)
mgr.update.assert_called_once_with(sg, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata)
def test_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_metadata = Mock()
metadata = utils.random_unicode()
sg.update_metadata(metadata)
mgr.update_metadata.assert_called_once_with(sg, metadata=metadata)
def test_get_configuration(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get_configuration = Mock()
sg.get_configuration()
mgr.get_configuration.assert_called_once_with(sg)
def test_get_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get_launch_config = Mock()
sg.get_launch_config()
mgr.get_launch_config.assert_called_once_with(sg)
def test_update_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_launch_config = Mock()
server_name = utils.random_unicode()
flavor = utils.random_unicode()
image = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
config_drive = utils.random_unicode()
user_data = utils.random_unicode()
sg.update_launch_config(server_name=server_name, flavor=flavor,
image=image, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
mgr.update_launch_config.assert_called_once_with(sg,
server_name=server_name, flavor=flavor, image=image,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def test_update_launch_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.update_launch_metadata = Mock()
metadata = utils.random_unicode()
sg.update_launch_metadata(metadata)
mgr.update_launch_metadata.assert_called_once_with(sg, metadata)
def test_add_policy(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.add_policy = Mock()
sg.add_policy(name, policy_type, cooldown, change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.add_policy.assert_called_once_with(sg, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_list_policies(self):
sg = self.scaling_group
mgr = sg.manager
mgr.list_policies = Mock()
sg.list_policies()
mgr.list_policies.assert_called_once_with(sg)
def test_get_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.get_policy = Mock()
sg.get_policy(pol)
mgr.get_policy.assert_called_once_with(sg, pol)
def test_update_policy(self):
sg = self.scaling_group
mgr = sg.manager
policy = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
desired_capacity = utils.random_unicode()
is_percent = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
sg.update_policy(policy, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(scaling_group=sg,
policy=policy, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_execute_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.execute_policy = Mock()
sg.execute_policy(pol)
mgr.execute_policy.assert_called_once_with(scaling_group=sg,
policy=pol)
def test_delete_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.delete_policy = Mock()
sg.delete_policy(pol)
mgr.delete_policy.assert_called_once_with(scaling_group=sg,
policy=pol)
def test_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.add_webhook = Mock()
sg.add_webhook(pol, name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
mgr.list_webhooks = Mock()
sg.list_webhooks(pol)
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
sg.update_webhook(pol, hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(scaling_group=sg, policy=pol,
webhook=hook, name=name, metadata=metadata)
def test_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
sg.update_webhook_metadata(pol, hook, metadata=metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
sg.delete_webhook(pol, hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
def test_policy_count(self):
    """policy_count must equal the number of attached policies."""
    group = self.scaling_group
    count = random.randint(1, 100)
    group.policies = ["x"] * count
    self.assertEqual(group.policy_count, count)
def test_name(self):
    """The name property proxies groupConfiguration['name'] for reads and writes."""
    group = self.scaling_group
    original = utils.random_unicode()
    updated = utils.random_unicode()
    group.groupConfiguration = {"name": original}
    self.assertEqual(group.name, original)
    group.name = updated
    self.assertEqual(group.name, updated)
def test_cooldown(self):
sg = self.scaling_group
cooldown = utils.random_unicode()
newcooldown = utils.random_unicode()
sg.groupConfiguration = {"cooldown": cooldown}
self.assertEqual(sg.cooldown, cooldown)
sg.cooldown = newcooldown
self.assertEqual(sg.cooldown, newcooldown)
def test_metadata(self):
sg = self.scaling_group
metadata = utils.random_unicode()
newmetadata = utils.random_unicode()
sg.groupConfiguration = {"metadata": metadata}
self.assertEqual(sg.metadata, metadata)
sg.metadata = newmetadata
self.assertEqual(sg.metadata, newmetadata)
def test_min_entities(self):
sg = self.scaling_group
min_entities = utils.random_unicode()
newmin_entities = utils.random_unicode()
sg.groupConfiguration = {"minEntities": min_entities}
self.assertEqual(sg.min_entities, min_entities)
sg.min_entities = newmin_entities
self.assertEqual(sg.min_entities, newmin_entities)
def test_max_entities(self):
sg = self.scaling_group
max_entities = utils.random_unicode()
newmax_entities = utils.random_unicode()
sg.groupConfiguration = {"maxEntities": max_entities}
self.assertEqual(sg.max_entities, max_entities)
sg.max_entities = newmax_entities
self.assertEqual(sg.max_entities, newmax_entities)
def test_mgr_get_state(self):
sg = self.scaling_group
mgr = sg.manager
id1 = utils.random_unicode()
id2 = utils.random_unicode()
ac = utils.random_unicode()
dc = utils.random_unicode()
pc = utils.random_unicode()
paused = utils.random_unicode()
statedict = {"group": {
"active": [{"id": id1}, {"id": id2}],
"activeCapacity": ac,
"desiredCapacity": dc,
"pendingCapacity": pc,
"paused": paused,
}}
expected = {
"active": [id1, id2],
"active_capacity": ac,
"desired_capacity": dc,
"pending_capacity": pc,
"paused": paused,
}
mgr.api.method_get = Mock(return_value=(None, statedict))
ret = mgr.get_state(sg)
self.assertEqual(ret, expected)
def test_mgr_pause(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/pause" % (mgr.uri_base, sg.id)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.pause(sg)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_resume(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/resume" % (mgr.uri_base, sg.id)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.resume(sg)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_get_configuration(self):
sg = self.scaling_group
mgr = sg.manager
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
conf = utils.random_unicode()
resp_body = {"groupConfiguration": conf}
mgr.api.method_get = Mock(return_value=(None, resp_body))
ret = mgr.get_configuration(sg)
mgr.api.method_get.assert_called_once_with(uri)
self.assertEqual(ret, conf)
def test_mgr_update(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
sg.name = utils.random_unicode()
sg.cooldown = utils.random_unicode()
sg.min_entities = utils.random_unicode()
sg.max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
expected_body = {"name": sg.name,
"cooldown": sg.cooldown,
"minEntities": sg.min_entities,
"maxEntities": sg.max_entities,
"metadata": metadata,
}
mgr.update(sg.id, metadata=metadata)
mgr.api.method_put.assert_called_once_with(uri, body=expected_body)
def test_mgr_replace(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
uri = "/%s/%s/config" % (mgr.uri_base, sg.id)
sg.name = utils.random_unicode()
sg.cooldown = utils.random_unicode()
sg.min_entities = utils.random_unicode()
sg.max_entities = utils.random_unicode()
metadata = utils.random_unicode()
new_name = utils.random_unicode()
new_cooldown = utils.random_unicode()
new_min = utils.random_unicode()
new_max = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
expected_body = {
"name": new_name,
"cooldown": new_cooldown,
"minEntities": new_min,
"maxEntities": new_max,
"metadata": {}
}
mgr.replace(sg.id, new_name, new_cooldown, new_min, new_max)
mgr.api.method_put.assert_called_once_with(uri, body=expected_body)
def test_mgr_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
sg.metadata = {"orig": "orig"}
metadata = {"new": "new"}
expected = sg.metadata.copy()
expected.update(metadata)
mgr.update = Mock()
mgr.update_metadata(sg.id, metadata)
mgr.update.assert_called_once_with(sg, metadata=expected)
def test_mgr_get_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
key_name = utils.random_unicode()
launchdict = {"launchConfiguration":
{"type": typ,
"args": {
"loadBalancers": lbs,
"server": {
"name": name,
"flavorRef": flv,
"imageRef": img,
"OS-DCF:diskConfig": dconfig,
"metadata": metadata,
"personality": personality,
"networks": networks,
"key_name": key_name,
},
},
},
}
expected = {
"type": typ,
"load_balancers": lbs,
"name": name,
"flavor": flv,
"image": img,
"disk_config": dconfig,
"metadata": metadata,
"personality": personality,
"networks": networks,
"key_name": key_name,
}
mgr.api.method_get = Mock(return_value=(None, launchdict))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
ret = mgr.get_launch_config(sg)
mgr.api.method_get.assert_called_once_with(uri)
self.assertEqual(ret, expected)
def test_mgr_update_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": mgr._encode_personality(personality),
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=personality, networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_unset_personality(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = [{
"path": "/foo/bar",
"contents": "cHlyYXg="
}]
networks = utils.random_unicode()
sg.launchConfiguration = {
"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": personality,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
body = {
"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=[], networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_no_personality(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_no_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, networks=networks, load_balancers=lbs)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_update_launch_config_key_name(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
key_name = utils.random_unicode()
sg.launchConfiguration = {}
body = {"type": "launch_server",
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"networks": networks,
"metadata": metadata,
"key_name": key_name,
"personality": mgr._encode_personality(personality),
},
"loadBalancers": lbs,
},
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.update_launch_config(sg.id, server_name=name, flavor=flv, image=img,
disk_config=dconfig, metadata=metadata,
personality=personality, networks=networks, load_balancers=lbs,
key_name=key_name)
mgr.api.method_put.assert_called_once_with(uri, body=body)
def test_mgr_replace_launch_config(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
typ = utils.random_unicode()
lbs = utils.random_unicode()
name = utils.random_unicode()
flv = utils.random_unicode()
img = utils.random_unicode()
dconfig = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
sg.launchConfiguration = {
"type": typ,
"args": {
"server": {
"name": name,
"imageRef": img,
"flavorRef": flv,
"OS-DCF:diskConfig": dconfig,
"personality": personality,
"networks": networks,
"metadata": metadata,
},
"loadBalancers": lbs,
},
}
new_typ = utils.random_unicode()
new_name = utils.random_unicode()
new_flv = utils.random_unicode()
new_img = utils.random_unicode()
expected = {
"type": new_typ,
"args": {
"server": {
"name": new_name,
"imageRef": new_img,
"flavorRef": new_flv,
},
"loadBalancers": []
}
}
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/launch" % (mgr.uri_base, sg.id)
mgr.replace_launch_config(sg.id, launch_config_type=new_typ,
server_name=new_name, flavor=new_flv, image=new_img)
mgr.api.method_put.assert_called_once_with(uri, body=expected)
def test_mgr_update_launch_metadata(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
orig_meta = {"orig": "orig"}
new_meta = {"new": "new"}
sg.launchConfiguration = {"args": {"server": {"metadata": orig_meta}}}
expected = orig_meta.copy()
expected.update(new_meta)
mgr.update_launch_config = Mock()
mgr.update_launch_metadata(sg.id, new_meta)
mgr.update_launch_config.assert_called_once_with(sg, metadata=expected)
def test_mgr_add_policy(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
for is_percent in (True, False):
post_body = {"name": name, "cooldown": cooldown, "type": ptype}
if is_percent:
post_body["changePercent"] = change
else:
post_body["change"] = change
ret = mgr.add_policy(sg, name, ptype, cooldown, change,
is_percent=is_percent)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScalePolicy))
def test_mgr_create_policy_body(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
change = utils.random_unicode()
expected_pct = {"name": name,
"cooldown": cooldown,
"type": ptype,
"desiredCapacity": desired_capacity,
"args": args
}
expected_nopct = expected_pct.copy()
expected_pct["changePercent"] = change
expected_nopct["change"] = change
ret_pct = mgr._create_policy_body(name, ptype, cooldown, change=change,
is_percent=True, desired_capacity=desired_capacity, args=args)
ret_nopct = mgr._create_policy_body(name, ptype, cooldown,
change=change, is_percent=False,
desired_capacity=desired_capacity, args=args)
self.assertEqual(ret_nopct, expected_nopct)
self.assertEqual(ret_pct, expected_pct)
def test_mgr_add_policy_desired_capacity(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
desired_capacity = utils.random_unicode()
post_body = {
"name": name,
"cooldown": cooldown,
"type": ptype,
"desiredCapacity": desired_capacity,
}
ret = mgr.add_policy(sg, name, ptype, cooldown,
desired_capacity=desired_capacity)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScalePolicy))
def test_mgr_list_policies(self):
sg = self.scaling_group
mgr = sg.manager
ret_body = {"policies": [{}]}
mgr.api.method_get = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies" % (mgr.uri_base, sg.id)
ret = mgr.list_policies(sg)
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_get_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
ret_body = {"policy": {}}
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
mgr.api.method_get = Mock(return_value=(None, ret_body))
ret = mgr.get_policy(sg, pol)
self.assert_(isinstance(ret, AutoScalePolicy))
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_replace_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol_id = utils.random_unicode()
info = {
"name": utils.random_unicode(),
"type": utils.random_unicode(),
"cooldown": utils.random_unicode(),
"change": utils.random_unicode(),
"args": utils.random_unicode(),
}
policy = fakes.FakeAutoScalePolicy(mgr, info, sg)
mgr.get_policy = Mock(return_value=policy)
new_name = utils.random_unicode()
new_type = utils.random_unicode()
new_cooldown = utils.random_unicode()
new_change_percent = utils.random_unicode()
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol_id)
expected = {
"name": new_name,
"type": new_type,
"cooldown": new_cooldown,
"changePercent": new_change_percent,
}
ret = mgr.replace_policy(sg, pol_id, name=new_name,
policy_type=new_type, cooldown=new_cooldown,
change=new_change_percent, is_percent=True)
mgr.api.method_put.assert_called_with(uri, body=expected)
def test_mgr_update_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
for is_percent in (True, False):
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"args": args}
if is_percent:
put_body["changePercent"] = change
else:
put_body["change"] = change
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, change=change, is_percent=is_percent,
args=args)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_desired_to_desired(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_desired_capacity = 10
old_info = {"desiredCapacity": 0}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": new_desired_capacity}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, desired_capacity=new_desired_capacity)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_change_to_desired(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_desired_capacity = 10
old_info = {"change": -1}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": new_desired_capacity}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, desired_capacity=new_desired_capacity)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_desired_to_change(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_change = 1
old_info = {"desiredCapacity": 0}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": name, "cooldown": cooldown, "type": ptype,
"change": new_change}
ret = mgr.update_policy(sg, pol, name=name, policy_type=ptype,
cooldown=cooldown, change=new_change)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_desired_capacity(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
args = utils.random_unicode()
new_name = utils.random_unicode()
old_capacity = 0
old_info = {
"type": ptype,
"desiredCapacity": old_capacity,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"desiredCapacity": old_capacity}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_is_percent(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
new_name = utils.random_unicode()
old_percent = 10
old_info = {
"type": ptype,
"changePercent": old_percent,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"changePercent": old_percent}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_policy_maintain_is_absolute(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
name = utils.random_unicode()
ptype = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
new_name = utils.random_unicode()
old_change = 10
old_info = {
"type": ptype,
"change": old_change,
"cooldown": cooldown,
}
mgr.get_policy = Mock(
return_value=fakes.FakeAutoScalePolicy(mgr, old_info, sg))
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
put_body = {"name": new_name, "cooldown": cooldown, "type": ptype,
"change": old_change}
ret = mgr.update_policy(sg, pol, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_execute_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
uri = "/%s/%s/policies/%s/execute" % (mgr.uri_base, sg.id, pol)
mgr.api.method_post = Mock(return_value=(None, None))
mgr.execute_policy(sg, pol)
mgr.api.method_post.assert_called_once_with(uri)
def test_mgr_delete_policy(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
uri = "/%s/%s/policies/%s" % (mgr.uri_base, sg.id, pol)
mgr.api.method_delete = Mock(return_value=(None, None))
mgr.delete_policy(sg, pol)
mgr.api.method_delete.assert_called_once_with(uri)
def test_mgr_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = utils.random_unicode()
ret_body = {"webhooks": [{}]}
mgr.api.method_post = Mock(return_value=(None, ret_body))
uri = "/%s/%s/policies/%s/webhooks" % (mgr.uri_base, sg.id, pol)
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
name = utils.random_unicode()
metadata = utils.random_unicode()
post_body = {"name": name, "metadata": metadata}
ret = mgr.add_webhook(sg, pol, name, metadata=metadata)
mgr.api.method_post.assert_called_with(uri, body=[post_body])
self.assert_(isinstance(ret, AutoScaleWebhook))
def test_mgr_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
ret_body = {"webhooks": [{}]}
mgr.api.method_get = Mock(return_value=(None, ret_body))
mgr.get_policy = Mock(return_value=fakes.FakeAutoScalePolicy(mgr, {},
sg))
uri = "/%s/%s/policies/%s/webhooks" % (mgr.uri_base, sg.id, pol.id)
ret = mgr.list_webhooks(sg, pol)
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_get_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
ret_body = {"webhook": {}}
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.api.method_get = Mock(return_value=(None, ret_body))
ret = mgr.get_webhook(sg, pol, hook)
self.assert_(isinstance(ret, AutoScaleWebhook))
mgr.api.method_get.assert_called_once_with(uri)
def test_mgr_replace_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
info = {"name": utils.random_unicode(),
"metadata": utils.random_unicode()}
hook_obj = fakes.FakeAutoScaleWebhook(mgr, info, pol, sg)
new_name = utils.random_unicode()
new_metadata = utils.random_unicode()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
expected = {"name": new_name, "metadata": {}}
ret = mgr.replace_webhook(sg, pol, hook, name=new_name)
mgr.api.method_put.assert_called_with(uri, body=expected)
def test_mgr_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.api.method_put = Mock(return_value=(None, None))
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
put_body = {"name": name, "metadata": metadata}
ret = mgr.update_webhook(sg, pol, hook, name=name, metadata=metadata)
mgr.api.method_put.assert_called_with(uri, body=put_body)
def test_mgr_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
hook_obj.metadata = {"orig": "orig"}
metadata = {"new": "new"}
expected = hook_obj.metadata.copy()
expected.update(metadata)
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.update_webhook = Mock()
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.update_webhook_metadata(sg, pol, hook, metadata)
mgr.update_webhook.assert_called_once_with(sg, pol, hook_obj,
metadata=expected)
def test_mgr_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
hook_obj = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
uri = "/%s/%s/policies/%s/webhooks/%s" % (mgr.uri_base, sg.id, pol.id,
hook)
mgr.api.method_delete = Mock(return_value=(None, None))
mgr.get_webhook = Mock(return_value=hook_obj)
mgr.delete_webhook(sg, pol, hook)
mgr.api.method_delete.assert_called_once_with(uri)
def test_mgr_resolve_lbs_dict(self):
sg = self.scaling_group
mgr = sg.manager
key = utils.random_unicode()
val = utils.random_unicode()
lb_dict = {key: val}
ret = mgr._resolve_lbs(lb_dict)
self.assertEqual(ret, [lb_dict])
def test_mgr_resolve_lbs_clb(self):
sg = self.scaling_group
mgr = sg.manager
clb = fakes.FakeLoadBalancer(None, {})
ret = mgr._resolve_lbs(clb)
expected = {"loadBalancerId": clb.id, "port": clb.port}
self.assertEqual(ret, [expected])
def test_mgr_resolve_lbs_tuple(self):
sg = self.scaling_group
mgr = sg.manager
fake_id = utils.random_unicode()
fake_port = utils.random_unicode()
lbs = (fake_id, fake_port)
ret = mgr._resolve_lbs(lbs)
expected = {"loadBalancerId": fake_id, "port": fake_port}
self.assertEqual(ret, [expected])
    def test_mgr_resolve_lbs_id(self):
        """A bare ID passed to _resolve_lbs() should be looked up through
        pyrax.cloud_loadbalancers and resolved to its id and port.
        """
        sg = self.scaling_group
        mgr = sg.manager
        clb = fakes.FakeLoadBalancer(None, {})
        # Save the real client and swap in a stub whose get() always
        # returns our fake balancer.
        sav = pyrax.cloud_loadbalancers
        class PyrCLB(object):
            def get(self, *args, **kwargs):
                return clb
        pyrax.cloud_loadbalancers = PyrCLB()
        ret = mgr._resolve_lbs("fakeid")
        expected = {"loadBalancerId": clb.id, "port": clb.port}
        self.assertEqual(ret, [expected])
        # NOTE(review): this restore is skipped if the assertion above
        # fails; addCleanup() would be more robust.
        pyrax.cloud_loadbalancers = sav
    def test_mgr_resolve_lbs_id_fail(self):
        """If the load-balancer lookup raises, _resolve_lbs() should
        convert the error into exc.InvalidLoadBalancer.
        """
        sg = self.scaling_group
        mgr = sg.manager
        # NOTE(review): this patches pyrax.cloudloadbalancers, while the
        # sibling test above patches pyrax.cloud_loadbalancers -- confirm
        # which attribute _resolve_lbs actually uses.
        pyclb = pyrax.cloudloadbalancers
        pyclb.get = Mock(side_effect=Exception())
        self.assertRaises(exc.InvalidLoadBalancer, mgr._resolve_lbs, "bogus")
def test_mgr_create_body(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
launch_config_type = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = None
metadata = None
personality = [{"path": "/tmp/testing", "contents": "testtest"}]
scaling_policies = None
networks = utils.random_unicode()
lb = fakes.FakeLoadBalancer()
load_balancers = (lb.id, lb.port)
server_name = utils.random_unicode()
image = utils.random_unicode()
group_metadata = utils.random_unicode()
key_name = utils.random_unicode()
expected = {
"groupConfiguration": {
"cooldown": cooldown,
"maxEntities": max_entities,
"minEntities": min_entities,
"name": name,
"metadata": group_metadata},
"launchConfiguration": {
"args": {
"loadBalancers": [{"loadBalancerId": lb.id,
"port": lb.port}],
"server": {
"flavorRef": flavor,
"imageRef": image,
"metadata": {},
"name": server_name,
"personality": [{"path": "/tmp/testing",
"contents": "dGVzdHRlc3Q="}],
"networks": networks,
"key_name": key_name}
},
"type": launch_config_type},
"scalingPolicies": []}
self.maxDiff = 1000000
ret = mgr._create_body(name, cooldown, min_entities, max_entities,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers,
scaling_policies=scaling_policies,
group_metadata=group_metadata, key_name=key_name)
self.assertEqual(ret, expected)
def test_mgr_create_body_disk_config(self):
sg = self.scaling_group
mgr = sg.manager
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
launch_config_type = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = None
personality = None
scaling_policies = None
networks = utils.random_unicode()
lb = fakes.FakeLoadBalancer()
load_balancers = (lb.id, lb.port)
server_name = utils.random_unicode()
image = utils.random_unicode()
group_metadata = utils.random_unicode()
key_name = utils.random_unicode()
expected = {
"groupConfiguration": {
"cooldown": cooldown,
"maxEntities": max_entities,
"minEntities": min_entities,
"name": name,
"metadata": group_metadata},
"launchConfiguration": {
"args": {
"loadBalancers": [{"loadBalancerId": lb.id,
"port": lb.port}],
"server": {
"OS-DCF:diskConfig": disk_config,
"flavorRef": flavor,
"imageRef": image,
"metadata": {},
"name": server_name,
"networks": networks,
"key_name": key_name}
},
"type": launch_config_type},
"scalingPolicies": []}
self.maxDiff = 1000000
ret = mgr._create_body(name, cooldown, min_entities, max_entities,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers,
scaling_policies=scaling_policies,
group_metadata=group_metadata, key_name=key_name)
self.assertEqual(ret, expected)
def test_policy_init(self):
sg = self.scaling_group
mgr = sg.manager
mgr.get = Mock(return_value=sg)
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg.id)
self.assert_(pol.scaling_group is sg)
def test_policy_get(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.get_policy = Mock(return_value=pol)
pol.get()
mgr.get_policy.assert_called_once_with(sg, pol)
def test_policy_delete(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.delete_policy = Mock()
pol.delete()
mgr.delete_policy.assert_called_once_with(sg, pol)
def test_policy_update(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
pol.update(name=name, policy_type=policy_type, cooldown=cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(scaling_group=sg,
policy=pol, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_policy_execute(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.execute_policy = Mock()
pol.execute()
mgr.execute_policy.assert_called_once_with(sg, pol)
def test_policy_add_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.add_webhook = Mock()
name = utils.random_unicode()
metadata = utils.random_unicode()
pol.add_webhook(name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_policy_list_webhooks(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
mgr.list_webhooks = Mock()
pol.list_webhooks()
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_policy_get_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
mgr.get_webhook = Mock()
pol.get_webhook(hook)
mgr.get_webhook.assert_called_once_with(sg, pol, hook)
def test_policy_update_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
pol.update_webhook(hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(sg, policy=pol, webhook=hook,
name=name, metadata=metadata)
def test_policy_update_webhook_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
pol.update_webhook_metadata(hook, metadata=metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_policy_delete_webhook(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
pol.delete_webhook(hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
def test_webhook_get(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
pol.get_webhook = Mock()
hook.get()
pol.get_webhook.assert_called_once_with(hook)
def test_webhook_update(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
name = utils.random_unicode()
metadata = utils.random_unicode()
pol.update_webhook = Mock()
hook.update(name=name, metadata=metadata)
pol.update_webhook.assert_called_once_with(hook, name=name,
metadata=metadata)
def test_webhook_update_metadata(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
metadata = utils.random_unicode()
pol.update_webhook_metadata = Mock()
hook.update_metadata(metadata=metadata)
pol.update_webhook_metadata.assert_called_once_with(hook,
metadata)
def test_webhook_delete(self):
sg = self.scaling_group
mgr = sg.manager
pol = fakes.FakeAutoScalePolicy(mgr, {}, sg)
hook = fakes.FakeAutoScaleWebhook(mgr, {}, pol, sg)
pol.delete_webhook = Mock()
hook.delete()
pol.delete_webhook.assert_called_once_with(hook)
def test_clt_get_state(self):
clt = fakes.FakeAutoScaleClient()
sg = self.scaling_group
mgr = clt._manager
mgr.get_state = Mock()
clt.get_state(sg)
mgr.get_state.assert_called_once_with(sg)
def test_clt_pause(self):
clt = fakes.FakeAutoScaleClient()
sg = self.scaling_group
mgr = clt._manager
mgr.pause = Mock()
clt.pause(sg)
mgr.pause.assert_called_once_with(sg)
def test_clt_resume(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.resume = Mock()
clt.resume(sg)
mgr.resume.assert_called_once_with(sg)
def test_clt_replace(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.replace = Mock()
clt.replace(sg, name, cooldown, min_entities, max_entities,
metadata=metadata)
mgr.replace.assert_called_once_with(sg, name, cooldown, min_entities,
max_entities, metadata=metadata)
def test_clt_update(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
cooldown = utils.random_unicode()
min_entities = utils.random_unicode()
max_entities = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update = Mock()
clt.update(sg, name=name, cooldown=cooldown, min_entities=min_entities,
max_entities=max_entities, metadata=metadata)
mgr.update.assert_called_once_with(sg, name=name, cooldown=cooldown,
min_entities=min_entities, max_entities=max_entities,
metadata=metadata)
def test_clt_update_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
metadata = utils.random_unicode()
mgr.update_metadata = Mock()
clt.update_metadata(sg, metadata)
mgr.update_metadata.assert_called_once_with(sg, metadata)
def test_clt_get_configuration(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.get_configuration = Mock()
clt.get_configuration(sg)
mgr.get_configuration.assert_called_once_with(sg)
def test_clt_get_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.get_launch_config = Mock()
clt.get_launch_config(sg)
mgr.get_launch_config.assert_called_once_with(sg)
def test_clt_replace_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.replace_launch_config = Mock()
launch_config_type = utils.random_unicode()
server_name = utils.random_unicode()
image = utils.random_unicode()
flavor = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
clt.replace_launch_config(sg, launch_config_type, server_name, image,
flavor, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name)
mgr.replace_launch_config.assert_called_once_with(sg,
launch_config_type, server_name, image, flavor,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name)
def test_clt_update_launch_config(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.update_launch_config = Mock()
server_name = utils.random_unicode()
flavor = utils.random_unicode()
image = utils.random_unicode()
disk_config = utils.random_unicode()
metadata = utils.random_unicode()
personality = utils.random_unicode()
networks = utils.random_unicode()
load_balancers = utils.random_unicode()
key_name = utils.random_unicode()
user_data = utils.random_unicode()
config_drive = utils.random_unicode()
clt.update_launch_config(sg, server_name=server_name, flavor=flavor,
image=image, disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
mgr.update_launch_config.assert_called_once_with(sg,
server_name=server_name, flavor=flavor, image=image,
disk_config=disk_config, metadata=metadata,
personality=personality, networks=networks,
load_balancers=load_balancers, key_name=key_name,
config_drive=config_drive, user_data=user_data)
def test_clt_update_launch_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.update_launch_metadata = Mock()
metadata = utils.random_unicode()
clt.update_launch_metadata(sg, metadata)
mgr.update_launch_metadata.assert_called_once_with(sg, metadata)
def test_clt_add_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.add_policy = Mock()
clt.add_policy(sg, name, policy_type, cooldown, change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.add_policy.assert_called_once_with(sg, name, policy_type, cooldown,
change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_clt_list_policies(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
mgr.list_policies = Mock()
clt.list_policies(sg)
mgr.list_policies.assert_called_once_with(sg)
def test_clt_get_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.get_policy = Mock()
clt.get_policy(sg, pol)
mgr.get_policy.assert_called_once_with(sg, pol)
def test_clt_replace_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.replace_policy = Mock()
clt.replace_policy(sg, pol, name, policy_type, cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
mgr.replace_policy.assert_called_once_with(sg, pol, name, policy_type,
cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
def test_clt_update_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
policy_type = utils.random_unicode()
cooldown = utils.random_unicode()
change = utils.random_unicode()
is_percent = utils.random_unicode()
desired_capacity = utils.random_unicode()
args = utils.random_unicode()
mgr.update_policy = Mock()
clt.update_policy(sg, pol, name=name, policy_type=policy_type,
cooldown=cooldown, change=change, is_percent=is_percent,
desired_capacity=desired_capacity, args=args)
mgr.update_policy.assert_called_once_with(sg, pol, name=name,
policy_type=policy_type, cooldown=cooldown, change=change,
is_percent=is_percent, desired_capacity=desired_capacity,
args=args)
def test_clt_execute_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.execute_policy = Mock()
clt.execute_policy(sg, pol)
mgr.execute_policy.assert_called_once_with(scaling_group=sg, policy=pol)
def test_clt_delete_policy(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.delete_policy = Mock()
clt.delete_policy(sg, pol)
mgr.delete_policy.assert_called_once_with(scaling_group=sg, policy=pol)
def test_clt_add_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.add_webhook = Mock()
clt.add_webhook(sg, pol, name, metadata=metadata)
mgr.add_webhook.assert_called_once_with(sg, pol, name,
metadata=metadata)
def test_clt_list_webhooks(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
mgr.list_webhooks = Mock()
clt.list_webhooks(sg, pol)
mgr.list_webhooks.assert_called_once_with(sg, pol)
def test_clt_get_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.get_webhook = Mock()
clt.get_webhook(sg, pol, hook)
mgr.get_webhook.assert_called_once_with(sg, pol, hook)
def test_clt_replace_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.replace_webhook = Mock()
clt.replace_webhook(sg, pol, hook, name, metadata=metadata)
mgr.replace_webhook.assert_called_once_with(sg, pol, hook, name,
metadata=metadata)
def test_clt_update_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
name = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook = Mock()
clt.update_webhook(sg, pol, hook, name=name, metadata=metadata)
mgr.update_webhook.assert_called_once_with(scaling_group=sg, policy=pol,
webhook=hook, name=name, metadata=metadata)
def test_clt_update_webhook_metadata(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
metadata = utils.random_unicode()
mgr.update_webhook_metadata = Mock()
clt.update_webhook_metadata(sg, pol, hook, metadata)
mgr.update_webhook_metadata.assert_called_once_with(sg, pol, hook,
metadata)
def test_clt_delete_webhook(self):
clt = fakes.FakeAutoScaleClient()
mgr = clt._manager
sg = self.scaling_group
pol = utils.random_unicode()
hook = utils.random_unicode()
mgr.delete_webhook = Mock()
clt.delete_webhook(sg, pol, hook)
mgr.delete_webhook.assert_called_once_with(sg, pol, hook)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
krmahadevan/selenium | py/selenium/webdriver/remote/mobile.py | 50 | 2651 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .command import Command
class Mobile(object):
    """Remote-driver facade for mobile-specific commands (network
    connection type and Native/WebView context switching)."""

    class ConnectionType(object):
        """Decodes the network-connection bitmask used by the mobile JSON
        wire protocol: bit 0 = airplane mode, bit 1 = wifi, bit 2 = data."""

        def __init__(self, mask):
            self.mask = mask

        @property
        def airplane_mode(self):
            # bit 0
            return self.mask % 2 == 1

        @property
        def wifi(self):
            # bit 1 -- use floor division: under Python 3 the old
            # `self.mask / 2` produced a float and broke the bit test
            return (self.mask // 2) % 2 == 1

        @property
        def data(self):
            # bit 2 -- floor division for the same Python 3 reason
            # (e.g. mask=2 gave 0.5 > 0 -> data wrongly reported True)
            return (self.mask // 4) > 0

    # Commonly-used mask presets.
    ALL_NETWORK = ConnectionType(6)     # wifi + data
    WIFI_NETWORK = ConnectionType(2)
    DATA_NETWORK = ConnectionType(4)
    AIRPLANE_MODE = ConnectionType(1)

    def __init__(self, driver):
        self._driver = driver

    @property
    def network_connection(self):
        """Return the device's current connection state as a ConnectionType."""
        return self.ConnectionType(self._driver.execute(Command.GET_NETWORK_CONNECTION)['value'])

    def set_network_connection(self, network):
        """
        Set the network connection for the remote device.

        Accepts either a ConnectionType or a raw integer mask, and returns
        the resulting state as a ConnectionType.

        Example of setting airplane mode::

            driver.mobile.set_network_connection(driver.mobile.AIRPLANE_MODE)
        """
        mode = network.mask if isinstance(network, self.ConnectionType) else network
        return self.ConnectionType(self._driver.execute(
            Command.SET_NETWORK_CONNECTION, {
                'name': 'network_connection',
                'parameters': {'type': mode}})['value'])

    @property
    def context(self):
        """
        returns the current context (Native or WebView).
        """
        return self._driver.execute(Command.CURRENT_CONTEXT_HANDLE)

    @property
    def contexts(self):
        """
        returns a list of available contexts
        """
        return self._driver.execute(Command.CONTEXT_HANDLES)

    @context.setter
    def context(self, new_context):
        """
        sets the current context
        """
        self._driver.execute(Command.SWITCH_TO_CONTEXT, {"name": new_context})
| apache-2.0 |
AlexGiroud/huby | ControleursPython/testSerial.py | 2 | 1340 | #!/usr/bin/python
import serial
import http.client, urllib.request
from time import localtime, strftime
import re
import time
import configparser
# Poll an RFID reader on a serial port; on each valid 12-char tag read,
# notify the configured web server that something entered ("flow=in").
config = configparser.ConfigParser()
config.read('config.ini')
# Serial device path comes from config.ini ([SERIAL] Interface); 9600 baud,
# 3-second read timeout.
port = serial.Serial(config["SERIAL"]["Interface"], baudrate=9600, timeout=3.0)
#port = serial.Serial("ttyAMA0", baudrate=9600, timeout=3.0)
# A tag id is exactly 12 alphanumeric characters.
prog = re.compile("^[A-Z0-9a-z]{12}$")
while True:
    # Frames are 14 bytes: start byte + 12 id characters + stop byte.
    rcv = port.read(14)
    recivedString = rcv.decode('utf-8')
    # strip the framing bytes, keeping only the tag id
    recivedString = recivedString[1:-1]
    print(recivedString)
    if prog.match(recivedString):
        print("matched")
        # notify the server; NOTE(review): any network failure here will
        # raise and kill the loop -- presumably acceptable for this setup
        urllib.request.urlopen(config["SERVER"]["Url"]+"AddEntry.php?flow=in")
        #params = urllib.parse.urlencode({'flow': 'in'})
        #headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
        #conn = http.client.HTTPConnection(config["SERVER"]["Url"]+"AddEntry.php:80")
        #try:
        #	conn.request("GET", "", params, headers)
        #	response = conn.getresponse()
        #	data = response.read()
        #	conn.close()
        #except:
        #	print("Connection failed")
        #	test = urllib.urlopen(config["SERVER"]["Url"]+"action.controller.php?action=motion")
        # debounce: ignore repeated reads of the same tag for one second
        time.sleep(1)
        port.flushInput()
    else:
        print('invalid rfid detection')
| gpl-3.0 |
sadanandb/pmt | src/pyasm/web/cherrypy30_adapter.py | 5 | 4912 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['CherryPyException', 'CherryPyAdapter']
import types, os, re
from pyasm.common import TacticException
from web_environment import *
import cherrypy
class CherryPyException(Exception):
    """Error raised by the CherryPy web adapter layer."""
def get_app_server():
    """Build the CherryPy application-server class.

    The base class is selected from the TACTIC_SERVER_CLS environment
    variable (defaulting to BaseAppServer) and the returned class exposes
    index/default handlers to CherryPy.
    """
    requested = os.environ.get("TACTIC_SERVER_CLS")
    if not requested:
        from app_server import BaseAppServer as base_cls
    elif requested == "pyasm.web.WidgetAppServer":
        from widget_app_server import WidgetAppServer as base_cls
    else:
        from simple_app_server import SimpleAppServer as base_cls

    class CherryPyAppServer(base_cls):

        def get_adapter(my):
            return CherryPyAdapter()

        @cherrypy.expose()
        def index(my, **kwargs):
            # root URL: no hash path components
            my.hash = ()
            return my.get_display()

        @cherrypy.expose()
        def default(my, *vpath, **kwargs):
            # any other URL: remember the path components as the hash
            my.hash = vpath
            return my.get_display()

    return CherryPyAppServer
def get_xmlrpc_server():
    '''dynamically load in an xmlrpc server'''
    from cherrypy import _cptools

    class XmlrpcServer(_cptools.XMLRPCController):

        def get_adapter(my):
            return CherryPyAdapter()

    return XmlrpcServer
from cherrypy_adapter import CherryPyAdapter as CherryPyAdapter20
class CherryPyAdapter(CherryPyAdapter20):
    """Encapsulates cherrypy environment. Implements the web interface"""

    def __init__(my):
        my.request = cherrypy.request
        my.response = cherrypy.response
        #my.request.wsgi_environ['REQUEST_URI'] = my.request.browser_url
        my.request.wsgi_environ['REQUEST_URI'] = cherrypy.url()

    def get_context_name(my):
        '''this includes all of the subdirectories as well as the main
        context'''
        dir = my.request.path_info
        # URLs look like /tactic/<context>/... or /projects/<context>/...
        p = re.compile( r"/(tactic|projects)/?(\w+)/")
        m = p.search(dir)
        if not m:
            return "default"
        context = m.groups()[1]
        return context

    def get_request_method(my):
        '''return the HTTP method (GET, POST, ...) of the current request'''
        return my.request.method

    def get_request(my):
        return my.request

    def get_request_headers(my):
        return my.request.headers

    def get_response(my):
        # NOTE: this method used to be defined twice in this class; the
        # redundant duplicate definition has been removed.
        return my.response

    def set_header(my, name, value):
        my.response.headers[name] = value

    def set_content_type(my, content_type):
        my.response.headers['Content-Type'] = content_type

    def get_content_type(my, content_type):
        # NOTE: the content_type argument is unused; kept for interface
        # compatibility with callers.
        return my.response.headers['Content-Type']

    def set_force_download(my, filename):
        '''make browsers download the response as an attachment'''
        my.response.headers['Content-Type'] = "application/force-download"
        my.response.headers['Content-Disposition'] = "attachment; filename=%s" % filename

    def set_csv_download(my, filename):
        '''make browsers download the response as a csv attachment'''
        filename = os.path.basename(filename)
        my.response.headers['Content-Type'] = "text/x-csv"
        my.response.headers['Content-Disposition'] = "attachment; filename=%s" % filename

    # form submission functions
    def reset_form(my):
        my.request.params = {}

    def get_form_keys(my):
        return my.request.params.keys()

    def has_form_key(my, key):
        # use the "in" operator instead of dict.has_key(), which was
        # removed in Python 3 (equivalent under Python 2)
        return key in my.request.params

    def set_form_value(my, name, value):
        '''Set the form value to appear like it was submitted'''
        # protect from accidental null names. This can occur when an
        # input widget has not name specified.
        if not name:
            return
        my.request.params[name] = value

    def get_form_data(my):
        return my.request.params

    # cookie functions
    def set_cookie(my, name, value):
        '''set a cookie'''
        cherrypy.response.cookie[name] = value
        cherrypy.response.cookie[name]['path'] = '/'
        cherrypy.response.cookie[name]['max-age'] = 120*3600

    def get_cookie(my, name):
        '''get a cookie'''
        try:
            return cherrypy.request.cookie[name].value
        except KeyError:
            # a missing cookie is not an error: return an empty string
            # (Python-3-compatible except clause; the bound exception
            # variable was unused)
            return ""

    def get_cookies(my):
        '''get a cookies'''
        return cherrypy.request.cookie

    # environment functions
    """
    def get_env_keys(my):
        env = my.request.wsgi_environ
        return env.keys()

    def get_env(my, env_var):
        env = my.request.wsgi_environ
        return env.get(env_var)
    """
| epl-1.0 |
dzonerzy/Fridafy | fridafy/fridafy.py | 1 | 17763 | """
MIT License
Copyright (c) 2017 Daniele Linguaglossa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from pyv8._PyV8 import JSFunction
from threading import Thread
from pyv8.PyV8 import *
import argparse
import frida
import time
import re
__all__ = ["version", "FridafyEngine"]
version = "0.1"
class Injector(object):
    """Attaches to a process on a USB device via frida, injects a
    JavaScript hook script, and forwards script messages to a callback.
    The injection runs in a background daemon thread (see start()).
    """
    def __init__(self, process=None, script=None, message_callback=None):
        # Target process name, JS source and message handler may also be
        # supplied later through the setters below.
        self.process = process
        self.script = script
        self.message_callback = message_callback
        self.frida_process = None
        self.frida_script = None
        self.running = True
    def on_message(self, message, data):
        """Dispatch a frida script message to the user callback if one is
        set, otherwise print it to stdout."""
        if callable(self.message_callback):
            return self.message_callback(message, data)
        else:
            if message['type'] == 'send':
                # plain containers/numbers print as-is; strings are encoded
                if type(message['payload']) not in [dict, list, int, float, bool, type(None)]:
                    print("[*] {0}".format(message['payload'].encode('utf-8')))
                else:
                    print("[*] {0}".format(message['payload']))
            else:
                print("[FridaError] {0}".format(message["description"].encode("utf-8")))
    def set_script(self, script):
        self.script = script
    def set_process(self, process):
        self.process = process
    def set_message_callback(self, message_callback):
        self.message_callback = message_callback
    def get_script(self):
        return self.script
    def attach(self):
        """Block until frida can attach to the target process over USB."""
        if self.process:
            attached = False
            # NOTE(review): `attached` is never set True; the loop exits via
            # the early return once attach() succeeds.
            while not attached:
                try:
                    process = frida.get_usb_device().attach(self.process)
                    self.frida_process = process
                    return
                except Exception:
                    # keep retrying until the process appears on the device
                    pass
        else:
            print("[-] Error a process must be specified!")
    def stop(self):
        # signal the background thread in _init_hook() to exit
        self.running = False
    def start(self):
        """Run the injection in a daemon thread so the caller is not blocked."""
        t = Thread(target=self._init_hook, args=())
        t.setDaemon(True)
        t.start()
    def _init_hook(self):
        """Attach, load the script, then idle until stop() is called."""
        self.attach()
        try:
            self.frida_script = self.frida_process.create_script(self.get_script())
            self.frida_script.on('message', self.on_message)
            self.frida_script.load()
        except frida.InvalidArgumentError as e:
            # frida reports JS syntax errors as "Script(line N)": extract the
            # line number and print the offending source with one line of
            # context on each side
            message = e.args[0]
            line = re.compile('Script\(line (\d+)\)')
            line = int(line.findall(message)[0])
            script = self.get_script().split("\n")
            if line > 0 and line - len(script)-1:
                lines = script[line-1] + "\n" "=> {0}".format(script[line]) + "\n" + script[line+1]
            else:
                lines = "=> {0}".format(script[line])
            print "[-] Error on line {0}:\n{1}: \n\n{2}".format(line, line, lines)
        except Exception as e:
            print("[-] Something weird happened during initialization: {0}".format(e))
        # NOTE(review): busy-wait keeping the daemon thread alive until
        # stop() flips self.running; the inner try/except is effectively dead
        # since `pass` cannot raise KeyboardInterrupt here.
        while self.running:
            try:
                pass
            except KeyboardInterrupt:
                self.running = False
class FridaHelper(JSClass):
    """Helper object exposed to the PyV8 JavaScript context.

    Builds frida hook scripts piece by piece (globals, JS helper functions,
    per-method hooks) and drives an Injector to load the assembled script
    into the target process.
    """
    # NOTE(review): these are class-level (shared) attributes; all
    # FridaHelper instances share the same injector, hooks list, etc.
    injector = Injector()
    overloads = []
    # Script deliberately calling an overload signature that cannot exist so
    # frida replies with the list of real overloads (see find_overloads()).
    script_overloads = """
    Java.perform(function() {
        var className = Java.use('%s');
        var x = className.%s.overload('int','int','int','int','int','int','int','int','int','int','int','int','int','int');
    });
    """
    # Extracts the parenthesized argument list of an overload description.
    overload_re = re.compile('\((.*)\)')
    script_base = """
    Java.perform(function() {{
       send("Proudly powered by Fridafy v{0}");
       %s
    }});
    """.format(version)
    class_index = 1
    hooks = []
    has_class_created= False
    class_created = ""
    has_object_created = False
    object_created = ""
    has_dump_support = False
    # JS helper: hexdump a buffer, falling back to a hand-rolled formatter
    # when frida's hexdump() rejects the input.
    dump_support = """
    function dump(buffer, length)
    {
        try
        {
            return "\\n" + hexdump(buffer, { offset: 0, length: length, header: true, ansi: true });
        }catch(e){
            if(typeof buffer != "string")
            {
                var result = "";
                for (var i = 0; i < buffer.length; i++) {
                    result += String.fromCharCode(((buffer[i]%256)+256)%256);
                }
                buffer = result;
            }
            blockSize = 16;
            var lines = [];
            var hex = "0123456789ABCDEF";
            for (var b = 0; b < buffer.length; b += blockSize) {
                var block = buffer.slice(b, Math.min(b + blockSize, buffer.length));
                var addr = ("0000" + b.toString(16)).slice(-4);
                var codes = block.split('').map(function (ch) {
                    var code = ch.charCodeAt(0);
                    return " " + hex[(0xF0 & code) >> 4] + hex[0x0F & code];
                }).join("");
                codes += "   ".repeat(blockSize - block.length);
                var chars = block.replace(/[\\x00-\\x1F\\x20]/g, '.');
                chars +=  " ".repeat(blockSize - block.length);
                lines.push(addr + " " + codes + "  " + chars);
            }
            return "\\n"+lines.join("\\n");
        }
    }
    """
    has_callstack_support = False
    # JS helper: print or return the current Java thread's stack trace
    # (requires the ThreadObj global created by support_callstack()).
    callstack_support = """
    function callstack(shouldPrint)
    {
        var trace = [];
        var stack = ThreadObj.currentThread().getStackTrace();
        if(shouldPrint)
        {
            send("------------------------------ Call Stack ---------------------------------");
            for(var i=0; i<stack.length; i++)
            {
                send(i + " => " + stack[i].toString());
            }
            send("--------------------------------------------------------------------------");
        }else{
            for(var i=0; i<stack.length; i++)
            {
                trace.push(stack[i].toString());
            }
            return trace;
        }
    }
    """
    has_bin2str_support = False
    # JS helper: convert a (possibly signed) byte array to a string.
    bin2str_support = """
    function bin2str(array) {
        var result = "";
        for (var i = 0; i < array.length; i++) {
            result += String.fromCharCode(modulus(array[i], 256));
        }
        return result;
    }
    function modulus(x, n)
    {
        return ((x%n)+n)%n;
    }
    """
    has_save2disk_support = False
    # JS helper: write binary data to a file on the device.
    save2disk_support = """
    function save2disk(path, data)
    {
        var dumpFile = new File(path, "wb");
        dumpFile.write(data);
        dumpFile.close();
    }
    """
    @property
    def CLASS_CONSTRUCTOR(self):
        # frida's name for a Java constructor "method"
        return "$init"
    def _on_message_overloads(self, message, data):
        """Message handler used while probing for overloads: captures the
        overload list frida reports in its error description."""
        if message['type'] == 'send':
            if type(message['payload']) not in [dict, list, int, float, bool, type(None)]:
                print("[*] {0}".format(message['payload'].encode('utf-8')))
            else:
                print("[*] {0}".format(message['payload']))
        else:
            if "does not match any of" in message["description"]:
                # the lines after the first are the real overload signatures
                self.overloads = message["description"].split("\n\t")[1:]
            else:
                print("[-] {0}".format(message["description"].encode("utf-8")))
    def find_overloads(self, process_name, class_name, method_name):
        """Return the list of overload signatures of a Java method by
        injecting a deliberately-invalid overload request."""
        self.injector.set_script(self.script_overloads % (class_name, method_name))
        self.injector.set_process(process_name)
        self.injector.set_message_callback(self._on_message_overloads)
        self.injector.start()
        # busy-wait until _on_message_overloads has captured the list
        while not len(self.overloads) > 0:
            pass
        self.injector.stop()
        overloads = self.overloads
        self.overloads = []
        return overloads
    def _fake_parameters_type(self, overload):
        """Return the comma-separated parameter types of an overload
        description, without quotes (empty string when there are none)."""
        parameters = self.overload_re.findall(overload)[0]
        if len(parameters) > 1:
            overload_prams = parameters.strip()
            return overload_prams.replace("'", "")
        else:
            return ""
    def _fake_parameters(self, overload):
        """Return placeholder JS parameter names (param1, param2, ...) for
        each argument of the given overload."""
        params = []
        param_index = 1
        parameters = self.overload_re.findall(overload)[0]
        if len(parameters) > 1:
            overload_prams = parameters.strip().split(",")
            for _ in overload_prams:
                params.append("param{0}".format(param_index))
                param_index += 1
            return params
        else:
            return params
    def create_global(self, variable_name, obj):
        """Queue a JS `var` declaration initialised from a Python value.

        NOTE(review): py2-only -- relies on the `unicode` builtin and PyV8
        JSArray/JSFunction types for the value-to-JS conversion table.
        """
        representation = {
            int: lambda x: x,
            float: lambda x: x,
            bool: lambda x: x,
            type(None): lambda x: x,
            unicode: lambda x: x.encode("utf-8"),
            JSArray: lambda x: "new Array()",
            JSFunction: lambda x: str(x),
        }
        self.has_object_created = True
        self.object_created += """
        try
        {
            var %s = %s;
        }catch(e){
            send(e.toString());
        }
        """ % (variable_name, representation[type(obj)](obj))
    def create_global_class(self, variable_name, class_name):
        """Queue a JS `var` holding a new instance of a Java class."""
        self.has_class_created = True
        self.class_created += """
        try
        {
            var %s = Java.use('%s').$new();
        }catch(e){
            send(e.toString());
        }
        """ % (variable_name, class_name)
    def support_bin2str(self, flag):
        self.has_bin2str_support = flag
    def support_save2disk(self, flag):
        self.has_save2disk_support = flag
    def support_dump(self, flag):
        self.has_dump_support = flag
    def support_callstack(self, flag):
        # callstack() needs a java.lang.Thread instance on the JS side
        if flag:
            self.create_global_class("ThreadObj", "java.lang.Thread")
        self.has_callstack_support = flag
    def find_and_hook_method(self, process_name, class_name, method, callback):
        """Generate hook code for every overload of a Java method.

        Each hook calls `callback` twice: once with the arguments before
        invoking the original implementation, and once with the result.
        """
        hook_string = ""
        method_index = 1
        hook_string += "var class{0} = Java.use('{1}');\n".format(self.class_index, class_name)
        overloads = self.find_overloads(process_name, class_name, method)
        for overload in overloads:
            params = self._fake_parameters(overload)
            params_type = self._fake_parameters_type(overload)
            signature = "{0}.{1}({2})".format(class_name, method, params_type)
            hook_string += "var class{0}_method{1} = class{2}.{3}{4};\n\n".format(self.class_index, method_index,
                                                                                 self.class_index, method, overload)
            hook_string += "class%s_method%s.implementation = function(%s){\n" % (self.class_index, method_index,
                                                                                  ",".join(params))
            callback_params = []
            if params_type:
                params_type = params_type.split(",")
                params_type = [x.strip() for x in params_type]
            for i in range(0, len(params)):
                if len(params):
                    callback_params.append('{"type": "%s", "value": %s}' % (params_type[i], params[i]))
            callback_data = '{"signature": "%s", "params": [%s], "is_result": false, "result": ""};\n' % (signature,
                                                                                                          ",".join(callback_params))
            hook_string += "var signature = {0}".format(callback_data)
            hook_string += "{0}(signature);\n".format(callback.name)
            hook_string += "var ret = class%s_method%s.call(this%s);\n" % (self.class_index, method_index,
                                                                          "," +",".join(params)
                                                                          if len(params) > 0 else "")
            hook_string += "signature[\"is_result\"] = true;\n"
            hook_string += "signature[\"result\"] = ret;\n"
            hook_string += "{0}(signature);\n".format(callback.name)
            hook_string += "return ret;\n}\n"
            method_index += 1
        hook_string += str(callback) + "\n"
        self.class_index += 1
        self.hooks.append(hook_string)
    def find_and_hook_native(self, library_name, method_name, callback):
        """Generate an Interceptor hook for an exported native function,
        invoking `callback` on entry (with args) and on leave (with retval)."""
        hook_string = """
        Interceptor.attach(Module.findExportByName("%s", "%s"), {
            onEnter: function(args)
            {
                %s({"value": null, "is_result": false, "args": args});
            },
            onLeave: function(retval)
            {
                %s({"value": retval, "is_result": true, "args": null});
            }
        });
        %s\n
        """ % (library_name, method_name, str(callback.name), str(callback.name), str(callback))
        print hook_string
        self.hooks.append(hook_string)
    def reset(self):
        """Clear all queued globals, helper flags and hooks."""
        self.has_bin2str_support = False
        self.has_object_created = False
        self.has_callstack_support = False
        self.has_class_created = False
        # NOTE(review): sets has_hexdump_support, but the declared flag is
        # has_dump_support -- dump support is NOT reset here.
        self.has_hexdump_support = False
        self.class_created = ""
        self.object_created = ""
        self.hooks = []
    def stop(self):
        self.injector.stop()
    def start(self, process_name):
        """Assemble the final script (globals, helpers, hooks) and inject it
        into the given process."""
        full_script = ""
        if self.has_class_created:
            full_script += self.class_created + "\n"
        if self.has_object_created:
            full_script += self.object_created + "\n"
        if self.has_dump_support:
            full_script += self.dump_support + "\n"
        if self.has_callstack_support:
            full_script += self.callstack_support + "\n"
        if self.has_bin2str_support:
            full_script += self.bin2str_support + "\n"
        if self.has_save2disk_support:
            full_script += self.save2disk_support + "\n"
        full_script += "".join(self.hooks)
        script = self.script_base % (full_script)
        self.injector.set_script(script)
        self.injector.set_process(process_name)
        self.injector.start()
class Global():
    """Names exposed to the embedded PyV8 JavaScript context.

    FridaHelper() hands out the hook builder, send() echoes a message to
    stdout, sleep() pauses execution, and the remaining helpers are inert
    placeholders whose real implementations run on the device side.
    """
    def FridaHelper(self):
        """Return a fresh FridaHelper for the script to use."""
        helper = FridaHelper()
        return helper
    def sleep(self, milliseconds):
        """Pause for the given number of milliseconds."""
        seconds = milliseconds/1000
        time.sleep(seconds)
    def send(self, text):
        """Echo a message the way frida's send() would."""
        line = "[*] {0}".format(text)
        print(line)
    def callstack(self, *args):
        """Placeholder; the real callstack() is injected device-side."""
        pass
    def hexdump(self, *args):
        """Placeholder; the real hexdump() is injected device-side."""
        pass
    def bin2str(self, *args):
        """Placeholder; the real bin2str() is injected device-side."""
        pass
class FridafyEngine(object):
    """Evaluates Fridafy JavaScript inside a PyV8 context, either from a
    script string (execute) or interactively (interact).

    NOTE(review): py2-only -- interact() uses raw_input and print
    statements.
    """
    def __init__(self):
        self.message_callback = None
        # REPL prompt; switches to "... " while a statement is incomplete
        self.cursor = "[JS]> "
        # when True, previously-buffered incomplete input is prepended
        self.use_previous = False
        self.global_script = ""
    def set_message_callback(self, callback):
        """Register a handler for engine error messages (must be callable)."""
        if callable(callback):
            self.message_callback = callback
    def execute(self, script):
        """Evaluate a complete script; JS errors go to the message callback
        if one is set, otherwise to stdout."""
        with JSContext(Global()) as ctx:
            try:
                ctx.eval(script)
            except JSError as e:
                if self.message_callback:
                    self.message_callback({"type": "error", "description": str(e)})
                else:
                    print("{0}: {1}".format(e.name, e.message))
    def interact(self):
        """Run a read-eval-print loop until EOF (CTRL+D)."""
        with JSContext(Global()) as ctx:
            while True:
                try:
                    script = raw_input(self.cursor)
                    try:
                        if self.use_previous:
                            # continue a multi-line statement started earlier
                            res = ctx.eval(self.global_script + script)
                        else:
                            res = ctx.eval(script)
                        if res is not None:
                            print str(res).encode('utf-8')
                        else:
                            if script:
                                print "undefined"
                        self.use_previous = False
                        self.cursor = "[JS]> "
                    except Exception as e:
                        if e.message == "Unexpected end of input":
                            # statement incomplete: buffer it and keep reading
                            self.global_script += script
                            self.cursor = "... "
                            self.use_previous = True
                        else:
                            self.use_previous = False
                            self.global_script = ""
                            print "{0}: {1}".format(e.name, e.message)
                except KeyboardInterrupt:
                    # CTRL+C aborts the current line, not the REPL
                    print
                except EOFError:
                    break
def main():
    """Command-line entry point.

    With -s, load and execute the given script file (falling back to the
    interactive console if the file is empty or unreadable); the process
    then waits on stdin until interrupted.
    """
    arg_parser = argparse.ArgumentParser(description="Fridafy Engine")
    arg_parser.add_argument("-s", metavar="script", help="Script to run", required=False, dest="script")
    options = arg_parser.parse_args()
    if options.script:
        source = ""
        try:
            with open(options.script, "r") as handle:
                source = handle.read()
        except IOError:
            print("Error: Unable to find script file")

        def on_message(data):
            # stop the whole program as soon as the engine reports an error
            if data["type"] == "error":
                print(data["description"])
                exit(0)

        if source:
            engine = FridafyEngine()
            engine.set_message_callback(on_message)
            print("[*] Waiting for application...")
            engine.execute(source)
            try:
                raw_input()
            except (KeyboardInterrupt, EOFError):
                print("[-] Exiting...")
        else:
            # no usable script content: drop into the interactive console
            print("[*] Launching FridaEngine interactive console - press CTRL+D to exit\n")
            engine = FridafyEngine()
            engine.interact()

if __name__ == "__main__":
    main()
| mit |
schlos/eden | tests/unit_tests/modules/s3/s3gis/GeoRSSLayer.py | 43 | 2220 |
# load the shared s3gis test helpers (layer_test etc.) via the eden test loader
s3gis_tests = load_module("tests.unit_tests.modules.s3.s3gis")
def test_GeoRSSLayer():
    """An enabled gis_layer_georss record should appear in the
    S3.gis.layers_georss javascript with the expected marker attributes.

    NOTE(review): db, current, session, request and datetime are injected
    into the global namespace by the eden test runner, not imported here.
    """
    # use debug to show up errors
    # without debug, errors get to be turned into session warnings
    # and the layer skipped altogether. No datastructure.
    url = "test://test_GeoRSS"
    current.request.utcnow = datetime.datetime.now()
    test_utils.clear_table(db, db.gis_cache)
    # pre-seed the cache so the layer does not try to fetch the feed
    db.gis_cache.insert(
        modified_on = datetime.datetime.now(),
        source = url
    )
    db.commit()
    current.session.s3.debug = True
    s3gis_tests.layer_test(
        db,
        db.gis_layer_georss,
        dict(
            name = "Test GeoRSS",
            description = "Test GeoRSS layer",
            enabled = True,
            created_on = datetime.datetime.now(),
            modified_on = datetime.datetime.now(),
            url = url,
        ),
        "S3.gis.layers_georss",
        [
            {
                "marker_height": 34,
                "marker_image": u"gis_marker.image.marker_red.png",
                "marker_width": 20,
                "name": u"Test GeoRSS",
                "url": u"/eden/gis/cache_feed.geojson?cache.source=test://test_GeoRSS"
            }
        ],
        session = session,
        request = request,
    )
# NOTE(review): these module-level imports execute before any test runs,
# so test functions defined above can safely reference test_utils/s3gis.
test_utils = local_import("test_utils")
s3gis = local_import("s3.s3gis")
def test_no_cached_copy_available():
    """With an empty gis_cache (and in debug mode), rendering a map with a
    GeoRSS layer whose feed cannot be fetched should raise.

    NOTE(review): db, current and datetime are injected into the global
    namespace by the eden test runner.
    """
    test_utils.clear_table(db, db.gis_cache)
    current.request.utcnow = datetime.datetime.now()
    # debug mode turns layer failures into exceptions instead of warnings
    current.session.s3.debug = True
    gis = s3gis.GIS()
    with s3gis_tests.InsertedRecord(
        db,
        db.gis_layer_georss,
        dict(
            name = "Test GeoRSS",
            description = "Test GeoRSS layer",
            enabled = True,
            created_on = datetime.datetime.now(),
            modified_on = datetime.datetime.now(),
            url = "test://test_GeoRSS",
        )
    ):
        with s3gis_tests.ExpectedException(Exception):
            gis.show_map(
                window = True,
                catalogue_toolbar = True,
                toolbar = True,
                search = True,
                catalogue_layers = True,
                projection = 900913,
            )
| mit |
ryanraaum/oldowan.display | setup.py | 1 | 1204 | from setuptools import setup, find_packages
import sys, os
PACKAGE = 'display'
# The VERSION file lives inside the package so it ships with the sdist.
VERSION = open(os.path.join(os.path.dirname(os.path.realpath(__file__)),'oldowan', PACKAGE, 'VERSION')).read().strip()
# First line of README is the short description; everything after the
# following blank line becomes the long description.
desc_lines = open('README', 'r').readlines()
setup(name='oldowan.%s' % PACKAGE,
      version=VERSION,
      description=desc_lines[0],
      long_description=''.join(desc_lines[2:]),
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Science/Research",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Scientific/Engineering :: Bio-Informatics"
      ],
      keywords='',
      platforms=['Any'],
      author='Ryan Raaum',
      author_email='code@raaum.org',
      url='http://www.raaum.org/software/oldowan',
      license='MIT',
      packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
      include_package_data=False,
      namespace_packages = ['oldowan'],
      # ship the VERSION file alongside the installed package
      data_files=[("oldowan/%s" % PACKAGE, ["oldowan/%s/VERSION" % PACKAGE])],
      zip_safe=False,
      test_suite = 'nose.collector',
      )
| mit |
Johnetordoff/osf.io | osf_tests/test_draft_registration.py | 3 | 30690 | import mock
import pytest
import datetime
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from osf.exceptions import UserNotAffiliatedError, DraftRegistrationStateError, NodeStateError
from osf.models import RegistrationSchema, DraftRegistration, DraftRegistrationContributor, NodeLicense, Node, NodeLog
from osf.utils.permissions import ADMIN, READ, WRITE
from osf_tests.test_node import TestNodeEditableFieldsMixin, TestTagging, TestNodeSubjects
from osf_tests.test_node_license import TestNodeLicenses
from website import settings
from . import factories
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
    # a plain OSF user with no special permissions
    return factories.UserFactory()
@pytest.fixture()
def project(user, auth, fake):
    # a project owned by the `user` fixture, tagged once so that tag
    # propagation can be asserted against it
    ret = factories.ProjectFactory(creator=user)
    ret.add_tag(fake.word(), auth=auth)
    return ret
@pytest.fixture()
def auth(user):
    # an Auth context wrapping the `user` fixture
    return Auth(user)
@pytest.fixture()
def draft_registration(project):
    # a draft registration branched from the `project` fixture
    return factories.DraftRegistrationFactory(branched_from=project)
class TestDraftRegistrations:
    """Behavioural tests for DraftRegistration: factory defaults,
    registering, metadata/registration_responses syncing, and attributes
    copied from the branched-from node.

    Copied from tests/test_registrations/test_models.py.
    """
    def test_factory(self):
        draft = factories.DraftRegistrationFactory()
        assert draft.branched_from is not None
        assert draft.initiator is not None
        assert draft.registration_schema is not None
        user = factories.UserFactory()
        draft = factories.DraftRegistrationFactory(initiator=user)
        assert draft.initiator == user
        node = factories.ProjectFactory()
        draft = factories.DraftRegistrationFactory(branched_from=node)
        assert draft.branched_from == node
        assert draft.initiator == node.creator
        # Pick an arbitrary v2 schema
        schema = RegistrationSchema.objects.filter(schema_version=2).first()
        data = {'some': 'data'}
        draft = factories.DraftRegistrationFactory(registration_schema=schema, registration_metadata=data)
        assert draft.registration_schema == schema
        assert draft.registration_metadata == data

    @mock.patch('website.settings.ENABLE_ARCHIVER', False)
    def test_register(self):
        user = factories.UserFactory()
        auth = Auth(user)
        project = factories.ProjectFactory(creator=user)
        draft = factories.DraftRegistrationFactory(branched_from=project)
        assert not draft.registered_node
        draft.register(auth)
        assert draft.registered_node
        # group member with admin access cannot register
        member = factories.AuthUserFactory()
        osf_group = factories.OSFGroupFactory(creator=user)
        osf_group.make_member(member, auth=auth)
        project.add_osf_group(osf_group, ADMIN)
        draft_2 = factories.DraftRegistrationFactory(branched_from=project)
        assert project.has_permission(member, ADMIN)
        with pytest.raises(PermissionsError):
            draft_2.register(Auth(member))
        assert not draft_2.registered_node

    @mock.patch('website.settings.ENABLE_ARCHIVER', False)
    def test_register_no_title_fails(self):
        user = factories.UserFactory()
        auth = Auth(user)
        project = factories.ProjectFactory(creator=user)
        draft = factories.DraftRegistrationFactory(branched_from=project)
        draft.title = ''
        draft.save()
        with pytest.raises(NodeStateError) as e:
            draft.register(auth)
        assert str(e.value) == 'Draft Registration must have title to be registered'

    def test_update_metadata_updates_registration_responses(self, project):
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=2
        )
        draft = factories.DraftRegistrationFactory(registration_schema=schema, branched_from=project)
        new_metadata = {
            'looked': {
                'comments': [],
                'value': 'Yes',
                'extra': []
            },
            'datacompletion': {
                'comments': [],
                'value': 'No, data collection has not begun',
                'extra': []
            },
            'comments': {
                'comments': [],
                'value': '',
                'extra': []
            }
        }
        draft.update_metadata(new_metadata)
        draft.save()
        # To preserve both workflows, if update_metadata is called,
        # a flattened version of that metadata is stored in
        # registration_responses
        assert draft.registration_responses == {
            'looked': 'Yes',
            'datacompletion': 'No, data collection has not begun',
            'comments': ''
        }

    def test_update_metadata_tracks_changes(self, project):
        draft = factories.DraftRegistrationFactory(branched_from=project)
        draft.registration_metadata = {
            'foo': {
                'value': 'bar',
            },
            'a': {
                'value': 1,
            },
            'b': {
                'value': True
            },
        }
        changes = draft.update_metadata({
            'foo': {
                'value': 'foobar',
            },
            'a': {
                'value': 1,
            },
            'b': {
                'value': True,
            },
            'c': {
                'value': 2,
            },
        })
        draft.save()
        # Only changed ('foo') and newly added ('c') keys are reported.
        for key in ['foo', 'c']:
            assert key in changes

    def test_update_registration_responses(self, project):
        schema = RegistrationSchema.objects.get(
            name='OSF-Standard Pre-Data Collection Registration',
            schema_version=2
        )
        draft = factories.DraftRegistrationFactory(registration_schema=schema, branched_from=project)
        registration_responses = {
            'looked': 'Yes',
            'datacompletion': 'No, data collection has not begun',
            'comments': ''
        }
        draft.update_registration_responses(registration_responses)
        draft.save()
        # The inverse of test_update_metadata_updates_registration_responses:
        # flat responses are expanded into the nested metadata format.
        assert draft.registration_metadata == {
            'looked': {
                'comments': [],
                'value': 'Yes',
                'extra': []
            },
            'datacompletion': {
                'comments': [],
                'value': 'No, data collection has not begun',
                'extra': []
            },
            'comments': {
                'comments': [],
                'value': '',
                'extra': []
            }
        }

    def test_has_active_draft_registrations(self):
        project, project2 = factories.ProjectFactory(), factories.ProjectFactory()
        factories.DraftRegistrationFactory(branched_from=project)
        assert project.has_active_draft_registrations is True
        assert project2.has_active_draft_registrations is False

    def test_draft_registrations_active(self):
        project = factories.ProjectFactory()
        registration = factories.RegistrationFactory(project=project)
        deleted_registration = factories.RegistrationFactory(project=project)
        deleted_registration.is_deleted = True
        deleted_registration.save()
        draft = factories.DraftRegistrationFactory(branched_from=project, user=project.creator)
        draft2 = factories.DraftRegistrationFactory(branched_from=project, user=project.creator)
        draft2.registered_node = deleted_registration
        draft2.save()
        finished_draft = factories.DraftRegistrationFactory(branched_from=project, user=project.creator)
        finished_draft.registered_node = registration
        finished_draft.save()
        # A draft pointing at a *deleted* registration still counts as
        # active; one pointing at a live registration does not.
        assert draft in project.draft_registrations_active.all()
        assert draft2 in project.draft_registrations_active.all()
        assert finished_draft not in project.draft_registrations_active.all()

    def test_update_metadata_interleaves_comments_by_created_timestamp(self, project):
        draft = factories.DraftRegistrationFactory(branched_from=project)
        now = datetime.datetime.today()
        comments = []
        times = (now + datetime.timedelta(minutes=i) for i in range(6))
        for time in times:
            comments.append({
                'created': time.isoformat(),
                'value': 'Foo'
            })
        # Seed with the even-indexed comments ...
        orig_data = {
            'foo': {
                'value': 'bar',
                'comments': [comments[i] for i in range(0, 6, 2)]
            }
        }
        draft.update_metadata(orig_data)
        draft.save()
        assert draft.registration_metadata['foo']['comments'] == [comments[i] for i in range(0, 6, 2)]
        # ... then merge in the odd-indexed ones; result must be in
        # 'created' order, i.e. the full interleaved list.
        new_data = {
            'foo': {
                'value': 'bar',
                'comments': [comments[i] for i in range(1, 6, 2)]
            }
        }
        draft.update_metadata(new_data)
        draft.save()
        assert draft.registration_metadata['foo']['comments'] == comments

    def test_draft_registration_url(self):
        project = factories.ProjectFactory()
        draft = factories.DraftRegistrationFactory(branched_from=project)
        assert draft.url == settings.DOMAIN + 'registries/drafts/{}'.format(draft._id)

    def test_create_from_node_existing(self, user):
        node = factories.ProjectFactory(creator=user)
        member = factories.AuthUserFactory()
        osf_group = factories.OSFGroupFactory(creator=user)
        osf_group.make_member(member, auth=Auth(user))
        node.add_osf_group(osf_group, ADMIN)
        write_contrib = factories.AuthUserFactory()
        subject = factories.SubjectFactory()
        institution = factories.InstitutionFactory()
        user.affiliated_institutions.add(institution)
        title = 'A Study of Elephants'
        description = 'Loxodonta africana'
        category = 'Methods and Materials'
        node.set_title(title, Auth(user))
        node.set_description(description, Auth(user))
        node.category = category
        node.add_contributor(write_contrib, permissions=WRITE)
        GPL3 = NodeLicense.objects.get(license_id='GPL3')
        NEW_YEAR = '2014'
        COPYLEFT_HOLDERS = ['Richard Stallman']
        node.set_node_license(
            {
                'id': GPL3.license_id,
                'year': NEW_YEAR,
                'copyrightHolders': COPYLEFT_HOLDERS
            },
            auth=Auth(user),
            save=True
        )
        node.add_tag('savanna', Auth(user))
        node.add_tag('taxonomy', Auth(user))
        node.set_subjects([[subject._id]], auth=Auth(node.creator))
        node.affiliated_institutions.add(institution)
        node.save()
        draft = DraftRegistration.create_from_node(
            node=node,
            user=user,
            schema=factories.get_default_metaschema(),
        )
        # Assert existing metadata-like node attributes are copied to the draft
        assert draft.title == title
        assert draft.description == description
        assert draft.category == category
        assert user in draft.contributors.all()
        assert write_contrib in draft.contributors.all()
        # Group membership is *not* carried over to the draft.
        assert member not in draft.contributors.all()
        assert not draft.has_permission(member, 'read')
        assert draft.get_permissions(user) == [READ, WRITE, ADMIN]
        assert draft.get_permissions(write_contrib) == [READ, WRITE]
        assert draft.node_license.license_id == GPL3.license_id
        assert draft.node_license.name == GPL3.name
        assert draft.node_license.copyright_holders == COPYLEFT_HOLDERS
        draft_tags = draft.tags.values_list('name', flat=True)
        assert 'savanna' in draft_tags
        assert 'taxonomy' in draft_tags
        assert subject in draft.subjects.all()
        assert institution in draft.affiliated_institutions.all()
        assert draft.branched_from == node

    def test_create_from_node_draft_node(self, user):
        # No node supplied: a fresh DraftNode backs the draft, with the
        # documented defaults below.
        draft = DraftRegistration.create_from_node(
            user=user,
            schema=factories.get_default_metaschema(),
        )
        assert draft.title == 'Untitled'
        assert draft.description == ''
        assert draft.category == ''
        assert user in draft.contributors.all()
        assert len(draft.contributors.all()) == 1
        assert draft.get_permissions(user) == [READ, WRITE, ADMIN]
        assert draft.node_license is None
        draft_tags = draft.tags.values_list('name', flat=True)
        assert len(draft_tags) == 0
        assert draft.subjects.count() == 0
        assert draft.affiliated_institutions.count() == 0

    def test_branched_from_must_be_a_node_or_draft_node(self, user):
        # FIX: `user` is now requested as a fixture parameter; previously the
        # body referenced the module-level fixture *function object* instead
        # of an actual user instance.
        with pytest.raises(DraftRegistrationStateError):
            DraftRegistration.create_from_node(
                user=user,
                node=factories.RegistrationFactory(),
                schema=factories.get_default_metaschema()
            )
        with pytest.raises(DraftRegistrationStateError):
            DraftRegistration.create_from_node(
                user=user,
                node=factories.CollectionFactory(),
                schema=factories.get_default_metaschema()
            )

    def test_can_view_property(self, user):
        project = factories.ProjectFactory(creator=user)
        write_contrib = factories.UserFactory()
        read_contrib = factories.UserFactory()
        non_contrib = factories.UserFactory()
        draft = DraftRegistration.create_from_node(
            user=user,
            node=project,
            schema=factories.get_default_metaschema()
        )
        # Contributorship on the *project* (added after draft creation)
        # does not grant view access to the draft.
        project.add_contributor(non_contrib, ADMIN, save=True)
        draft.add_contributor(write_contrib, WRITE, save=True)
        draft.add_contributor(read_contrib, READ, save=True)
        assert draft.get_permissions(user) == [READ, WRITE, ADMIN]
        assert draft.get_permissions(write_contrib) == [READ, WRITE]
        assert draft.get_permissions(read_contrib) == [READ]
        assert draft.can_view(Auth(user)) is True
        assert draft.can_view(Auth(write_contrib)) is True
        assert draft.can_view(Auth(read_contrib)) is True
        assert draft.can_view(Auth(non_contrib)) is False
class TestSetDraftRegistrationEditableFields(TestNodeEditableFieldsMixin):
    # Reuses the node editable-fields test suite, pointed at a
    # DraftRegistration resource instead of a Node.
    @pytest.fixture()
    def resource(self, project):
        return factories.DraftRegistrationFactory(branched_from=project, title='That Was Then', description='A description')
    @pytest.fixture()
    def model(self):
        return DraftRegistration
class TestDraftRegistrationContributorMethods:
    """Contributor management on DraftRegistration: add/remove/replace,
    visibility toggling, and permission updates (mirrors the Node
    contributor API)."""
    def test_add_contributor(self, draft_registration, user, auth):
        # A user is added as a contributor
        user = factories.UserFactory()
        draft_registration.add_contributor(contributor=user, auth=auth)
        draft_registration.save()
        assert draft_registration.is_contributor(user) is True
        # Default permission level for a new contributor is WRITE.
        assert draft_registration.has_permission(user, ADMIN) is False
        assert draft_registration.has_permission(user, WRITE) is True
        assert draft_registration.has_permission(user, READ) is True
        last_log = draft_registration.logs.all().order_by('-created')[0]
        assert last_log.action == 'contributor_added'
        assert last_log.params['contributors'] == [user._id]

    def test_add_contributors(self, draft_registration, auth):
        user1 = factories.UserFactory()
        user2 = factories.UserFactory()
        draft_registration.add_contributors(
            [
                {'user': user1, 'permissions': ADMIN, 'visible': True},
                {'user': user2, 'permissions': WRITE, 'visible': False}
            ],
            auth=auth
        )
        last_log = draft_registration.logs.all().order_by('-created')[0]
        assert (
            last_log.params['contributors'] ==
            [user1._id, user2._id]
        )
        assert draft_registration.is_contributor(user1)
        assert draft_registration.is_contributor(user2)
        assert user1._id in draft_registration.visible_contributor_ids
        assert user2._id not in draft_registration.visible_contributor_ids
        assert draft_registration.get_permissions(user1) == [READ, WRITE, ADMIN]
        assert draft_registration.get_permissions(user2) == [READ, WRITE]

    def test_cant_add_creator_as_contributor_twice(self, draft_registration, user):
        draft_registration.add_contributor(contributor=user)
        draft_registration.save()
        assert len(draft_registration.contributors) == 1

    def test_cant_add_same_contributor_twice(self, draft_registration):
        contrib = factories.UserFactory()
        draft_registration.add_contributor(contributor=contrib)
        draft_registration.save()
        draft_registration.add_contributor(contributor=contrib)
        draft_registration.save()
        # Creator + contrib only; the duplicate add is a no-op.
        assert len(draft_registration.contributors) == 2

    def test_remove_unregistered_contributor_removes_unclaimed_record(self, draft_registration, auth):
        new_user = draft_registration.add_unregistered_contributor(
            fullname='David Davidson', email='david@davidson.com', auth=auth)
        draft_registration.save()
        assert draft_registration.is_contributor(new_user)  # sanity check
        assert draft_registration._primary_key in new_user.unclaimed_records
        draft_registration.remove_contributor(
            auth=auth,
            contributor=new_user
        )
        draft_registration.save()
        new_user.refresh_from_db()
        assert draft_registration.is_contributor(new_user) is False
        assert draft_registration._primary_key not in new_user.unclaimed_records

    def test_is_contributor(self, draft_registration):
        contrib, noncontrib = factories.UserFactory(), factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=contrib, draft_registration=draft_registration)
        assert draft_registration.is_contributor(contrib) is True
        assert draft_registration.is_contributor(noncontrib) is False
        assert draft_registration.is_contributor(None) is False

    def test_visible_initiator(self, project, user):
        project_contributor = project.contributor_set.get(user=user)
        assert project_contributor.visible is True
        draft_reg = factories.DraftRegistrationFactory(branched_from=project, initiator=user)
        draft_reg_contributor = draft_reg.contributor_set.get(user=user)
        assert draft_reg_contributor.visible is True

    def test_non_visible_initiator(self, project, user):
        # Initiator visibility on the draft mirrors their visibility on
        # the branched-from project.
        invisible_user = factories.UserFactory()
        project.add_contributor(contributor=invisible_user, permissions=ADMIN, visible=False)
        invisible_project_contributor = project.contributor_set.get(user=invisible_user)
        assert invisible_project_contributor.visible is False
        draft_reg = factories.DraftRegistrationFactory(branched_from=project, initiator=invisible_user)
        invisible_draft_reg_contributor = draft_reg.contributor_set.get(user=invisible_user)
        assert invisible_draft_reg_contributor.visible is False

    def test_visible_contributor_ids(self, draft_registration, user):
        visible_contrib = factories.UserFactory()
        invisible_contrib = factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=visible_contrib, draft_registration=draft_registration, visible=True)
        DraftRegistrationContributor.objects.create(user=invisible_contrib, draft_registration=draft_registration, visible=False)
        assert visible_contrib._id in draft_registration.visible_contributor_ids
        assert invisible_contrib._id not in draft_registration.visible_contributor_ids

    def test_visible_contributors(self, draft_registration, user):
        visible_contrib = factories.UserFactory()
        invisible_contrib = factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=visible_contrib, draft_registration=draft_registration, visible=True)
        DraftRegistrationContributor.objects.create(user=invisible_contrib, draft_registration=draft_registration, visible=False)
        assert visible_contrib in draft_registration.visible_contributors
        assert invisible_contrib not in draft_registration.visible_contributors

    def test_set_visible_false(self, draft_registration, auth):
        contrib = factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=contrib, draft_registration=draft_registration, visible=True)
        draft_registration.set_visible(contrib, visible=False, auth=auth)
        draft_registration.save()
        assert DraftRegistrationContributor.objects.filter(user=contrib, draft_registration=draft_registration, visible=False).exists() is True
        last_log = draft_registration.logs.all().order_by('-created')[0]
        assert last_log.user == auth.user
        assert last_log.action == NodeLog.MADE_CONTRIBUTOR_INVISIBLE

    def test_set_visible_true(self, draft_registration, auth):
        contrib = factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=contrib, draft_registration=draft_registration, visible=False)
        draft_registration.set_visible(contrib, visible=True, auth=auth)
        draft_registration.save()
        assert DraftRegistrationContributor.objects.filter(user=contrib, draft_registration=draft_registration, visible=True).exists() is True
        last_log = draft_registration.logs.all().order_by('-created')[0]
        assert last_log.user == auth.user
        assert last_log.action == NodeLog.MADE_CONTRIBUTOR_VISIBLE

    def test_set_visible_is_noop_if_visibility_is_unchanged(self, draft_registration, auth):
        visible, invisible = factories.UserFactory(), factories.UserFactory()
        DraftRegistrationContributor.objects.create(user=visible, draft_registration=draft_registration, visible=True)
        DraftRegistrationContributor.objects.create(user=invisible, draft_registration=draft_registration, visible=False)
        original_log_count = draft_registration.logs.count()
        draft_registration.set_visible(invisible, visible=False, auth=auth)
        draft_registration.set_visible(visible, visible=True, auth=auth)
        draft_registration.save()
        # No-op visibility changes must not generate log entries.
        assert draft_registration.logs.count() == original_log_count

    def test_set_visible_contributor_with_only_one_contributor(self, draft_registration, user):
        with pytest.raises(ValueError) as excinfo:
            draft_registration.set_visible(user=user, visible=False, auth=None)
        assert str(excinfo.value) == 'Must have at least one visible contributor'

    def test_set_visible_missing(self, draft_registration):
        with pytest.raises(ValueError):
            draft_registration.set_visible(factories.UserFactory(), True)

    def test_remove_contributor(self, draft_registration, auth):
        # A user is added as a contributor
        user2 = factories.UserFactory()
        draft_registration.add_contributor(contributor=user2, auth=auth, save=True)
        assert user2 in draft_registration.contributors
        assert draft_registration.has_permission(user2, WRITE)
        # The user is removed
        draft_registration.remove_contributor(auth=auth, contributor=user2)
        draft_registration.reload()
        assert user2 not in draft_registration.contributors
        assert draft_registration.get_permissions(user2) == []
        assert draft_registration.logs.latest().action == 'contributor_removed'
        assert draft_registration.logs.latest().params['contributors'] == [user2._id]

    def test_remove_contributors(self, draft_registration, auth):
        user1 = factories.UserFactory()
        user2 = factories.UserFactory()
        draft_registration.add_contributors(
            [
                {'user': user1, 'permissions': WRITE, 'visible': True},
                {'user': user2, 'permissions': WRITE, 'visible': True}
            ],
            auth=auth
        )
        assert user1 in draft_registration.contributors
        assert user2 in draft_registration.contributors
        assert draft_registration.has_permission(user1, WRITE)
        assert draft_registration.has_permission(user2, WRITE)
        draft_registration.remove_contributors(auth=auth, contributors=[user1, user2], save=True)
        draft_registration.reload()
        assert user1 not in draft_registration.contributors
        assert user2 not in draft_registration.contributors
        assert draft_registration.get_permissions(user1) == []
        assert draft_registration.get_permissions(user2) == []
        assert draft_registration.logs.latest().action == 'contributor_removed'

    def test_replace_contributor(self, draft_registration):
        contrib = factories.UserFactory()
        draft_registration.add_contributor(contrib, auth=Auth(draft_registration.creator))
        draft_registration.save()
        assert contrib in draft_registration.contributors.all()  # sanity check
        replacer = factories.UserFactory()
        old_length = draft_registration.contributors.count()
        draft_registration.replace_contributor(contrib, replacer)
        draft_registration.save()
        new_length = draft_registration.contributors.count()
        assert contrib not in draft_registration.contributors.all()
        assert replacer in draft_registration.contributors.all()
        assert old_length == new_length
        # test unclaimed_records is removed
        assert (
            draft_registration._id not in
            contrib.unclaimed_records.keys()
        )

    def test_permission_override_fails_if_no_admins(self, draft_registration, user):
        # User has admin permissions because they are the creator
        # Cannot lower permissions
        with pytest.raises(DraftRegistrationStateError):
            draft_registration.add_contributor(user, permissions=WRITE)

    def test_update_contributor(self, draft_registration, auth):
        new_contrib = factories.AuthUserFactory()
        draft_registration.add_contributor(new_contrib, permissions=WRITE, auth=auth)
        assert draft_registration.get_permissions(new_contrib) == [READ, WRITE]
        assert draft_registration.get_visible(new_contrib) is True
        draft_registration.update_contributor(
            new_contrib,
            READ,
            False,
            auth=auth
        )
        assert draft_registration.get_permissions(new_contrib) == [READ]
        assert draft_registration.get_visible(new_contrib) is False

    def test_update_contributor_non_admin_raises_error(self, draft_registration, auth):
        non_admin = factories.AuthUserFactory()
        draft_registration.add_contributor(
            non_admin,
            permissions=WRITE,
            auth=auth
        )
        with pytest.raises(PermissionsError):
            draft_registration.update_contributor(
                non_admin,
                None,
                False,
                auth=Auth(non_admin)
            )

    def test_update_contributor_only_admin_raises_error(self, draft_registration, auth):
        # Demoting the sole admin must be rejected.
        with pytest.raises(DraftRegistrationStateError):
            draft_registration.update_contributor(
                auth.user,
                WRITE,
                True,
                auth=auth
            )

    def test_update_contributor_non_contrib_raises_error(self, draft_registration, auth):
        non_contrib = factories.AuthUserFactory()
        with pytest.raises(ValueError):
            draft_registration.update_contributor(
                non_contrib,
                ADMIN,
                True,
                auth=auth
            )
class TestDraftRegistrationAffiliatedInstitutions:
    # Institution affiliation on a draft requires the acting user to be
    # affiliated with that institution themselves.
    def test_affiliated_institutions(self, draft_registration):
        inst1, inst2 = factories.InstitutionFactory(), factories.InstitutionFactory()
        user = draft_registration.initiator
        user.affiliated_institutions.add(inst1, inst2)
        draft_registration.add_affiliated_institution(inst1, user=user)
        assert inst1 in draft_registration.affiliated_institutions.all()
        assert inst2 not in draft_registration.affiliated_institutions.all()
        draft_registration.remove_affiliated_institution(inst1, user=user)
        assert inst1 not in draft_registration.affiliated_institutions.all()
        assert inst2 not in draft_registration.affiliated_institutions.all()
        # Once the user loses the affiliation, adding must fail.
        user.affiliated_institutions.remove(inst1)
        with pytest.raises(UserNotAffiliatedError):
            draft_registration.add_affiliated_institution(inst1, user=user)
class TestDraftRegistrationTagging(TestTagging):
    # Runs the shared tagging test suite against a DraftRegistration.
    @pytest.fixture()
    def node(self, user):
        # Overrides "node" resource on tag test, to make it a draft registration instead
        project = Node.objects.create(title='Project title', creator_id=user.id)
        return factories.DraftRegistrationFactory(branched_from=project)
class TestDraftRegistrationLicenses(TestNodeLicenses):
    # Runs the shared node-license test suite against a DraftRegistration.
    # YEAR / COPYRIGHT_HOLDERS come from the TestNodeLicenses base class.
    @pytest.fixture()
    def node(self, draft_registration, node_license, user):
        # Overrides "node" resource to make it a draft registration instead
        draft_registration.node_license = factories.NodeLicenseRecordFactory(
            node_license=node_license,
            year=self.YEAR,
            copyright_holders=self.COPYRIGHT_HOLDERS
        )
        draft_registration.save()
        return draft_registration
class TestDraftRegistrationSubjects(TestNodeSubjects):
    # Runs the shared subjects test suite against a DraftRegistration,
    # plus one extra check that read-only contributors cannot set subjects.
    @pytest.fixture()
    def project(self, draft_registration):
        # Overrides "project" resource to make it a draft registration instead
        return draft_registration
    @pytest.fixture()
    def subject(self):
        return factories.SubjectFactory()
    @pytest.fixture()
    def read_contrib(self, project):
        # A contributor holding only READ permission on the draft.
        read_contrib = factories.AuthUserFactory()
        project.add_contributor(read_contrib, auth=Auth(project.creator), permissions=READ)
        project.save()
        return read_contrib
    def test_cannot_set_subjects(self, project, subject, read_contrib):
        initial_subjects = list(project.subjects.all())
        with pytest.raises(PermissionsError):
            project.set_subjects([[subject._id]], auth=Auth(read_contrib))
        project.reload()
        # Subjects must be untouched after the failed attempt.
        assert initial_subjects == list(project.subjects.all())
| apache-2.0 |
mshahbaz/bcc-deprecated | tests/cc/test_trace4.py | 1 | 1026 | #!/usr/bin/env python
# Copyright (c) PLUMgrid, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
from bcc import BPF
import os
import sys
from unittest import main, TestCase
class TestKprobeRgx(TestCase):
    # Verifies that kprobe/kretprobe handlers can be attached by event
    # *regex* (event_re) rather than by exact symbol name.
    def setUp(self):
        # BPF program: counts probe entries under array key 1 ("hello")
        # and probe returns under array key 2 ("goodbye").
        self.b = BPF(text="""
typedef struct { int idx; } Key;
typedef struct { u64 val; } Val;
BPF_TABLE("array", Key, Val, stats, 3);
int hello(void *ctx) {
stats.lookup_or_init(&(Key){1}, &(Val){0})->val++;
return 0;
}
int goodbye(void *ctx) {
stats.lookup_or_init(&(Key){2}, &(Val){0})->val++;
return 0;
}
""")
        # Attach to every syscall matching the regex (SyS_bpf etc.).
        self.b.attach_kprobe(event_re="^SyS_bp.*", fn_name="hello")
        self.b.attach_kretprobe(event_re="^SyS_bp.*", fn_name="goodbye")
    def test_send1(self):
        k1 = self.b["stats"].Key(1)
        k2 = self.b["stats"].Key(2)
        # Entry count leads return count by exactly one — presumably
        # because reading the map itself happens inside an in-flight
        # bpf() syscall; confirm against bcc map-read internals.
        self.assertEqual(self.b["stats"][k1].val, self.b["stats"][k2].val + 1)
if __name__ == "__main__":
    # Run the unittest suite when the module is executed directly.
    main()
| apache-2.0 |
sdopoku/flask-hello-world | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcharsetprober.py | 2924 | 3268 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
class MultiByteCharSetProber(CharSetProber):
    """Base class for multi-byte encoding probers.

    Combines a coding state machine (``_mCodingSM``, which validates byte
    sequences for the candidate encoding) with a character distribution
    analyzer (``_mDistributionAnalyzer``, which scores how typical the
    completed characters are).  Concrete subclasses install both helpers.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self._mDistributionAnalyzer = None
        self._mCodingSM = None
        # Two-byte rolling window: [previous chunk's last byte, current byte].
        self._mLastChar = [0, 0]
    def reset(self):
        # Reset base prober state plus both helper engines, if installed.
        CharSetProber.reset(self)
        if self._mCodingSM:
            self._mCodingSM.reset()
        if self._mDistributionAnalyzer:
            self._mDistributionAnalyzer.reset()
        self._mLastChar = [0, 0]
    def get_charset_name(self):
        # Abstract here: concrete probers return their encoding name.
        pass
    def feed(self, aBuf):
        """Feed a chunk of bytes and return the resulting detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Illegal byte sequence for this encoding: rule it out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just completed; score it.  The first
                # byte of this chunk pairs with the last byte of the
                # previous chunk via _mLastChar.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the chunk's final byte for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            if (self._mDistributionAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                # Enough evidence at high confidence: shortcut to a hit.
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        return self._mDistributionAnalyzer.get_confidence()
| gpl-2.0 |
asherkhb/coge | bin/last_wrapper/Bio/PopGen/SimCoal/Template.py | 4 | 7478 | # Copyright 2007 by Tiago Antao. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from math import sqrt
from sys import argv,exit
from os import sep, mkdir
import re
from Bio.PopGen.SimCoal import builtin_tpl_dir
def exec_template(template):
    """Evaluate every ``!!!expression!!!`` placeholder in *template*.

    Markers are replaced one at a time, left to right, with
    ``str(eval(expression))`` until none remain, and the expanded text is
    returned.  NOTE: relies on ``eval``, so templates must be trusted.
    """
    result = template
    while True:
        found = re.search('!!!(.*?)!!!', result, re.MULTILINE)
        if not found:
            return result
        expression = found.groups()[0]
        value = str(eval(expression))
        result = result.replace('!!!' + expression + '!!!', value, 1)
def process_para(in_string, out_file_prefix, para_list, curr_values):
    """Recursively expand parameter ranges into one .par file per combination.

    in_string       - template text with ?name placeholders
    out_file_prefix - path prefix for generated files
    para_list       - remaining [(name, range_of_values)] to expand
    curr_values     - [(name, value)] already fixed on this recursion path
    Returns the list of file names written (without the .par extension).
    """
    if (para_list == []):
        # Base case: all parameters chosen — substitute and write the file.
        template = in_string
        f_name = out_file_prefix
        #f_name += '_' + str(total_size)
        for tup in curr_values:
            name, val = tup
            # Each chosen value is appended to the file name so every
            # combination gets a distinct output file.
            f_name += '_' + str(val)
            #reg = re.compile('\?' + name, re.MULTILINE)
            #template = re.sub(reg, str(val), template)
            template = template.replace('?'+name, str(val))
        f = open(f_name + '.par', 'w')
        #executed_template = template
        # Evaluate any !!!expr!!! markers, then normalise line endings.
        executed_template = exec_template(template)
        clean_template = executed_template.replace('\r\n','\n').replace('\n\n','\n')
        f.write(clean_template)
        f.close()
        return [f_name]
    else:
        # Recursive case: fan out over every value of the next parameter.
        name, rng = para_list[0]
        fnames = []
        for val in rng:
            new_values = [(name, val)]
            new_values.extend(curr_values)
            more_names = process_para(in_string, out_file_prefix, para_list[1:], new_values)
            fnames.extend(more_names)
        return fnames
def dupe(motif, times):
    """Return *motif* repeated *times* times, each copy CRLF-terminated."""
    return (motif + '\r\n') * times
def get_xy_from_matrix(x_max, y_max, pos):
    """Convert a 1-based linear deme index into 0-based (x, y) grid coords.

    *pos* counts row-major across a grid *x_max* demes wide; *y_max* is
    accepted for call-signature symmetry but is not needed for the math.
    Uses floor division (//) so the coordinates stay integers on
    Python 3 as well — plain / would yield a float y there.
    """
    y = (pos - 1) // x_max
    x = (pos - 1) % x_max
    return x, y
def get_step_2d(x_max, y_max, x, y, mig):
    """One stepping-stone migration-matrix cell: *mig* between orthogonal
    grid neighbours, zero otherwise (each cell carries a trailing space)."""
    my_x, my_y = get_xy_from_matrix(x_max, y_max, y)
    other_x, other_y = get_xy_from_matrix(x_max, y_max, x)
    is_neighbour = (my_x - other_x) ** 2 + (my_y - other_y) ** 2 == 1
    return (str(mig) + ' ') if is_neighbour else '0 '
def generate_ssm2d_mat(x_max, y_max, mig):
    """Full 2D stepping-stone migration matrix for an x_max * y_max grid,
    one CRLF-terminated row per deme."""
    demes = x_max * y_max
    rows = []
    for row in range(1, demes + 1):
        cells = [get_step_2d(x_max, y_max, row, col, mig)
                 for col in range(1, demes + 1)]
        rows.append(''.join(cells) + '\r\n')
    return ''.join(rows)
def generate_island_mat(total_size, mig):
    """Island-model migration matrix: zero on the diagonal and a deferred
    ``!!!mig!!!`` evaluation marker everywhere else (CRLF row endings)."""
    off_diagonal = '!!!' + str(mig) + '!!! '
    lines = []
    for row in range(1, total_size + 1):
        cells = ['0 ' if row == col else off_diagonal
                 for col in range(1, total_size + 1)]
        lines.append(''.join(cells) + '\r\n')
    return ''.join(lines)
def generate_null_mat(total_size):
    """Square all-zero migration matrix with CRLF row endings."""
    row = '0 ' * total_size + '\r\n'
    return row * total_size
def generate_join_events(t, total_size, join_size, orig_size):
    """Historical-event lines merging all demes into deme 0 at time *t*;
    the last event also rescales the collected deme size."""
    lines = []
    for deme in range(1, total_size - 1):
        lines.append(str(t) + ' ' + str(deme) + ' 0 1 1 0 1\r\n')
    scale = 1.0 * total_size * join_size / orig_size
    lines.append(str(t) + ' ' + str(total_size - 1) + ' 0 1 '
                 + str(scale) + ' 0 1\r\n')
    return ''.join(lines)
def no_processor(in_string):
    # Identity processor: default hook used when a model needs no
    # model-specific preprocessing of the template text.
    return in_string
def process_text(in_string, out_file_prefix, para_list, curr_values,
                 specific_processor):
    """Run the model-specific preprocessor over *in_string*, then expand
    all parameter combinations into .par files via process_para and
    return the list of generated file names.

    NOTE(review): curr_values is accepted but not forwarded — expansion
    always starts from an empty value list; confirm against callers.
    """
    prepared = specific_processor(in_string)
    return process_para(prepared, out_file_prefix, para_list, [])
#def prepare_dir():
# try:
# mkdir(sep.join([Config.dataDir, 'SimCoal'])) #Should exist, but...
# except OSError:
# pass #Its ok if already exists
# try:
# mkdir(sep.join([Config.dataDir, 'SimCoal', 'runs']))
# except OSError:
# pass #Its ok if already exists
#sep is because of jython
def generate_model(par_stream, out_prefix, params,
    specific_processor = no_processor, out_dir = '.'):
    """Read a .par template from *par_stream* and write one expanded model
    file per parameter combination under *out_dir*.

    Returns the list of generated file names (without the .par suffix).
    """
    template_text = par_stream.read()
    prefix_path = sep.join([out_dir, out_prefix])
    return process_text(template_text, prefix_path, params, [],
                        specific_processor)
def get_demography_template(stream, model, tp_dir=None):
    '''
    Copies a demography template into stream.

    The copied model text typically still contains parameter placeholders
    and most probably needs to be sent to generate_model.

    stream - Writable stream.
    model  - Template name (reads the file "<model>.par").
    tp_dir - Directory where to find the template; if None an internal
             (builtin) template is used.
    '''
    if tp_dir is None:
        # Internal template shipped with the package
        src_name = sep.join([builtin_tpl_dir, model + '.par'])
    else:
        # External, user-supplied template
        src_name = sep.join([tp_dir, model + '.par'])
    f = open(src_name, 'r')
    try:
        # Iterate the file directly instead of a manual readline loop.
        for l in f:
            stream.write(l)
    finally:
        # Close even if stream.write raises (the original leaked here).
        f.close()
def _gen_loci(stream, loci):
stream.write('//Number of contiguous linkage blocks in chromosome\n')
stream.write(str(len(loci)) + '\n')
stream.write('//Per Block: Data type, No. of loci, Recombination rate to the right-side locus, plus optional parameters\n')
for locus in loci:
stream.write(' '.join([locus[0]] +
map(lambda x: str(x), list(locus[1])
)) + '\n')
def get_chr_template(stream, chrs):
    '''
    Writes a Simcoal2 loci template part.

    stream - Writable stream.
    chrs   - Chromosome list.

    Current loci list:
       [(chr_repeats,[(marker, (params))])]
       chr_repeats --> Number of chromosome repeats
       marker      --> 'SNP', 'DNA', 'RFLP', 'MICROSAT'
       params      --> Simcoal2 parameters for markers (list of floats
                       or ints - if to be processed by generate_model)
    '''
    # Total chromosome count across all repeat groups.
    # (was reduce(); sum() is clearer and works without an import on Py3)
    num_chrs = sum(chromosome[0] for chromosome in chrs)
    stream.write('//Number of independent (unlinked) chromosomes, and "chromosome structure" flag: 0 for identical structure across chromosomes, and 1 for different structures on different chromosomes.\n')
    if len(chrs) > 1 or num_chrs == 1:
        # Heterogeneous structure (or a single chromosome): flag 1.
        stream.write(str(num_chrs) + ' 1\n')
    else:
        # One group repeated identically: flag 0.
        stream.write(str(num_chrs) + ' 0\n')
    for chromosome in chrs:  # renamed from 'chr' (shadowed the builtin)
        repeats = chromosome[0]
        loci = chromosome[1]
        if len(chrs) == 1:
            # Flag 0 case: the structure block is written only once.
            _gen_loci(stream, loci)
        else:
            for _ in range(repeats):
                _gen_loci(stream, loci)
def generate_simcoal_from_template(model, chrs, params, out_dir = '.', tp_dir=None):
    '''
    Writes a complete SimCoal2 template file.

    Joins the output of get_demography_template and get_chr_template
    into a temporary 'tmp.par' file, which is then fed to
    generate_model.  Please check those three functions for the
    meaning of model, chrs, params and tp_dir.
    '''
    tmp_path = out_dir + sep + 'tmp.par'
    # First pass: assemble the full template on disk.
    tmp_stream = open(tmp_path, 'w')
    get_demography_template(tmp_stream, model, tp_dir)
    get_chr_template(tmp_stream, chrs)
    tmp_stream.close()
    # Second pass: re-read it and expand the parameter combinations.
    par_stream = open(tmp_path, 'r')
    generate_model(par_stream, model, params, out_dir = out_dir)
    par_stream.close()
| bsd-2-clause |
simleo/pydoop-features | scripts/features.py | 1 | 1380 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2014-2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""
Distributed image feature calculation with wnd-charm.
"""
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
from pydoop.avrolib import AvroContext
from pyfeatures.bioimg import BioImgPlane
from pyfeatures.feature_calc import calc_features, to_avro
class Mapper(api.Mapper):
    """Pydoop mapper computing wnd-charm features for one image plane.

    Each input value is an Avro record describing a BioImgPlane; the
    emitted record is the feature vector plus the plane coordinates.
    """
    def map(self, ctx):
        plane = BioImgPlane(ctx.value)
        pixels = plane.get_xy()
        # TODO: support tiling
        out_rec = to_avro(calc_features(pixels, plane.name))
        # Carry the plane coordinates through to the output record.
        for attr in ('img_path', 'series', 'z', 'c', 't'):
            out_rec[attr] = getattr(plane, attr)
        ctx.emit(None, out_rec)
def __main__():
    """Pydoop entry point: run the map task with an Avro-aware context."""
    factory = pp.Factory(mapper_class=Mapper)
    pp.run_task(factory, context_class=AvroContext)
| apache-2.0 |
Chuban/moose | framework/contrib/nsiqcppstyle/nsiqcppstyle_rulemanager.py | 43 | 10691 | # Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os #@UnusedImport
import sys #@UnusedImport
import sre_compile
from nsiqcppstyle_util import * #@UnusedWildImport
class RuleManager :
    """Discovers the available nsiqcppstyle rule modules, loads the
    selected ones, and dispatches the checker callbacks they register.

    Rule modules live as ``<name>.py`` files in ``<runtimePath>/rules``;
    importing a rule module causes it to call one of the Add* methods
    below to hook itself into the appropriate checking phase.
    NOTE: this module is Python 2 code (print statements, has_key).
    """
    def __init__(self, runtimePath) :
        # Scan the runtime "rules" directory for candidate <name>.py modules.
        self.availRuleNames = []
        basePath = os.path.join(runtimePath, "rules")
        ruleFiles = os.listdir(basePath)
        rulePattern = sre_compile.compile("^(.*)\.py$")
        for eachRuleFile in ruleFiles :
            if os.path.isfile(os.path.join(basePath, eachRuleFile)) :
                ruleMatch = rulePattern.match(eachRuleFile)
                if ruleMatch != None and eachRuleFile.find("__init__") == -1 :
                    ruleName = ruleMatch.group(1)
                    self.availRuleNames.append(ruleName)
        self.availRuleCount = len(self.availRuleNames)
        self.availRuleModules = {}
        # One list per checking phase; rules register into them via Add*.
        self.loadedRule = []
        self.rules = []
        self.preprocessRules = []
        self.functionNameRules = []
        self.functionScopeRules = []
        self.typeNameRules = []
        self.typeScopeRules = []
        self.lineRules = []
        self.fileEndRules = []
        self.fileStartRules = []
        self.projectRules = []
        self.rollBackImporter = None
#        self.LoadAllRules()
    def LoadRules(self, checkingRuleNames, printRule = True):
        """
        Load rules. Resets previously loaded rules before loading.

        checkingRuleNames - rule module names to import (each import
        triggers the rule's self-registration).
        printRule - if True, print which rules were applied.
        """
        self.ResetRules()
        self.ResetRegisteredRules()
        # RollbackImporter lets a later LoadRules() force-reload rule modules.
        if self.rollBackImporter != None :
            self.rollBackImporter.uninstall()
        self.rollBackImporter = RollbackImporter()
        if printRule :
            print "======================================================================================"
        for ruleName in checkingRuleNames :
            count = self.availRuleNames.count(ruleName)
            if count == 0 :
                print "%s does not exist or incompatible." % ruleName
                continue
            else :
                if printRule :
                    print " - ", ruleName, "is applied."
                # Importing the module runs its registration side effects.
                ruleModule = __import__("rules."+ruleName)
                self.loadedRule.append(ruleModule)
        if len(self.loadedRule) == 0 :
            print " No Rule is specified. Please configure rules in filefilter.txt."
        if printRule :
            print "======================================================================================"
    def ResetRules(self):
        # Drop references to the previously imported rule modules.
        self.loadedRule = []
    ############################################################################
    # Rule Runner
    # Each Run* method snapshots the lexer position (Backup/Restore) so
    # every rule starts from the same token.
    ############################################################################
    def RunPreprocessRule(self, lexer, contextStack):
        """ Run rules registered for preprocessor blocks """
        for preprocessRule in self.preprocessRules :
            data = lexer.Backup()
            preprocessRule(lexer, contextStack)
            lexer.Restore(data)
    def RunFunctionNameRule(self, lexer, functionFullName, decl, contextStack, functionContext) :
        """ Run rules registered for function names """
        for eachFunctionNameRule in self.functionNameRules :
            data = lexer.Backup()
            eachFunctionNameRule(lexer, functionFullName, decl, contextStack, functionContext)
            lexer.Restore(data)
    def RunFunctionScopeRule(self, lexer, contextStack):
        """ Run rules registered for function bodies """
        for eachFunctionScopeRule in self.functionScopeRules :
            data = lexer.Backup()
            eachFunctionScopeRule(lexer, contextStack)
            lexer.Restore(data)
    def RunTypeNameRule(self, lexer, typeName, typeFullName, decl, contextStack, typeContext):
        """ Run rules registered for type names """
        for typeNameRule in self.typeNameRules :
            data = lexer.Backup()
            typeNameRule(lexer, typeName, typeFullName, decl, contextStack, typeContext)
            lexer.Restore(data)
    def RunTypeScopeRule(self, lexer, contextStack):
        """ Run rules registered for type definition bodies """
        for typeScopeRule in self.typeScopeRules :
            data = lexer.Backup()
            typeScopeRule(lexer, contextStack)
            lexer.Restore(data)
    def RunRule(self, lexer, contextStack):
        """ Run rules registered for every token """
        for rule in self.rules :
            data = lexer.Backup()
            rule(lexer, contextStack)
            lexer.Restore(data)
    def RunLineRule(self, lexer, line, lineno):
        """ Run rules registered for each source line. """
        for lineRule in self.lineRules :
            data = lexer.Backup()
            lineRule(lexer, line, lineno)
            lexer.Restore(data)
    def RunFileEndRule(self, lexer, filename, dirname):
        """ Run rules registered for the end of each file. """
        for fileEndRule in self.fileEndRules :
            data = lexer.Backup()
            fileEndRule(lexer, filename, dirname)
            lexer.Restore(data)
    def RunFileStartRule(self, lexer, filename, dirname):
        """ Run rules registered for the start of each file. """
        for fileStartRule in self.fileStartRules :
            data = lexer.Backup()
            fileStartRule(lexer, filename, dirname)
            lexer.Restore(data)
    def RunProjectRules(self, targetName):
        """ Run rules which run once per project (no lexer involved). """
        for projectRule in self.projectRules :
            projectRule(targetName)
    ############################################################################
    # Rule Register Methods (called by rule modules on import)
    ############################################################################
    def ResetRegisteredRules(self):
        """ Reset all registered rules. """
        del self.functionNameRules[:]
        del self.functionScopeRules[:]
        del self.lineRules[:]
        del self.rules[:]
        del self.typeNameRules[:]
        del self.typeScopeRules[:]
        del self.fileStartRules[:]
        del self.fileEndRules[:]
        del self.projectRules[:]
        del self.preprocessRules[:]
    def AddPreprocessRule(self, preprocessRule):
        """ Add rule which runs in preprocess statements """
        self.preprocessRules.append(preprocessRule)
    def AddFunctionScopeRule(self, functionScopeRule):
        """ Add rule which runs in function scope """
        self.functionScopeRules.append(functionScopeRule)
    def AddFunctionNameRule(self, functionRule):
        """ Add rule on the function name place"""
        self.functionNameRules.append(functionRule)
    def AddLineRule(self, lineRule):
        """ Add rule on the each line """
        self.lineRules.append(lineRule)
    def AddRule(self, rule):
        """ Add rule on any token """
        self.rules.append(rule)
    def AddTypeNameRule(self, typeNameRule):
        """ Add rule on any type (class / struct / union / namesapce / enum) """
        self.typeNameRules.append(typeNameRule)
    def AddTypeScopeRule(self, typeScopeRule):
        """ Add rule on the any type definition scope """
        self.typeScopeRules.append(typeScopeRule)
    def AddFileEndRule(self, fileEndRule):
        """
        Add rule on the file end
        Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
        lexer is the lexer used to analyze the source. it points the end token of source.
        filename is the filename analyzed.
        dirname is the file directory.
        """
        self.fileEndRules.append(fileEndRule)
    def AddFileStartRule(self, fileStartRule):
        """
        Add rule on the file start
        Added Rule should be function with following prototype "def RunRule(lexer, filename, dirname)"
        lexer is the lexer used to analyze the source. it points the start token of source.
        filename is the filename analyzed.
        dirname is the file directory.
        """
        self.fileStartRules.append(fileStartRule)
    def AddProjectRules(self, projectRule):
        """
        Add rule on the project
        Added Rule should be function with following prototype "def RunRule(targetName)"
        targetName is the analysis target directory.
        """
        self.projectRules.append(projectRule)
class RollbackImporter:
    """Hooks the global ``__import__`` to track rule-module imports.

    uninstall() evicts modules imported while the hook was active (those
    whose name contains "rules") from sys.modules, so a subsequent import
    re-executes them -- this is how rule re-registration works.
    NOTE: Python 2 code (``apply``, ``has_key``).
    """
    def __init__(self):
        "Creates an instance and installs as the global importer"
        # Snapshot of sys.modules taken before any rule import happens.
        self.previousModules = sys.modules.copy()
        self.realImport = __builtins__["__import__"]
        __builtins__["__import__"] = self._import
        self.newModules = {}
    def _import(self, name, globals=None, locals=None, fromlist=[]):
        # Delegate to the real importer, but remember rule-module names.
        result = apply(self.realImport, (name, globals, locals, fromlist))
        if name.find("rules") != -1 :
            self.newModules[name] = 1
        return result
    def uninstall(self):
        # Evict rule modules that were newly imported under this hook.
        for modname in self.newModules.keys():
            if modname.find("rules") != -1 :
                if not self.previousModules.has_key(modname):
                    # Force reload when modname next imported
                    del(sys.modules[modname])
        # Restore the original importer.
        __builtins__["__import__"] = self.realImport
# Module-level singleton used by the rest of nsiqcppstyle.
ruleManager = RuleManager(GetRuntimePath())
| lgpl-2.1 |
evilmartians/foundry | vendor/lit/setup.py | 113 | 1878 | import lit
import os
from setuptools import setup, find_packages
# setuptools expects to be invoked from within the directory of setup.py, but it
# is nice to allow:
# python path/to/setup.py install
# to work (for scripts, etc.)
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
name = "lit",
version = lit.__version__,
author = lit.__author__,
author_email = lit.__email__,
url = 'http://llvm.org',
license = 'BSD',
description = "A Software Testing Tool",
keywords = 'test C++ automatic discovery',
long_description = """\
*lit*
+++++
About
=====
*lit* is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. *lit* is
designed to be a lightweight testing tool with as simple a user interface as
possible.
Features
========
* Portable!
* Flexible test discovery.
* Parallel test execution.
* Support for multiple test formats and test suite designs.
Documentation
=============
The official *lit* documentation is in the man page, available online at the LLVM
Command Guide: http://llvm.org/cmds/lit.html.
Source
======
The *lit* source is available as part of LLVM, in the LLVM SVN repository:
http://llvm.org/svn/llvm-project/llvm/trunk/utils/lit.
""",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: University of Illinois/NCSA Open Source License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Testing',
],
zip_safe = False,
packages = find_packages(),
entry_points = {
'console_scripts': [
'lit = lit:main',
],
}
)
| mit |
minhphung171093/OpenERP_V8 | openerp/addons/account/report/account_aged_partner_balance.py | 152 | 21511 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class aged_trial_report(report_sxw.rml_parse, common_report_header):
    """RML parser for the OpenERP aged partner balance report.

    Buckets open (unreconciled as of the wizard's date) receivable and/or
    payable move lines per partner into the five ageing periods supplied
    by the wizard form, plus a 'direction' column holding the amount that
    falls before/after the analysed window.  NOTE: Python 2 code
    (``has_key``, ``range(5)+[...]`` list concatenation).
    """
    def __init__(self, cr, uid, name, context):
        super(aged_trial_report, self).__init__(cr, uid, name, context=context)
        # Running totals per ageing column; filled by _get_lines*.
        self.total_account = []
        # Helpers exposed to the RML/QWeb template.
        self.localcontext.update({
            'time': time,
            'get_lines_with_out_partner': self._get_lines_with_out_partner,
            'get_lines': self._get_lines,
            'get_total': self._get_total,
            'get_direction': self._get_direction,
            'get_for_period': self._get_for_period,
            'get_company': self._get_company,
            'get_currency': self._get_currency,
            'get_partners':self._get_partners,
            'get_account': self._get_account,
            'get_fiscalyear': self._get_fiscalyear,
            'get_target_move': self._get_target_move,
        })
    def set_context(self, objects, data, ids, report_type=None):
        """Extract the wizard options before rendering the report."""
        obj_move = self.pool.get('account.move.line')
        ctx = data['form'].get('used_context', {})
        # Widen the move-line domain to all fiscal years.
        ctx.update({'fiscalyear': False, 'all_fiscalyear': True})
        self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=ctx)
        self.direction_selection = data['form'].get('direction_selection', 'past')
        self.target_move = data['form'].get('target_move', 'all')
        self.date_from = data['form'].get('date_from', time.strftime('%Y-%m-%d'))
        # result_selection picks which account types are analysed.
        if (data['form']['result_selection'] == 'customer' ):
            self.ACCOUNT_TYPE = ['receivable']
        elif (data['form']['result_selection'] == 'supplier'):
            self.ACCOUNT_TYPE = ['payable']
        else:
            self.ACCOUNT_TYPE = ['payable','receivable']
        return super(aged_trial_report, self).set_context(objects, data, ids, report_type=report_type)
    def _get_lines(self, form):
        """Return one dict per partner with totals per ageing period.

        An entry counts as open when it is unreconciled, or reconciled
        only after self.date_from (so it was still open at that date).
        """
        res = []
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        # All partners having at least one open entry at date_from.
        self.cr.execute('SELECT DISTINCT res_partner.id AS id,\
                    res_partner.name AS name \
                FROM res_partner,account_move_line AS l, account_account, account_move am\
                WHERE (l.account_id=account_account.id) \
                    AND (l.move_id=am.id) \
                    AND (am.state IN %s)\
                    AND (account_account.type IN %s)\
                    AND account_account.active\
                    AND ((reconcile_id IS NULL)\
                       OR (reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                    AND (l.partner_id=res_partner.id)\
                    AND (l.date <= %s)\
                    AND ' + self.query + ' \
                ORDER BY res_partner.name', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
        partners = self.cr.dictfetchall()
        ## reset the per-column totals to 0
        for i in range(7):
            self.total_account.append(0)
        #
        # Build a string like (1,2,3) for easy use in SQL query
        partner_ids = [x['id'] for x in partners]
        if not partner_ids:
            return []
        # This dictionary will store the debit-credit for all partners, using partner_id as key.
        totals = {}
        self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id) \
                    AND (am.state IN %s)\
                    AND (account_account.type IN %s)\
                    AND (l.partner_id IN %s)\
                    AND ((l.reconcile_id IS NULL)\
                    OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                    AND ' + self.query + '\
                    AND account_account.active\
                    AND (l.date <= %s)\
                    GROUP BY l.partner_id ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids), self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            totals[i[0]] = i[1]
        # This dictionary will store the future or past of all partners
        future_past = {}
        if self.direction_selection == 'future':
            self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                        FROM account_move_line AS l, account_account, account_move am \
                        WHERE (l.account_id=account_account.id) AND (l.move_id=am.id) \
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity, l.date) < %s)\
                        AND (l.partner_id IN %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                        AND '+ self.query + '\
                        AND account_account.active\
                    AND (l.date <= %s)\
                        GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids),self.date_from, self.date_from,))
            t = self.cr.fetchall()
            for i in t:
                future_past[i[0]] = i[1]
        elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
            self.cr.execute('SELECT l.partner_id, SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity,l.date) > %s)\
                        AND (l.partner_id IN %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                        AND '+ self.query + '\
                        AND account_account.active\
                    AND (l.date <= %s)\
                        GROUP BY l.partner_id', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, tuple(partner_ids), self.date_from, self.date_from,))
            t = self.cr.fetchall()
            for i in t:
                future_past[i[0]] = i[1]
        # Use one query per period and store results in history (a list variable)
        # Each history will contain: history[1] = {'<partner_id>': <partner_debit-credit>}
        history = []
        for i in range(5):
            args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), tuple(partner_ids),self.date_from,)
            dates_query = '(COALESCE(l.date_maturity,l.date)'
            if form[str(i)]['start'] and form[str(i)]['stop']:
                dates_query += ' BETWEEN %s AND %s)'
                args_list += (form[str(i)]['start'], form[str(i)]['stop'])
            elif form[str(i)]['start']:
                dates_query += ' >= %s)'
                args_list += (form[str(i)]['start'],)
            else:
                dates_query += ' <= %s)'
                args_list += (form[str(i)]['stop'],)
            args_list += (self.date_from,)
            self.cr.execute('''SELECT l.partner_id, SUM(l.debit-l.credit), l.reconcile_partial_id
                    FROM account_move_line AS l, account_account, account_move am
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)
                        AND (am.state IN %s)
                        AND (account_account.type IN %s)
                        AND (l.partner_id IN %s)
                        AND ((l.reconcile_id IS NULL)
                          OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))
                        AND ''' + self.query + '''
                        AND account_account.active
                        AND ''' + dates_query + '''
                    AND (l.date <= %s)
                    GROUP BY l.partner_id, l.reconcile_partial_id''', args_list)
            partners_partial = self.cr.fetchall()
            partners_amount = dict((i[0],0) for i in partners_partial)
            for partner_info in partners_partial:
                if partner_info[2]:
                    # in case of partial reconciliation, we want to keep the left amount in the oldest period
                    self.cr.execute('''SELECT MIN(COALESCE(date_maturity,date)) FROM account_move_line WHERE reconcile_partial_id = %s''', (partner_info[2],))
                    date = self.cr.fetchall()
                    partial = False
                    if 'BETWEEN' in dates_query:
                        partial = date and args_list[-3] <= date[0][0] <= args_list[-2]
                    elif '>=' in dates_query:
                        partial = date and date[0][0] >= form[str(i)]['start']
                    else:
                        partial = date and date[0][0] <= form[str(i)]['stop']
                    if partial:
                        # partial reconcilation
                        limit_date = 'COALESCE(l.date_maturity,l.date) %s %%s' % '<=' if self.direction_selection == 'past' else '>='
                        self.cr.execute('''SELECT SUM(l.debit-l.credit)
                                           FROM account_move_line AS l, account_move AS am
                                           WHERE l.move_id = am.id AND am.state in %s
                                           AND l.reconcile_partial_id = %s
                                           AND ''' + limit_date, (tuple(move_state), partner_info[2], self.date_from))
                        unreconciled_amount = self.cr.fetchall()
                        partners_amount[partner_info[0]] += unreconciled_amount[0][0]
                else:
                    partners_amount[partner_info[0]] += partner_info[1]
            history.append(partners_amount)
        for partner in partners:
            values = {}
            ## If choise selection is in the future
            if self.direction_selection == 'future':
                # Query here is replaced by one query which gets the all the partners their 'before' value
                before = False
                if future_past.has_key(partner['id']):
                    before = [ future_past[partner['id']] ]
                self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
                values['direction'] = before and before[0] or 0.0
            elif self.direction_selection == 'past': # Changed this so people could in the future create new direction_selections
                # Query here is replaced by one query which gets the all the partners their 'after' value
                after = False
                if future_past.has_key(partner['id']): # Making sure this partner actually was found by the query
                    after = [ future_past[partner['id']] ]
                self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
                values['direction'] = after and after[0] or 0.0
            for i in range(5):
                during = False
                if history[i].has_key(partner['id']):
                    during = [ history[i][partner['id']] ]
                # accumulate the per-period running total
                self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
                values[str(i)] = during and during[0] or 0.0
            total = False
            if totals.has_key( partner['id'] ):
                total = [ totals[partner['id']] ]
            values['total'] = total and total[0] or 0.0
            ## Add for total
            self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
            values['name'] = partner['name']
            res.append(values)
        total = 0.0
        totals = {}
        for r in res:
            total += float(r['total'] or 0.0)
            for i in range(5)+['direction']:
                totals.setdefault(str(i), 0.0)
                totals[str(i)] += float(r[str(i)] or 0.0)
        return res
    def _get_lines_with_out_partner(self, form):
        """Same ageing computation as _get_lines, but for move lines
        carrying no partner (reported as 'Unknown Partner')."""
        res = []
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        ## reset the per-column totals to 0
        for i in range(7):
            self.total_account.append(0)
        totals = {}
        self.cr.execute('SELECT SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
                    AND (am.state IN %s)\
                    AND (l.partner_id IS NULL)\
                    AND (account_account.type IN %s)\
                    AND ((l.reconcile_id IS NULL) \
                    OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                    AND ' + self.query + '\
                    AND (l.date <= %s)\
                    AND account_account.active ',(tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from,))
        t = self.cr.fetchall()
        for i in t:
            totals['Unknown Partner'] = i[0]
        future_past = {}
        if self.direction_selection == 'future':
            self.cr.execute('SELECT SUM(l.debit-l.credit) \
                        FROM account_move_line AS l, account_account, account_move am\
                        WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity, l.date) < %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                        AND '+ self.query + '\
                        AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
            t = self.cr.fetchall()
            for i in t:
                future_past['Unknown Partner'] = i[0]
        elif self.direction_selection == 'past': # Using elif so people could extend without this breaking
            self.cr.execute('SELECT SUM(l.debit-l.credit) \
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id=account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND (account_account.type IN %s)\
                        AND (COALESCE(l.date_maturity,l.date) > %s)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                        AND '+ self.query + '\
                        AND account_account.active ', (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from, self.date_from))
            t = self.cr.fetchall()
            for i in t:
                future_past['Unknown Partner'] = i[0]
        history = []
        for i in range(5):
            args_list = (tuple(move_state), tuple(self.ACCOUNT_TYPE), self.date_from,)
            dates_query = '(COALESCE(l.date_maturity,l.date)'
            # NOTE(review): this variant uses strict > / < where _get_lines
            # uses >= / <= -- looks inconsistent; confirm before changing.
            if form[str(i)]['start'] and form[str(i)]['stop']:
                dates_query += ' BETWEEN %s AND %s)'
                args_list += (form[str(i)]['start'], form[str(i)]['stop'])
            elif form[str(i)]['start']:
                dates_query += ' > %s)'
                args_list += (form[str(i)]['start'],)
            else:
                dates_query += ' < %s)'
                args_list += (form[str(i)]['stop'],)
            args_list += (self.date_from,)
            self.cr.execute('SELECT SUM(l.debit-l.credit)\
                    FROM account_move_line AS l, account_account, account_move am \
                    WHERE (l.account_id = account_account.id) AND (l.move_id=am.id)\
                        AND (am.state IN %s)\
                        AND (account_account.type IN %s)\
                        AND (l.partner_id IS NULL)\
                        AND ((l.reconcile_id IS NULL)\
                        OR (l.reconcile_id IN (SELECT recon.id FROM account_move_reconcile AS recon WHERE recon.create_date > %s AND not recon.opening_reconciliation)))\
                        AND '+ self.query + '\
                        AND account_account.active\
                        AND ' + dates_query + '\
                    AND (l.date <= %s)\
                    GROUP BY l.partner_id', args_list)
            t = self.cr.fetchall()
            d = {}
            for i in t:
                d['Unknown Partner'] = i[0]
            history.append(d)
        values = {}
        if self.direction_selection == 'future':
            before = False
            if future_past.has_key('Unknown Partner'):
                before = [ future_past['Unknown Partner'] ]
            self.total_account[6] = self.total_account[6] + (before and before[0] or 0.0)
            values['direction'] = before and before[0] or 0.0
        elif self.direction_selection == 'past':
            after = False
            if future_past.has_key('Unknown Partner'):
                after = [ future_past['Unknown Partner'] ]
            self.total_account[6] = self.total_account[6] + (after and after[0] or 0.0)
            values['direction'] = after and after[0] or 0.0
        for i in range(5):
            during = False
            if history[i].has_key('Unknown Partner'):
                during = [ history[i]['Unknown Partner'] ]
            self.total_account[(i)] = self.total_account[(i)] + (during and during[0] or 0)
            values[str(i)] = during and during[0] or 0.0
        total = False
        if totals.has_key( 'Unknown Partner' ):
            total = [ totals['Unknown Partner'] ]
        values['total'] = total and total[0] or 0.0
        ## Add for total
        self.total_account[(i+1)] = self.total_account[(i+1)] + (total and total[0] or 0.0)
        values['name'] = 'Unknown Partner'
        if values['total']:
            res.append(values)
        total = 0.0
        totals = {}
        for r in res:
            total += float(r['total'] or 0.0)
            for i in range(5)+['direction']:
                totals.setdefault(str(i), 0.0)
                totals[str(i)] += float(r[str(i)] or 0.0)
        return res
    def _get_total(self,pos):
        """Template helper: total accumulated for column *pos*."""
        period = self.total_account[int(pos)]
        return period or 0.0
    def _get_direction(self,pos):
        """Template helper: 'direction' column total (same backing list)."""
        period = self.total_account[int(pos)]
        return period or 0.0
    def _get_for_period(self,pos):
        """Template helper: per-period total (same backing list)."""
        period = self.total_account[int(pos)]
        return period or 0.0
    def _get_partners(self,data):
        # TODO: deprecated, to remove in trunk
        if data['form']['result_selection'] == 'customer':
            return self._translate('Receivable Accounts')
        elif data['form']['result_selection'] == 'supplier':
            return self._translate('Payable Accounts')
        elif data['form']['result_selection'] == 'customer_supplier':
            return self._translate('Receivable and Payable Accounts')
        return ''
class report_agedpartnerbalance(osv.AbstractModel):
    """QWeb report binding: renders the 'account.report_agedpartnerbalance'
    template through the aged_trial_report RML parser above."""
    _name = 'report.account.report_agedpartnerbalance'
    _inherit = 'report.abstract_report'
    _template = 'account.report_agedpartnerbalance'
    _wrapped_report_class = aged_trial_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hlieberman/ansible-modules-core | commands/command.py | 22 | 8497 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys
import datetime
import glob
import traceback
import re
import shlex
import os
DOCUMENTATION = '''
---
module: command
short_description: Executes a command on a remote node
version_added: historical
description:
- The M(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell)
module if you need these features).
options:
free_form:
description:
- the command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
default: null
creates:
description:
- a filename or (since 2.0) glob pattern, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename or (since 2.0) glob pattern, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
version_added: "0.6"
required: false
default: null
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
version_added: "1.8"
default: yes
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
required: false
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(shell) module instead. The
M(command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example from Ansible Playbooks.
- command: /sbin/shutdown -t now
# Run the command if the specified file does not exist.
- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# /path/to/database doesn't exist.
- command: /usr/bin/make_database.sh arg1 arg2
args:
chdir: somedir/
creates: /path/to/database
'''
# Dict of options and their defaults
OPTIONS = {'chdir': None,
           'creates': None,
           'executable': None,
           'NO_LOG': None,
           'removes': None,
           'warn': True,
           }
# NOTE(review): 'NO_LOG' has no handler in main() below -- presumably it is
# consumed elsewhere in the task executor; confirm before removing.

# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
#    ^ look for a space or the beginning of the line
# 2. ({options_list})=
#    ^ expanded to (chdir|creates|executable...)=
#      look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
#    ^ look for an optional quote character, which can either be
#      a single or double quote character, and store it for later
# 4. (.*?)
#    ^ match everything in a non-greedy manner until...
# 5. (?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)
#    ^ a non-escaped space or a non-escaped quote of the same kind
#      that was matched in the first 'quote' is found, or the end of
#      the line is reached
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(
    r'(^|\s)(' + OPTIONS_REGEX +
    r')=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'
)
def check_command(commandline):
    """Inspect a raw command line and return advisory warnings.

    Suggests a dedicated Ansible module whenever the executable being run
    (first whitespace-separated token, path stripped) is better handled by
    the ``file`` module, a purpose-built module, or the become machinery.

    :param commandline: the full command string the user asked to run
    :returns: list of warning strings (empty when nothing is flagged)
    """
    # Executables whose job the 'file' module covers, mapped to the
    # file-module argument that replaces them.
    file_module_args = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
                        'ln': 'state=link', 'mkdir': 'state=directory',
                        'rmdir': 'state=absent', 'rm': 'state=absent',
                        'touch': 'state=touch'}
    # Executables with a dedicated module, mapped to the module name(s).
    dedicated_modules = {'git': 'git', 'hg': 'hg',
                         'curl': 'get_url or uri', 'wget': 'get_url or uri',
                         'svn': 'subversion', 'service': 'service',
                         'mount': 'mount', 'rpm': 'yum, dnf or zypper',
                         'yum': 'yum', 'apt-get': 'apt',
                         'tar': 'unarchive', 'unzip': 'unarchive',
                         'sed': 'template or lineinfile',
                         'rsync': 'synchronize', 'dnf': 'dnf',
                         'zypper': 'zypper'}
    # Privilege-escalation wrappers better expressed via 'become'.
    become_tools = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']

    executable = os.path.basename(commandline.split()[0])
    notes = []
    if executable in file_module_args:
        notes.append("Consider using file module with %s rather than running %s"
                     % (file_module_args[executable], executable))
    if executable in dedicated_modules:
        notes.append("Consider using %s module rather than running %s"
                     % (dedicated_modules[executable], executable))
    if executable in become_tools:
        notes.append("Consider using 'become', 'become_method', and 'become_user' rather than running %s" % (executable,))
    return notes
def main():
    """Entry point: parse parameters, honor creates/removes guards,
    execute the command, and exit with a JSON result payload."""
    # the command module is the one ansible module that does not take key=value args
    # hence don't copy this one if you are looking to build others!
    module = AnsibleModule(
        argument_spec=dict(
            _raw_params = dict(),
            _uses_shell = dict(type='bool', default=False),
            chdir = dict(),
            executable = dict(),
            creates = dict(),
            removes = dict(),
            warn = dict(type='bool', default=True),
        )
    )

    shell = module.params['_uses_shell']
    chdir = module.params['chdir']
    executable = module.params['executable']
    args = module.params['_raw_params']
    creates = module.params['creates']
    removes = module.params['removes']
    warn = module.params['warn']

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    if chdir:
        # Normalize and switch into the requested working directory before
        # evaluating creates/removes globs or running the command.
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists. This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s exists" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist. This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not glob.glob(v):
            module.exit_json(
                cmd=args,
                stdout="skipped, since %s does not exist" % v,
                changed=False,
                stderr=False,
                rc=0
            )

    warnings = list()
    if warn:
        warnings = check_command(args)

    if not shell:
        # Without a shell, the raw string must be tokenized ourselves.
        args = shlex.split(args)
    startd = datetime.datetime.now()

    rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)

    endd = datetime.datetime.now()
    delta = endd - startd

    if out is None:
        out = ''
    if err is None:
        err = ''

    module.exit_json(
        cmd = args,
        stdout = out.rstrip("\r\n"),
        stderr = err.rstrip("\r\n"),
        rc = rc,
        start = str(startd),
        end = str(endd),
        delta = str(delta),
        # NOTE(review): changed is always True when the command runs -- the
        # module cannot know whether the command had an effect.
        changed = True,
        warnings = warnings
    )
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
main()
| gpl-3.0 |
wpoa/wiki-imports | lib/python2.7/site-packages/pip/commands/list.py | 393 | 6814 | from pip.basecommand import Command
from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled
from pip.index import PackageFinder
from pip.log import logger
from pip.req import InstallRequirement
from pip.util import get_installed_distributions, dist_is_editable
from pip.cmdoptions import make_option_group, index_group
class ListCommand(Command):
    """List installed packages, including editables."""
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'

    # distributions to skip (python itself is reported by pkg_resources.working_set)
    skip = ['python']

    def __init__(self, *args, **kw):
        super(ListCommand, self).__init__(*args, **kw)

        cmd_opts = self.cmd_opts

        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not list globally-installed packages.')

        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, pip only finds stable versions.")

        index_opts = make_option_group(index_group, self.parser)

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)

    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(find_links=options.find_links,
                             index_urls=index_urls,
                             allow_external=options.allow_external,
                             allow_unverified=options.allow_unverified,
                             allow_all_external=options.allow_all_external,
                             allow_all_prereleases=options.pre,
                             process_dependency_links=
                                options.process_dependency_links,
                             session=session,
                             )

    def run(self, options, args):
        # Dispatch on the (mutually exclusive) listing-mode flags; a plain
        # listing is the default when no mode flag is given.
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)

    def run_outdated(self, options):
        # Report only packages whose latest index version is strictly newer
        # than the locally installed one.
        for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options):
            if remote_version_parsed > dist.parsed_version:
                logger.notify('%s (Current: %s Latest: %s)' % (dist.project_name,
                    dist.version, remote_version_raw))

    def find_packages_latests_versions(self, options):
        # Generator yielding (dist, raw_version, parsed_version) for each
        # installed non-editable distribution found on the configured indexes.
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []

        if options.use_mirrors:
            logger.deprecated("1.7",
                     "--use-mirrors has been deprecated and will be removed"
                     " in the future. Explicit uses of --index-url and/or "
                     "--extra-index-url is suggested.")

        if options.mirrors:
            logger.deprecated("1.7",
                     "--mirrors has been deprecated and will be removed in "
                     " the future. Explicit uses of --index-url and/or "
                     "--extra-index-url is suggested.")
            index_urls += options.mirrors

        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local, skip=self.skip):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )

        session = self._build_session(options)

        finder = self._build_package_finder(options, index_urls, session)
        finder.add_dependency_links(dependency_links)

        installed_packages = get_installed_distributions(local_only=options.local, include_editables=False, skip=self.skip)
        for dist in installed_packages:
            req = InstallRequirement.from_line(dist.key, None)
            try:
                link = finder.find_requirement(req, True)

                # If link is None, means installed version is most up-to-date
                if link is None:
                    continue
            except DistributionNotFound:
                continue
            except BestVersionAlreadyInstalled:
                # NOTE(review): this path sets remote_version but not
                # remote_version_raw/_parsed before the yield below -- looks
                # like it relies on values from a previous iteration; confirm
                # against upstream pip.
                remote_version = req.installed_version
            else:
                # It might be a good idea that link or finder had a public method
                # that returned version
                remote_version = finder._link_package_versions(link, req.name)[0]
                remote_version_raw = remote_version[2]
                remote_version_parsed = remote_version[0]
            yield dist, remote_version_raw, remote_version_parsed

    def run_listing(self, options):
        # Default mode: list every installed distribution (editables included).
        installed_packages = get_installed_distributions(local_only=options.local, skip=self.skip)
        self.output_package_listing(installed_packages)

    def run_editables(self, options):
        # List only distributions installed in editable (-e) mode.
        installed_packages = get_installed_distributions(local_only=options.local, editables_only=True)
        self.output_package_listing(installed_packages)

    def output_package_listing(self, installed_packages):
        # Stable, case-insensitive ordering by project name.
        installed_packages = sorted(installed_packages, key=lambda dist: dist.project_name.lower())
        for dist in installed_packages:
            if dist_is_editable(dist):
                line = '%s (%s, %s)' % (dist.project_name, dist.version, dist.location)
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.notify(line)

    def run_uptodate(self, options):
        # List installed packages whose version matches the newest on the index.
        uptodate = []
        for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options):
            if dist.parsed_version == remote_version_parsed:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| gpl-3.0 |
anitagraser/processing_pysal | ext-libs/pysal/core/IOHandlers/tests/test_arcgis_txt.py | 20 | 2268 | import unittest
import pysal
from pysal.core.IOHandlers.arcgis_txt import ArcGISTextIO
import tempfile
import os
import warnings
class test_ArcGISTextIO(unittest.TestCase):
    """Unit tests for the ArcGIS text-format spatial weights reader/writer."""

    def setUp(self):
        # Open the bundled example file fresh before each test.
        self.test_file = test_file = pysal.examples.get_path('arcgis_txt.txt')
        self.obj = ArcGISTextIO(test_file, 'r')

    def test_close(self):
        # Reading from a closed handle must raise ValueError.
        f = self.obj
        f.close()
        self.failUnlessRaises(ValueError, f.read)

    def test_read(self):
        # The reader may emit a RuntimeWarning when no companion DBF is
        # present; capture it so it does not pollute test output.
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            w = self.obj.read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        self.assertEqual(3, w.n)
        self.assertEqual(2.0, w.mean_neighbors)
        self.assertEqual([0.1, 0.05], w[2].values())

    def test_seek(self):
        # A second read without seeking exhausts the file; seek(0) rewinds.
        self.test_read()
        self.failUnlessRaises(StopIteration, self.obj.read)
        self.obj.seek(0)
        self.test_read()

    def test_write(self):
        # Round-trip: read the example weights, write them to a temp file,
        # re-read, and compare sparsity.
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            w = self.obj.read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        # NamedTemporaryFile is used only to reserve a unique path; the
        # handle is closed so pysal.open can re-create the file.
        f = tempfile.NamedTemporaryFile(
            suffix='.txt', dir=pysal.examples.get_path(''))
        fname = f.name
        f.close()
        o = pysal.open(fname, 'w', 'arcgis_text')
        o.write(w)
        o.close()
        with warnings.catch_warnings(record=True) as warn:
            warnings.simplefilter("always")
            wnew = pysal.open(fname, 'r', 'arcgis_text').read()
            if len(warn) > 0:
                assert issubclass(warn[0].category, RuntimeWarning)
                assert "DBF relating to ArcGIS TEXT was not found, proceeding with unordered string ids." in str(warn[0].message)
        self.assertEqual(wnew.pct_nonzero, w.pct_nonzero)
        os.remove(fname)
| gpl-2.0 |
alekz112/statsmodels | statsmodels/sandbox/bspline.py | 33 | 20376 | '''
Bspines and smoothing splines.
General references:
Craven, P. and Wahba, G. (1978) "Smoothing noisy data with spline functions.
Estimating the correct degree of smoothing by
the method of generalized cross-validation."
Numerische Mathematik, 31(4), 377-403.
Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
Learning." Springer-Verlag. 536 pages.
Hutchison, M. and Hoog, F. "Smoothing noisy data with spline functions."
Numerische Mathematik, 47(1), 99-106.
'''
from statsmodels.compat.python import range
import numpy as np
import numpy.linalg as L
from scipy.linalg import solveh_banded
from scipy.optimize import golden
from models import _hbspline #removed because this was segfaulting
# Issue warning regarding heavy development status of this module
import warnings
_msg = """
The bspline code is technology preview and requires significant work
on the public API and documentation. The API will likely change in the future
"""
warnings.warn(_msg, FutureWarning)
def _band2array(a, lower=0, symmetric=False, hermitian=False):
"""
Take an upper or lower triangular banded matrix and return a
numpy array.
INPUTS:
a -- a matrix in upper or lower triangular banded matrix
lower -- is the matrix upper or lower triangular?
symmetric -- if True, return the original result plus its transpose
hermitian -- if True (and symmetric False), return the original
result plus its conjugate transposed
"""
n = a.shape[1]
r = a.shape[0]
_a = 0
if not lower:
for j in range(r):
_b = np.diag(a[r-1-j],k=j)[j:(n+j),j:(n+j)]
_a += _b
if symmetric and j > 0: _a += _b.T
elif hermitian and j > 0: _a += _b.conjugate().T
else:
for j in range(r):
_b = np.diag(a[j],k=j)[0:n,0:n]
_a += _b
if symmetric and j > 0: _a += _b.T
elif hermitian and j > 0: _a += _b.conjugate().T
_a = _a.T
return _a
def _upper2lower(ub):
"""
Convert upper triangular banded matrix to lower banded form.
INPUTS:
ub -- an upper triangular banded matrix
OUTPUTS: lb
lb -- a lower triangular banded matrix with same entries
as ub
"""
lb = np.zeros(ub.shape, ub.dtype)
nrow, ncol = ub.shape
for i in range(ub.shape[0]):
lb[i,0:(ncol-i)] = ub[nrow-1-i,i:ncol]
lb[i,(ncol-i):] = ub[nrow-1-i,0:i]
return lb
def _lower2upper(lb):
"""
Convert lower triangular banded matrix to upper banded form.
INPUTS:
lb -- a lower triangular banded matrix
OUTPUTS: ub
ub -- an upper triangular banded matrix with same entries
as lb
"""
ub = np.zeros(lb.shape, lb.dtype)
nrow, ncol = lb.shape
for i in range(lb.shape[0]):
ub[nrow-1-i,i:ncol] = lb[i,0:(ncol-i)]
ub[nrow-1-i,0:i] = lb[i,(ncol-i):]
return ub
def _triangle2unit(tb, lower=0):
    """
    Split a banded triangular matrix into its diagonal and the "unit"
    banded matrix obtained by dividing each row/column through by the
    corresponding diagonal entry (so the unit matrix has 1's on its
    diagonal).

    INPUTS:
       tb    -- a triangular banded matrix
       lower -- if True, tb is lower triangular banded and the returned
                unit matrix is too; otherwise both are upper banded

    OUTPUTS: d, b
       d -- diagonal entries of tb
       b -- the normalised ("unit") banded matrix
    """
    if lower:
        diag = tb[0].copy()
        # In lower storage columns line up with diagonal entries, so a
        # plain elementwise divide normalises each column.
        return diag, tb / diag
    diag = tb[-1].copy()
    # For upper storage, normalise in lower form (where columns align with
    # the diagonal) and convert back to upper form.
    return diag, _lower2upper(_upper2lower(tb) / diag)
def _trace_symbanded(a, b, lower=0):
    """
    Compute trace(ab) for two real symmetric banded matrices stored in the
    same (upper or lower) banded form.

    INPUTS:
       a, b  -- banded real symmetric matrices
       lower -- if True, a and b hold the lower half

    OUTPUTS:
       trace(ab)
    """
    prod = a * b
    if lower:
        masked = _zero_triband(prod, lower=1)
        # The diagonal row counts once; each off-diagonal band twice
        # (it appears on both sides of the symmetric matrix).
        return masked[0].sum() + 2 * masked[1:].sum()
    masked = _zero_triband(prod, lower=0)
    return masked[-1].sum() + 2 * masked[:-1].sum()
def _zero_triband(a, lower=0):
"""
Explicitly zero out unused elements of a real symmetric banded matrix.
INPUTS:
a -- a real symmetric banded matrix (either upper or lower hald)
lower -- if True, a is assumed to be the lower half
"""
nrow, ncol = a.shape
if lower:
for i in range(nrow): a[i,(ncol-i):] = 0.
else:
for i in range(nrow): a[i,0:i] = 0.
return a
class BSpline(object):
    '''
    Bsplines of a given order and specified knots.

    Implementation is based on description in Chapter 5 of

    Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
    Learning." Springer-Verlag. 536 pages.

    INPUTS:
       knots  -- a sorted array of knots with knots[0] the lower boundary,
                 knots[1] the upper boundary and knots[1:-1] the internal
                 knots.
       order  -- order of the Bspline, default is 4 which yields cubic
                 splines
       M      -- number of additional boundary knots, if None it defaults
                 to order
       coef   -- an optional array of real-valued coefficients for the Bspline
                 of shape (knots.shape + 2 * (M - 1) - order,).
       x      -- an optional set of x values at which to evaluate the
                 Bspline to avoid extra evaluation in the __call__ method
    '''
    # FIXME: update parameter names, replace single character names
    # FIXME: `order` should be actual spline order (implemented as order+1)
    ## FIXME: update the use of spline order in extension code (evaluate is recursively called)
    # FIXME: eliminate duplicate M and m attributes (m is order, M is related to tau size)

    def __init__(self, knots, order=4, M=None, coef=None, x=None):
        # np.unique also sorts and flattens the knot array.
        knots = np.squeeze(np.unique(np.asarray(knots)))

        if knots.ndim != 1:
            raise ValueError('expecting 1d array for knots')

        self.m = order
        if M is None:
            M = self.m
        self.M = M

        # Augmented knot sequence: each boundary knot repeated M-1 extra times.
        self.tau = np.hstack([[knots[0]]*(self.M-1), knots, [knots[-1]]*(self.M-1)])

        self.K = knots.shape[0] - 2
        if coef is None:
            self.coef = np.zeros((self.K + 2 * self.M - self.m), np.float64)
        else:
            self.coef = np.squeeze(coef)
            if self.coef.shape != (self.K + 2 * self.M - self.m):
                raise ValueError('coefficients of Bspline have incorrect shape')
        if x is not None:
            self.x = x

    def _setx(self, x):
        # Cache the basis evaluated at x so __call__() with no args is cheap.
        self._x = x
        self._basisx = self.basis(self._x)

    def _getx(self):
        return self._x

    x = property(_getx, _setx)

    def __call__(self, *args):
        """
        Evaluate the BSpline at a given point, yielding
        a matrix B and return

        B * self.coef

        INPUTS:
           args -- optional arguments. If None, it returns self._basisx,
                   the BSpline evaluated at the x values passed in __init__.
                   Otherwise, return the BSpline evaluated at the
                   first argument args[0].

        OUTPUTS: y
           y -- value of Bspline at specified x values

        BUGS:
           If self has no attribute x, an exception will be raised
           because self has no attribute _basisx.
        """
        if not args:
            b = self._basisx.T
        else:
            x = args[0]
            b = np.asarray(self.basis(x)).T
        return np.squeeze(np.dot(b, self.coef))

    def basis_element(self, x, i, d=0):
        """
        Evaluate a particular basis element of the BSpline,
        or its derivative.

        INPUTS:
           x -- x values at which to evaluate the basis element
           i -- which element of the BSpline to return
           d -- the order of derivative

        OUTPUTS: y
           y -- value of d-th derivative of the i-th basis element
                of the BSpline at specified x values
        """
        x = np.asarray(x, np.float64)
        _shape = x.shape
        if _shape == ():
            x.shape = (1,)
        # Flatten to 1-d for the C extension; original shape restored below.
        x.shape = (np.product(_shape,axis=0),)
        if i < self.tau.shape[0] - 1:
            ## TODO: OWNDATA flags...
            v = _hbspline.evaluate(x, self.tau, self.m, d, i, i+1)
        else:
            return np.zeros(x.shape, np.float64)
        if (i == self.tau.shape[0] - self.m):
            # Right-closed support: the last element equals 1 at the
            # upper boundary knot.
            v = np.where(np.equal(x, self.tau[-1]), 1, v)
        v.shape = _shape
        return v

    def basis(self, x, d=0, lower=None, upper=None):
        """
        Evaluate the basis of the BSpline or its derivative.
        If lower or upper is specified, then only
        the [lower:upper] elements of the basis are returned.

        INPUTS:
           x     -- x values at which to evaluate the basis element
           i     -- which element of the BSpline to return
           d     -- the order of derivative
           lower -- optional lower limit of the set of basis
                    elements
           upper -- optional upper limit of the set of basis
                    elements

        OUTPUTS: y
           y -- value of d-th derivative of the basis elements
                of the BSpline at specified x values
        """
        x = np.asarray(x)
        _shape = x.shape
        if _shape == ():
            x.shape = (1,)
        x.shape = (np.product(_shape,axis=0),)
        if upper is None:
            upper = self.tau.shape[0] - self.m
        if lower is None:
            lower = 0
        # Clamp the requested element range to the valid basis indices.
        upper = min(upper, self.tau.shape[0] - self.m)
        lower = max(0, lower)

        d = np.asarray(d)
        if d.shape == ():
            v = _hbspline.evaluate(x, self.tau, self.m, int(d), lower, upper)
        else:
            if d.shape[0] != 2:
                raise ValueError("if d is not an integer, expecting a jx2 \
   array with first row indicating order \
   of derivative, second row coefficient in front.")
            # d is a differential operator: sum coef[j] * d^(order[j])/dx.
            v = 0
            for i in range(d.shape[1]):
                v += d[1,i] * _hbspline.evaluate(x, self.tau, self.m, d[0,i], lower, upper)

        v.shape = (upper-lower,) + _shape
        if upper == self.tau.shape[0] - self.m:
            # Make the support right-closed at the upper boundary knot.
            v[-1] = np.where(np.equal(x, self.tau[-1]), 1, v[-1])
        return v

    def gram(self, d=0):
        """
        Compute Gram inner product matrix, storing it in lower
        triangular banded form.

        The (i,j) entry is

        G_ij = integral b_i^(d) b_j^(d)

        where b_i are the basis elements of the BSpline and (d) is the
        d-th derivative.

        If d is a matrix then, it is assumed to specify a differential
        operator as follows: the first row represents the order of derivative
        with the second row the coefficient corresponding to that order.

        For instance:

        [[2, 3],
         [3, 1]]

        represents 3 * f^(2) + 1 * f^(3).

        INPUTS:
           d -- which derivative to apply to each basis element,
                if d is a matrix, it is assumed to specify
                a differential operator as above

        OUTPUTS: gram
           gram -- the matrix of inner products of (derivatives)
                   of the BSpline elements
        """
        d = np.squeeze(d)
        if np.asarray(d).shape == ():
            self.g = _hbspline.gram(self.tau, self.m, int(d), int(d))
        else:
            d = np.asarray(d)
            if d.shape[0] != 2:
                raise ValueError("if d is not an integer, expecting a jx2 \
   array with first row indicating order \
   of derivative, second row coefficient in front.")
            if d.shape == (2,):
                d.shape = (2,1)
            # Bilinear expansion of the operator: sum over all pairs of terms.
            self.g = 0
            for i in range(d.shape[1]):
                for j in range(d.shape[1]):
                    self.g += d[1,i]* d[1,j] * _hbspline.gram(self.tau, self.m, int(d[0,i]), int(d[0,j]))
        self.g = self.g.T
        self.d = d
        return np.nan_to_num(self.g)
class SmoothingSpline(BSpline):
    '''
    A smoothing spline, which can be used to smooth scatterplots, i.e.
    a list of (x,y) tuples.

    See fit method for more information.
    '''
    # NOTE: the class docstring above was previously placed *after* these
    # attributes, where it was a no-op expression statement rather than
    # __doc__; it has been moved to the canonical position.

    penmax = 30.            # upper bound used to cap the penalty in fit()
    method = "target_df"    # strategy used by smooth(): "target_df" or "optimize_gcv"
    target_df = 5           # default target degrees of freedom
    default_pen = 1.0e-03
    optimize = True

    def fit(self, y, x=None, weights=None, pen=0.):
        """
        Fit the smoothing spline to a set of (x,y) pairs.

        INPUTS:
           y       -- response variable
           x       -- if None, uses self.x
           weights -- optional array of weights
           pen     -- constant in front of Gram matrix

        OUTPUTS: None
           The smoothing spline is determined by self.coef,
           subsequent calls of __call__ will be the smoothing spline.

        ALGORITHM:
           Formally, this solves a minimization:

           fhat = ARGMIN_f SUM_i=1^n (y_i-f(x_i))^2 + pen * int f^(2)^2

           int is integral. pen is lambda (from Hastie)

           See Chapter 5 of

           Hastie, Tibshirani and Friedman (2001). "The Elements of Statistical
           Learning." Springer-Verlag. 536 pages.

           for more details.

        TODO:
           Should add arbitrary derivative penalty instead of just
           second derivative.
        """
        banded = True

        if x is None:
            x = self._x
            bt = self._basisx.copy()
        else:
            bt = self.basis(x)

        if pen == 0.:  # can't use cholesky for singular matrices
            banded = False

        if x.shape != y.shape:
            raise ValueError("x and y shape do not agree, by default x are "
                             "the Bspline's internal knots")

        # Cap the penalty to keep the banded system well conditioned.
        if pen >= self.penmax:
            pen = self.penmax

        if weights is not None:
            self.weights = weights
        else:
            self.weights = 1.

        _w = np.sqrt(self.weights)
        bt *= _w

        # throw out rows with zeros (this happens at boundary points!)
        mask = np.flatnonzero(1 - np.alltrue(np.equal(bt, 0), axis=0))

        bt = bt[:, mask]
        y = y[mask]

        self.df_total = y.shape[0]

        bty = np.squeeze(np.dot(bt, _w * y))
        self.N = y.shape[0]

        if not banded:
            # Dense least-squares path (pen == 0): the normal equations may
            # be singular, so use lstsq instead of a Cholesky solve.
            self.btb = np.dot(bt, bt.T)
            _g = _band2array(self.g, lower=1, symmetric=True)
            self.coef, _, self.rank = L.lstsq(self.btb + pen*_g, bty)[0:3]
            self.rank = min(self.rank, self.btb.shape[0])
            del(_g)
        else:
            # Banded path: accumulate B'B directly in lower banded storage.
            self.btb = np.zeros(self.g.shape, np.float64)
            nband, nbasis = self.g.shape
            for i in range(nbasis):
                for k in range(min(nband, nbasis - i)):
                    self.btb[k, i] = (bt[i] * bt[i+k]).sum()

            bty.shape = (1, bty.shape[0])
            self.pen = pen
            # solveh_banded returns the Cholesky factor (kept for trace())
            # and the solution of (B'B + pen*G) coef = B'y.
            self.chol, self.coef = solveh_banded(self.btb +
                                                 pen*self.g,
                                                 bty, lower=1)

        self.coef = np.squeeze(self.coef)
        self.resid = y * self.weights - np.dot(self.coef, bt)
        self.pen = pen

        # Free the larger temporaries eagerly.
        del(bty)
        del(mask)
        del(bt)

    def smooth(self, y, x=None, weights=None):
        """Fit using the strategy selected by self.method."""
        if self.method == "target_df":
            if hasattr(self, 'pen'):
                # A penalty from a previous fit exists; reuse it.
                self.fit(y, x=x, weights=weights, pen=self.pen)
            else:
                self.fit_target_df(y, x=x, weights=weights, df=self.target_df)
        elif self.method == "optimize_gcv":
            self.fit_optimize_gcv(y, x=x, weights=weights)

    def gcv(self):
        """
        Generalized cross-validation score of current fit.

        Craven, P. and Wahba, G. "Smoothing noisy data with spline functions.
        Estimating the correct degree of smoothing by
        the method of generalized cross-validation."
        Numerische Mathematik, 31(4), 377-403.
        """
        norm_resid = (self.resid**2).sum()
        return norm_resid / (self.df_total - self.trace())

    def df_resid(self):
        """
        Residual degrees of freedom in the fit.

        self.N - self.trace()

        where self.N is the number of observations of last fit.
        """
        return self.N - self.trace()

    def df_fit(self):
        """
        How many degrees of freedom used in the fit?

        self.trace()
        """
        return self.trace()

    def trace(self):
        """
        Trace of the smoothing matrix S(pen)

        TODO: addin a reference to Wahba, and whoever else I used.
        """
        if self.pen > 0:
            # Banded fit: tr(S) from the inverse of the Cholesky factor.
            _invband = _hbspline.invband(self.chol.copy())
            tr = _trace_symbanded(_invband, self.btb, lower=1)
            return tr
        else:
            # Unpenalized fit: S is a projection, trace equals the rank.
            return self.rank

    def fit_target_df(self, y, x=None, df=None, weights=None, tol=1.0e-03,
                      apen=0, bpen=1.0e-03):
        """
        Fit smoothing spline with approximately df degrees of freedom
        used in the fit, i.e. so that self.trace() is approximately df.

        Uses binary search strategy.

        In general, df must be greater than the dimension of the null space
        of the Gram inner product. For cubic smoothing splines, this means
        that df > 2.

        INPUTS:
           y       -- response variable
           x       -- if None, uses self.x
           df      -- target degrees of freedom
           weights -- optional array of weights
           tol     -- (relative) tolerance for convergence
           apen    -- lower bound of penalty for binary search
           bpen    -- upper bound of penalty for binary search

        OUTPUTS: None
           The smoothing spline is determined by self.coef,
           subsequent calls of __call__ will be the smoothing spline.
        """
        df = df or self.target_df

        if hasattr(self, "pen"):
            # Seed the bracket from the penalty of a previous fit.
            self.fit(y, x=x, weights=weights, pen=self.pen)
            curdf = self.trace()
            if np.fabs(curdf - df) / df < tol:
                return
            if curdf > df:
                apen, bpen = self.pen, 2 * self.pen
            else:
                apen, bpen = 0., self.pen

        while True:
            curpen = 0.5 * (apen + bpen)
            self.fit(y, x=x, weights=weights, pen=curpen)
            curdf = self.trace()
            # More df than requested means the penalty is too small:
            # move the bracket upward; otherwise shrink it downward.
            if curdf > df:
                apen, bpen = curpen, 2 * curpen
            else:
                apen, bpen = apen, curpen
            if apen >= self.penmax:
                raise ValueError("penalty too large, try setting penmax "
                                 "higher or decreasing df")
            if np.fabs(curdf - df) / df < tol:
                break

    def fit_optimize_gcv(self, y, x=None, weights=None, tol=1.0e-03,
                         brack=(-100, 20)):
        """
        Fit smoothing spline trying to optimize GCV.

        Try to find a bracketing interval for scipy.optimize.golden
        based on brack.

        It is probably best to use target_df instead, as it is
        sometimes difficult to find a bracketing interval.

        INPUTS:
           y       -- response variable
           x       -- if None, uses self.x
           weights -- optional array of weights
           tol     -- (relative) tolerance for convergence
           brack   -- an initial guess at the bracketing interval

        OUTPUTS: None
           The smoothing spline is determined by self.coef,
           subsequent calls of __call__ will be the smoothing spline.
        """
        def _gcv(pen, y, x):
            # golden searches over log-penalty, hence the exp.
            self.fit(y, x=x, pen=np.exp(pen))
            return self.gcv()

        # BUG FIX: the original passed the undefined name `bracket`
        # (raising NameError); the parameter is `brack`.
        golden(_gcv, args=(y, x), brack=brack, tol=tol)
| bsd-3-clause |
janusnic/wagtail | wagtail/wagtailsnippets/widgets.py | 27 | 1734 | from __future__ import absolute_import, unicode_literals
import json
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from wagtail.wagtailadmin.widgets import AdminChooser
class AdminSnippetChooser(AdminChooser):
    # Content type of the snippet model this chooser targets; can be set as
    # a class attribute on subclasses or passed to the constructor.
    target_content_type = None

    def __init__(self, content_type=None, **kwargs):
        # An optional 'snippet_type_name' kwarg customises the chooser's
        # human-readable labels before delegating to AdminChooser.
        if 'snippet_type_name' in kwargs:
            snippet_type_name = kwargs.pop('snippet_type_name')
            self.choose_one_text = _('Choose %s') % snippet_type_name
            self.choose_another_text = _('Choose another %s') % snippet_type_name
            self.link_to_chosen_text = _('Edit this %s') % snippet_type_name
        super(AdminSnippetChooser, self).__init__(**kwargs)
        if content_type is not None:
            self.target_content_type = content_type

    def render_html(self, name, value, attrs):
        # Resolve the stored id into a model instance and wrap the base
        # widget's HTML in the snippet chooser template.
        model_class = self.target_content_type.model_class()
        instance, value = self.get_instance_and_id(model_class, value)
        original_field_html = super(AdminSnippetChooser, self).render_html(name, value, attrs)

        return render_to_string("wagtailsnippets/widgets/snippet_chooser.html", {
            'widget': self,
            'original_field_html': original_field_html,
            'attrs': attrs,
            'value': value,
            'item': instance,
        })

    def render_js_init(self, id_, name, value):
        # Initialise the client-side chooser with the widget's DOM id and an
        # "app_label/model" string identifying the snippet content type.
        content_type = self.target_content_type

        return "createSnippetChooser({id}, {content_type});".format(
            id=json.dumps(id_),
            content_type=json.dumps('{app}/{model}'.format(
                app=content_type.app_label,
                model=content_type.model)))
| bsd-3-clause |
xiaolihope/PerfKitBenchmarker-1.7.0 | perfkitbenchmarker/vm_util.py | 1 | 18327 | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of utility functions for working with virtual machines."""
import contextlib
import logging
import os
import random
import re
import string
import subprocess
import tempfile
import threading
import time
import jinja2
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import temp_dir
FLAGS = flags.FLAGS
PRIVATE_KEYFILE = 'perfkitbenchmarker_keyfile'
PUBLIC_KEYFILE = 'perfkitbenchmarker_keyfile.pub'
CERT_FILE = 'perfkitbenchmarker.pem'
# The temporary directory on VMs. We cannot reuse GetTempDir()
# because run_uri will not be available at time of module load and we need
# to use this directory as a base for other module level constants.
VM_TMP_DIR = '/tmp/pkb'
# Default timeout for issuing a command.
DEFAULT_TIMEOUT = 300
# Defaults for retrying commands.
POLL_INTERVAL = 30
TIMEOUT = 1200
FUZZ = .5
MAX_RETRIES = -1
WINDOWS = 'nt'
PASSWORD_LENGTH = 15
OUTPUT_STDOUT = 0
OUTPUT_STDERR = 1
OUTPUT_EXIT_CODE = 2
flags.DEFINE_integer('default_timeout', TIMEOUT, 'The default timeout for '
'retryable commands in seconds.')
flags.DEFINE_integer('burn_cpu_seconds', 0,
'Amount of time in seconds to burn cpu on vm before '
'starting benchmark')
flags.DEFINE_integer('burn_cpu_threads', 1, 'Number of threads to use to '
'burn cpu before starting benchmark.')
flags.DEFINE_integer('background_cpu_threads', None,
'Number of threads of background cpu usage while '
'running a benchmark')
flags.DEFINE_integer('background_network_mbits_per_sec', None,
'Number of megabits per second of background '
'network traffic to generate during the run phase '
'of the benchmark')
class IpAddressSubset(object):
    """Enum of options for --ip_addresses.

    REACHABLE: external IPs, plus internal IPs when the receiving VM is
        reachable on its internal address.
    BOTH: both internal and external IPs.
    INTERNAL: internal IPs only.
    EXTERNAL: external IPs only.
    """
    REACHABLE = 'REACHABLE'
    BOTH = 'BOTH'
    INTERNAL = 'INTERNAL'
    EXTERNAL = 'EXTERNAL'

    # All valid choices; used as the domain of the --ip_addresses enum flag.
    ALL = (REACHABLE, BOTH, INTERNAL, EXTERNAL)
flags.DEFINE_enum('ip_addresses', IpAddressSubset.REACHABLE,
IpAddressSubset.ALL,
'For networking tests: use both internal and external '
'IP addresses (BOTH), external and internal only if '
'the receiving VM is reachable by internal IP (REACHABLE), '
'external IP only (EXTERNAL) or internal IP only (INTERNAL)')
flags.DEFINE_enum('background_network_ip_type', IpAddressSubset.EXTERNAL,
(IpAddressSubset.INTERNAL, IpAddressSubset.EXTERNAL),
'IP address type to use when generating background network '
'traffic')
def GetTempDir():
    """Returns the tmp dir of the current run (e.g. /tmp/perfkitbenchmarker/runs/<run_uri>)."""
    return temp_dir.GetRunDirPath()
def PrependTempDir(file_name):
    """Returns the file name prepended with the tmp dir of the current run."""
    return os.path.join(GetTempDir(), file_name)
def GenTempDir():
    """Creates the tmp dir for the current run if it does not already exist."""
    temp_dir.CreateTemporaryDirectories()
def SSHKeyGen():
    """Create PerfKitBenchmarker SSH keys in the tmp dir of the current run.

    Generates, if not already present:
      * an RSA key pair with an empty passphrase (PRIVATE_KEYFILE and its
        .pub companion), and
      * a self-signed X.509 certificate (CERT_FILE) derived from that key.

    Failures are logged rather than raised (previously they were silently
    swallowed); a missing key file will surface on first use.
    """
    if not os.path.isdir(GetTempDir()):
        GenTempDir()

    if not os.path.isfile(GetPrivateKeyPath()):
        create_cmd = ['ssh-keygen',
                      '-t', 'rsa',
                      '-N', '',
                      '-q',
                      '-f', PrependTempDir(PRIVATE_KEYFILE)]
        _RunKeyGenerationCommand(create_cmd)

    if not os.path.isfile(GetCertPath()):
        create_cmd = ['openssl',
                      'req',
                      '-x509',
                      '-new',
                      '-out', PrependTempDir(CERT_FILE),
                      '-key', PrependTempDir(PRIVATE_KEYFILE)]
        # openssl req prompts interactively for certificate fields; feed it
        # an empty answer (accept the default) for each of the 7 prompts.
        _RunKeyGenerationCommand(create_cmd, input_data='\n' * 7)


def _RunKeyGenerationCommand(create_cmd, input_data=None):
    """Run one key/cert generation command, logging (not raising) failures."""
    shell_value = RunningOnWindows()
    create_process = subprocess.Popen(
        create_cmd,
        shell=shell_value,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE if input_data is not None else None)
    _, stderr = create_process.communicate(input=input_data)
    if create_process.returncode:
        # Previously nonzero exit codes were ignored entirely.
        logging.warning('Command %s failed (exit %s): %s',
                        create_cmd, create_process.returncode, stderr)
def GetPrivateKeyPath():
    """Returns the path of this run's SSH private key file."""
    return PrependTempDir(PRIVATE_KEYFILE)
def GetPublicKeyPath():
    """Returns the path of this run's SSH public key file."""
    return PrependTempDir(PUBLIC_KEYFILE)
def GetCertPath():
    """Returns the path of this run's self-signed certificate file."""
    return PrependTempDir(CERT_FILE)
def GetSshOptions(ssh_key_filename):
    """Return common set of SSH and SCP options.

    The options force protocol 2, public-key-only authentication and
    non-interactive host-key handling so freshly provisioned VMs can be
    reached without prompts.
    """
    ssh_settings = (
        ('UserKnownHostsFile', '/dev/null'),
        ('StrictHostKeyChecking', 'no'),
        ('IdentitiesOnly', 'yes'),
        ('PreferredAuthentications', 'publickey'),
        ('PasswordAuthentication', 'no'),
        ('ConnectTimeout', '5'),
        ('GSSAPIAuthentication', 'no'),
        ('ServerAliveInterval', '30'),
        ('ServerAliveCountMax', '10'),
    )
    options = ['-2']
    for setting_name, setting_value in ssh_settings:
        options.extend(['-o', '%s=%s' % (setting_name, setting_value)])
    options.extend(['-i', ssh_key_filename])
    # User-supplied extra options come last so they can override ours.
    options.extend(FLAGS.ssh_options)
    return options
# TODO(skschneider): Remove at least RunParallelProcesses and RunParallelThreads
# from this file (update references to call directly into background_tasks).
RunParallelProcesses = background_tasks.RunParallelProcesses
RunParallelThreads = background_tasks.RunParallelThreads
RunThreaded = background_tasks.RunThreaded
def Retry(poll_interval=POLL_INTERVAL, max_retries=MAX_RETRIES,
          timeout=None, fuzz=FUZZ, log_errors=True,
          retryable_exceptions=None):
    """A function decorator that will retry when exceptions are thrown.

    Args:
      poll_interval: The time between tries in seconds. This is the maximum
          poll interval when fuzz is specified.
      max_retries: The maximum number of retries before giving up. If -1, this
          means continue until the timeout is reached. The function will stop
          retrying when either max_retries is met or timeout is reached.
      timeout: The timeout for all tries in seconds. If -1, this means continue
          until max_retries is met. The function will stop retrying when either
          max_retries is met or timeout is reached. If None, the
          --default_timeout flag value is used.
      fuzz: The amount of randomness in the sleep time. This is used to
          keep threads from all retrying at the same time. At 0, this
          means sleep exactly poll_interval seconds. At 1, this means
          sleep anywhere from 0 to poll_interval seconds.
      log_errors: A boolean describing whether errors should be logged.
      retryable_exceptions: A tuple of exceptions that should be retried. By
          default, this is None, which indicates that all exceptions should
          be retried.

    Returns:
      A function that wraps functions in retry logic. It can be
      used as a decorator.
    """
    if retryable_exceptions is None:
        retryable_exceptions = Exception

    def Wrap(f):
        """Wraps the supplied function with retry logic."""
        def WrappedFunction(*args, **kwargs):
            """Holds the retry logic."""
            # Fall back to the --default_timeout flag when no explicit
            # timeout was supplied.
            local_timeout = FLAGS.default_timeout if timeout is None else timeout
            if local_timeout >= 0:
                deadline = time.time() + local_timeout
            else:
                deadline = float('inf')
            tries = 0
            while True:
                try:
                    tries += 1
                    return f(*args, **kwargs)
                except retryable_exceptions as e:
                    # Randomize sleep in [poll_interval * (1 - fuzz),
                    # poll_interval] so concurrent callers do not retry in
                    # lockstep.
                    fuzz_multiplier = 1 - fuzz + random.random() * fuzz
                    sleep_time = poll_interval * fuzz_multiplier
                    if ((time.time() + sleep_time) >= deadline or
                            (max_retries >= 0 and tries > max_retries)):
                        # BUG FIX: bare `raise` re-raises with the original
                        # traceback; `raise e` truncated it to this frame.
                        raise
                    else:
                        if log_errors:
                            logging.error('Got exception running %s: %s',
                                          f.__name__, e)
                        time.sleep(sleep_time)
        return WrappedFunction
    return Wrap
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
                 env=None, timeout=DEFAULT_TIMEOUT, input=None):
    """Tries running the provided command once.

    Args:
      cmd: A list of strings such as is given to the subprocess.Popen()
          constructor.
      force_info_log: A boolean indicating whether the command result should
          always be logged at the info level. Command results will always be
          logged at the debug level if they aren't logged at another level.
      suppress_warning: A boolean indicating whether the results should
          not be logged at the info level in the event of a non-zero
          return code. When force_info_log is True, the output is logged
          regardless of suppress_warning's value.
      env: A dict of key/value strings, such as is given to the
          subprocess.Popen() constructor, that contains environment variables
          to be injected.
      timeout: Timeout for the command in seconds. If the command has not
          finished before the timeout is reached, it will be killed. Set
          timeout to None to let the command run indefinitely. If the
          subprocess is killed, the return code will indicate an error, and
          stdout and stderr will contain what had already been written to
          them before the process was killed.
      input: Optional string written to the process's stdin before waiting
          for it to finish.

    Returns:
      A tuple of stdout, stderr, and retcode from running the provided command.
    """
    logging.debug('Environment variables: %s', env)
    full_cmd = ' '.join(cmd)
    logging.info('Running: %s', full_cmd)
    shell_value = RunningOnWindows()
    with tempfile.TemporaryFile() as tf_out, tempfile.TemporaryFile() as tf_err:
        process = subprocess.Popen(cmd, env=env, shell=shell_value,
                                   stdin=subprocess.PIPE, stdout=tf_out,
                                   stderr=tf_err)

        def _KillProcess():
            logging.error('IssueCommand timed out after %d seconds. '
                          'Killing command "%s".', timeout, full_cmd)
            process.kill()

        # Only arm the kill timer when a timeout was requested; previously a
        # Timer was constructed even for timeout=None.
        timer = None if timeout is None else threading.Timer(timeout,
                                                             _KillProcess)
        if timer is not None:
            timer.start()
        try:
            # BUG FIX: `input` was accepted but never delivered -- the old
            # code called process.wait(), leaving stdin open and the input
            # unsent. communicate() writes it to stdin, closes stdin, and
            # waits for the process to exit.
            process.communicate(input=input)
        finally:
            if timer is not None:
                timer.cancel()

        tf_out.seek(0)
        stdout = tf_out.read().decode('ascii', 'ignore')
        tf_err.seek(0)
        stderr = tf_err.read().decode('ascii', 'ignore')

    debug_text = ('Ran %s. Got return code (%s).\nSTDOUT: %s\nSTDERR: %s' %
                  (full_cmd, process.returncode, stdout, stderr))
    if force_info_log or (process.returncode and not suppress_warning):
        logging.info(debug_text)
    else:
        logging.debug(debug_text)
    return stdout, stderr, process.returncode
def IssueBackgroundCommand(cmd, stdout_path, stderr_path, env=None):
    """Run the provided command once in the background.

    Args:
      cmd: Command to be run, as expected by subprocess.Popen.
      stdout_path: Redirect stdout here. Overwritten.
      stderr_path: Redirect stderr here. Overwritten.
      env: A dict of key/value strings, such as is given to the
          subprocess.Popen() constructor, that contains environment variables
          to be injected.
    """
    logging.debug('Environment variables: %s', env)
    full_cmd = ' '.join(cmd)
    logging.info('Spawning: %s', full_cmd)
    outfile = open(stdout_path, 'w')
    errfile = open(stderr_path, 'w')
    try:
        shell_value = RunningOnWindows()
        subprocess.Popen(cmd, env=env, shell=shell_value,
                         stdout=outfile, stderr=errfile, close_fds=True)
    finally:
        # BUG FIX: the child holds its own duplicated handles after Popen;
        # close the parent's copies, which were previously leaked.
        outfile.close()
        errfile.close()
@Retry()
def IssueRetryableCommand(cmd, env=None):
    """Tries running the provided command until it succeeds or times out.

    Args:
      cmd: A list of strings such as is given to the subprocess.Popen()
          constructor.
      env: An alternate environment to pass to the Popen command.

    Returns:
      A tuple of stdout and stderr from running the provided command.

    Raises:
      errors.VmUtil.CalledProcessException: on a non-zero exit code. The
          @Retry decorator retries the call (with its default poll interval)
          until FLAGS.default_timeout elapses.
    """
    stdout, stderr, retcode = IssueCommand(cmd, env=env)
    if retcode:
        raise errors.VmUtil.CalledProcessException(
            'Command returned a non-zero exit code.\n')
    return stdout, stderr
def ParseTimeCommandResult(command_result):
    """Parse command result and get time elapsed.

    Note this parses the output of bash's time builtin, not /usr/bin/time or
    other implementations. You may need to run something like
    bash -c "time ./command" to produce parseable output.

    Args:
      command_result: The result after executing a remote time command;
          expected to contain a line like ``real\t1m23.456s``.

    Returns:
      float. Wall-clock time taken by the command, in seconds.
    """
    # BUG FIX: the seconds pattern previously used an unescaped '.', which
    # would also accept e.g. '12x34'. The dot is now escaped.
    time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
    minutes, seconds = time_data[0]
    return 60 * float(minutes) + float(seconds)
def ShouldRunOnExternalIpAddress():
    """Returns whether a test should be run on an instance's external IP."""
    # Every --ip_addresses mode except INTERNAL exercises the external
    # address.
    external_modes = (IpAddressSubset.EXTERNAL,
                      IpAddressSubset.BOTH,
                      IpAddressSubset.REACHABLE)
    return FLAGS.ip_addresses in external_modes
def ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
    """Returns whether a test should be run on an instance's internal IP.

    Based on the command line flag --ip_addresses. Internal IP addresses are
    used when:

    * --ip_addresses=BOTH or --ip-addresses=INTERNAL
    * --ip_addresses=REACHABLE and 'sending_vm' can ping 'receiving_vm' on
      its internal IP.

    Args:
      sending_vm: VirtualMachine. The client.
      receiving_vm: VirtualMachine. The server.

    Returns:
      Whether a test should be run on an instance's internal IP.
    """
    return (FLAGS.ip_addresses in (IpAddressSubset.BOTH,
                                   IpAddressSubset.INTERNAL) or
            (FLAGS.ip_addresses == IpAddressSubset.REACHABLE and
             sending_vm.IsReachable(receiving_vm)))
def GetLastRunUri():
    """Returns the last run_uri used (or None if it can't be determined)."""
    runs_dir_path = temp_dir.GetAllRunsDirPath()
    try:
        run_dir_names = next(os.walk(runs_dir_path))[1]
    except StopIteration:
        # The runs directory itself does not exist.
        return None
    if not run_dir_names:
        # The runs directory exists but contains no per-run subdirectories.
        return None

    def _ModificationTime(dir_name):
        return os.path.getmtime(os.path.join(runs_dir_path, dir_name))

    # The most recently modified run directory is the last run.
    return max(run_dir_names, key=_ModificationTime)
@contextlib.contextmanager
def NamedTemporaryFile(prefix='tmp', suffix='', dir=None, delete=True):
    """Behaves like tempfile.NamedTemporaryFile.

    The existing tempfile.NamedTemporaryFile has the annoying property on
    Windows that it cannot be opened a second time while it is already open.
    This makes it impossible to use it with a "with" statement in a cross
    platform compatible way. This serves a similar role, but allows the file
    to be closed within a "with" statement without causing the file to be
    unlinked until the context exits.
    """
    # delete=False here: we control unlinking ourselves on context exit.
    handle = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix,
                                         dir=dir, delete=False)
    try:
        yield handle
    finally:
        if not handle.closed:
            handle.close()
        if delete:
            os.unlink(handle.name)
def GenerateSSHConfig(vms, vm_groups):
    """Generates an SSH config file to simplify connecting to the specified VMs.

    Writes a file to GetTempDir()/ssh_config with an SSH configuration for
    each VM provided in the arguments. Users can then SSH with any of the
    following:

        ssh -F <ssh_config_path> <vm_name>
        ssh -F <ssh_config_path> vm<vm_index>
        ssh -F <ssh_config_path> <group_name>-<index>

    Args:
      vms: list of BaseVirtualMachines.
      vm_groups: dict mapping VM group name string to list of
          BaseVirtualMachines.
    """
    target_file = os.path.join(GetTempDir(), 'ssh_config')
    template_path = data.ResourcePath('ssh_config.j2')
    # StrictUndefined makes the template fail loudly on any missing variable
    # instead of silently rendering an empty string.
    environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
    with open(template_path) as fp:
        template = environment.from_string(fp.read())
    with open(target_file, 'w') as ofp:
        ofp.write(template.render({'vms': vms, 'vm_groups': vm_groups}))
    # Tell the user about the three supported host-alias patterns.
    ssh_options = [' ssh -F {0} {1}'.format(target_file, pattern)
                   for pattern in ('<vm_name>', 'vm<index>',
                                   '<group_name>-<index>')]
    logging.info('ssh to VMs in this benchmark by name with:\n%s',
                 '\n'.join(ssh_options))
def RunningOnWindows():
    """Returns True if PKB is running on Windows."""
    # os.name reports 'nt' on all Windows platforms.
    return os.name == 'nt'
def ExecutableOnPath(executable_name):
    """Return True if the given executable can be found on the path.

    Uses `where` on Windows and `which` elsewhere; both exit with status 0
    exactly when the executable is found.
    """
    locator = 'where' if RunningOnWindows() else 'which'
    cmd = [locator, executable_name]
    shell_value = RunningOnWindows()
    process = subprocess.Popen(cmd,
                               shell=shell_value,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # Drain and close the pipes, then wait for the process.
    process.communicate()
    # Simplified from an if/return-False/return-True chain.
    return process.returncode == 0
def GenerateRandomWindowsPassword(password_length=15):
    """Generates a password that meets Windows complexity requirements.

    Args:
      password_length: int. Total password length; must be >= 4 so that each
          required character class can appear. Defaults to 15
          (PASSWORD_LENGTH).

    Returns:
      str of password_length characters containing at least one lowercase
      letter, one uppercase letter, one digit and one special character.
    """
    # The special characters have to be recognized by the Azure CLI as
    # special characters. This greatly limits the set of characters
    # that we can safely use. See
    # https://github.com/Azure/azure-xplat-cli/blob/master/lib/commands/arm/vm/vmOsProfile._js#L145
    special_chars = '*!@#$%+='
    # SECURITY FIX: use the OS CSPRNG; the default `random` PRNG is
    # predictable and unsuitable for generating credentials.
    rng = random.SystemRandom()
    all_chars = string.ascii_letters + string.digits + special_chars
    password = [rng.choice(all_chars)
                for _ in range(password_length - 4)]
    # Guarantee at least one of each of the 4 required character classes.
    password.append(rng.choice(string.ascii_lowercase))
    password.append(rng.choice(string.ascii_uppercase))
    password.append(rng.choice(string.digits))
    password.append(rng.choice(special_chars))
    # Shuffle so the required characters do not sit at predictable
    # positions at the end of the password.
    rng.shuffle(password)
    return ''.join(password)
| apache-2.0 |
brutasse/graphite-api | graphite_api/render/grammar.py | 1 | 3106 | from distutils.version import StrictVersion
from pyparsing import (
__version__, alphanums, alphas, CaselessKeyword, CaselessLiteral, Combine,
delimitedList, FollowedBy, Forward, Group, LineEnd, Literal, OneOrMore,
Optional, printables, quotedString, Regex, Word, ZeroOrMore,
)
# Graphite render-target grammar, built with pyparsing. Forward declarations
# let `expression` and `call` reference each other recursively.
grammar = Forward()
expression = Forward()

# --- Literals ---
intNumber = Regex(r'-?\d+')('integer')
floatNumber = Regex(r'-?\d+\.\d+')('float')
sciNumber = Combine(
    (floatNumber | intNumber) + CaselessLiteral('e') + intNumber
)('scientific')
aString = quotedString('string')

# Use lookahead to match only numbers in a list (can't remember why this
# is necessary)
afterNumber = FollowedBy(",") ^ FollowedBy(")") ^ FollowedBy(LineEnd())
number = Group(
    (sciNumber + afterNumber) |
    (floatNumber + afterNumber) |
    (intNumber + afterNumber)
)('number')
boolean = Group(
    CaselessKeyword("true") |
    CaselessKeyword("false")
)('boolean')
argname = Word(alphas + '_', alphanums + '_')('argname')
funcname = Word(alphas + '_', alphanums + '_')('funcname')

# --- Symbols ---
# BUG FIX: leftParen/rightParen/comma/equal were previously defined twice
# (identical definitions); the duplicates are collapsed into this block.
leftBrace = Literal('{')
rightBrace = Literal('}')
leftParen = Literal('(').suppress()
rightParen = Literal(')').suppress()
comma = Literal(',').suppress()
equal = Literal('=').suppress()
backslash = Literal('\\').suppress()
symbols = '''(){},=.'"\\'''

# --- Function calls ---
arg = Group(
    boolean |
    number |
    aString |
    expression
)('args*')
kwarg = Group(argname + equal + arg)('kwargs*')
# lookahead to prevent failing on equals
args = delimitedList(~kwarg + arg)
kwargs = delimitedList(kwarg)
call = Group(
    funcname + leftParen +
    Optional(
        args + Optional(
            comma + kwargs
        )
    ) + rightParen
)('call')

# --- Metric pattern (aka. pathExpression) ---
validMetricChars = ''.join((set(printables) - set(symbols)))
escapedChar = backslash + Word(symbols, exact=1)
partialPathElem = Combine(
    OneOrMore(
        escapedChar | Word(validMetricChars)
    )
)
matchEnum = Combine(
    leftBrace +
    delimitedList(partialPathElem, combine=True) +
    rightBrace
)
pathElement = Combine(
    Group(partialPathElem | matchEnum) +
    ZeroOrMore(matchEnum | partialPathElem)
)
pathExpression = delimitedList(pathElement,
                               delim='.', combine=True)('pathExpression')

# --- template(...) calls: only literal args/kwargs are allowed inside ---
litarg = Group(
    number | aString
)('args*')
litkwarg = Group(argname + equal + litarg)('kwargs*')
# lookahead to prevent failing on equals
litargs = delimitedList(~litkwarg + litarg)
litkwargs = delimitedList(litkwarg)
template = Group(
    Literal('template') + leftParen +
    (call | pathExpression) +
    Optional(comma + (litargs | litkwargs)) +
    rightParen
)('template')

# pyparsing 2.0 introduced `<<=` for Forward assignment; keep the old `<<`
# form for pre-2.0 versions.
if StrictVersion(__version__) >= StrictVersion('2.0.0'):
    expression <<= Group(template | call | pathExpression)('expression')
    grammar <<= expression
else:
    expression << (Group(template | call | pathExpression)('expression'))
    grammar << expression
| apache-2.0 |
Bajoo/client-pc | bajoo/gui/common/share_location_browser.py | 1 | 1890 | # -*- coding: utf-8 -*-
from os import path
import wx
from wx.lib.filebrowsebutton import DirBrowseButton
from ...common.i18n import _
class ShareLocationBrowser(DirBrowseButton):
    """Directory picker that keeps the share name as the final path segment.

    Derive the class DirBrowseButton to apply the share name as child folder
    each time user changes the directory by clicking "Browse".
    This class is created because wx does not supply an "EVT_BROWSE" event.
    """

    def __init__(self, **kwargs):
        # changeCallback fires on both manual text edits and Browse picks.
        DirBrowseButton.__init__(self, changeCallback=self._on_changed,
                                 buttonText=_('Browse'), **kwargs)
        # Name of the share folder, appended as the last path component
        # (updated via set_share_name()).
        self._share_name = ''
        # Last directory chosen as the parent of the share folder.
        self._parent_folder = kwargs.get('startDirectory', '.')

    def _on_changed(self, __event):
        # Normalize any change notification into a wx.EVT_TEXT event.
        self._post_text_event()

    def _post_text_event(self):
        """
        Redirect both events when user clicks button "Browse"
        or changes the text to the wx.EVT_TEXT event.
        """
        evt = wx.CommandEvent(wx.wxEVT_COMMAND_TEXT_UPDATED)
        evt.SetEventObject(self)
        evt.SetId(self.GetId())
        wx.PostEvent(self, evt)

    def OnBrowse(self, event=None):
        """
        After browsing for parent directory of the share, apply the
        share name as child directory
        """
        DirBrowseButton.OnBrowse(self, event)
        # The base handler put the picked directory in the text field; treat
        # it as the new parent and re-append the share name.
        self._parent_folder = self.GetValue()
        self.SetValue(path.join(self._parent_folder, self._share_name))
        self._post_text_event()

    def set_share_name(self, share_name, set_value_now=True):
        """
        Change the share name, this should be called each time user
        changes the share name value.

        Args:
            share_name (str): new name of the share folder.
            set_value_now (boolean):
                Change the location value immediately
        """
        self._share_name = share_name
        if set_value_now:
            self.SetValue(path.join(self._parent_folder, self._share_name))
| gpl-3.0 |
ronkyo/mi-instrument | mi/platform/util/network.py | 9 | 21094 | #!/usr/bin/env python
"""
@package ion.agents.platform.util.network
@file ion/agents/platform/util/network.py
@author Carlos Rueda
@brief Supporting elements for representation of a platform network
"""
__author__ = 'Carlos Rueda'
__license__ = 'Apache 2.0'
# NOTE: No use of any pyon stuff in this module mainly to also facilitate use by
# simulator, which uses a regular threading.Thread when run as a separate
# process, so we avoid gevent monkey-patching issues.
import hashlib
class BaseNode(object):
    """A convenient base class for the components of a platform network.

    Provides lazy, cached checksum support: subclasses implement
    _compute_checksum() (and diff()), while this class handles caching.
    """

    def __init__(self):
        # Cached digest from compute_checksum(); None until first computed
        # (or after invalidation).
        self._checksum = None

    def diff(self, other):
        """Return None when this node equals `other`; otherwise return a
        message describing the first difference found."""
        raise NotImplementedError()  # pragma: no cover

    @property
    def checksum(self):
        """The cached checksum, computing it first when none is cached.

        @note Unless client code carefully invalidates the cache on every
        change (including changes in children nodes), prefer calling
        compute_checksum(), which always reflects the current state.

        @return SHA1 hash value as string of hexadecimal digits.
        """
        # A falsy cache means "not computed yet"; compute_checksum() stores
        # the fresh value for subsequent accesses.
        return self._checksum or self.compute_checksum()

    def compute_checksum(self):
        """Recompute, cache, and return this node's checksum.

        Subclasses do not need to overwrite this method.

        @return SHA1 hash value as string of hexadecimal digits.
        """
        self._checksum = self._compute_checksum()
        return self._checksum

    def _compute_checksum(self):
        """Subclass hook: compute the checksum for this object, calling
        compute_checksum() on any subcomponents.

        @return SHA1 hash value as string of hexadecimal digits.
        """
        raise NotImplementedError()  # pragma: no cover
class AttrNode(BaseNode):
    """Represents a platform attribute."""

    def __init__(self, attr_id, defn):
        """
        OOIION-1551:
        First, get attr_name and attr_instance from the given attr_id (this is
        the preferred mechanism as it's expected to be of the form
        "<name>|<instance>") or from properties in defn, and resorting to
        the given attr_id for the name, and "0" for the instance.
        Finally, the store attr_id is composed from the name and instance as
        captured above.
        """
        BaseNode.__init__(self)
        # Split on the LAST '|' so names that themselves contain '|' keep
        # their full prefix as the name.
        idx = attr_id.rfind('|')
        if idx >= 0:
            self._attr_name = attr_id[:idx]
            self._attr_instance = attr_id[idx + 1:]
        else:
            # No separator present: fall back to the defn entries, then to
            # the raw attr_id / "0" defaults.
            self._attr_name = defn.get('attr_name', attr_id)
            self._attr_instance = defn.get('attr_instance', "0")
        # NOTE(review): despite the docstring, the stored ID is just the
        # name -- the instance is not appended here; confirm intended.
        self._attr_id = self._attr_name
        # Write the normalized ID back into defn (mutates the caller's dict).
        defn['attr_id'] = self._attr_id
        self._defn = defn

    def __repr__(self):
        return "AttrNode{id=%s, defn=%s}" % (self.attr_id, self.defn)

    @property
    def attr_name(self):
        return self._attr_name

    @property
    def attr_instance(self):
        return self._attr_instance

    @property
    def attr_id(self):
        return self._attr_id

    @property
    def defn(self):
        return self._defn

    @property
    def writable(self):
        # Writable when the read_write property mentions "write"
        # (case-insensitive), e.g. "read_write" or "write".
        return self.defn.get('read_write', '').lower().find("write") >= 0

    def diff(self, other):
        # Same contract as BaseNode.diff: None when equal, else a message
        # describing the first difference.
        if self.attr_id != other.attr_id:
            return "Attribute IDs are different: %r != %r" % (
                self.attr_id, other.attr_id)

        if self.defn != other.defn:
            return "Attribute definitions are different: %r != %r" % (
                self.defn, other.defn)

        return None

    def _compute_checksum(self):
        # SHA1 over the attribute ID plus its definition properties in
        # sorted key order (deterministic), skipping the derived
        # name/instance/id keys.
        hash_obj = hashlib.sha1()
        hash_obj.update("attribute_id=%s" % self.attr_id)
        # properties:
        hash_obj.update("attribute_properties:")
        for key in sorted(self.defn.keys()):
            if key not in ["attr_name", "attr_instance", "attr_id"]:
                val = self.defn[key]
                hash_obj.update("%s=%s;" % (key, val))
        return hash_obj.hexdigest()
class PortNode(BaseNode):
    """
    Represents a platform port.

    self._port_id
    self._instruments = { instrument_id: InstrumentNode, ... }
    """

    def __init__(self, port_id):
        BaseNode.__init__(self)
        self._port_id = str(port_id)
        # Instruments attached to this port, keyed by instrument ID.
        self._instruments = {}
        # Opaque port state; None until set via set_state().
        self._state = None

    def __repr__(self):
        return "PortNode{id=%s}" % (
            self._port_id)

    @property
    def port_id(self):
        return self._port_id

    @property
    def state(self):
        return self._state

    def set_state(self, state):
        self._state = state

    @property
    def instruments(self):
        """
        Instruments of this port.
        """
        return self._instruments

    def add_instrument(self, instrument):
        # Raises on duplicates rather than silently overwriting.
        if instrument.instrument_id in self._instruments:
            raise Exception('%s: duplicate instrument ID' % instrument.instrument_id)
        self._instruments[instrument.instrument_id] = instrument

    def remove_instrument(self, instrument_id):
        if instrument_id not in self._instruments:
            raise Exception('%s: Not such instrument ID' % instrument_id)
        del self._instruments[instrument_id]

    def diff(self, other):
        """
        Returns None if the two ports are the same.
        Otherwise, returns a message describing the first difference.
        """
        if self.port_id != other.port_id:
            return "Port IDs are different: %r != %r" % (
                self.port_id, other.port_id)

        if self.state != other.state:
            return "Port state values are different: %r != %r" % (
                self.state, other.state)

        # compare instruments (Python 2 dict iterators):
        instrument_ids = set(self.instruments.iterkeys())
        other_instrument_ids = set(other.instruments.iterkeys())
        if instrument_ids != other_instrument_ids:
            return "port_id=%r: instrument IDs are different: %r != %r" % (
                self.port_id, instrument_ids, other_instrument_ids)
        for instrument_id, instrument in self.instruments.iteritems():
            other_instrument = other.instruments[instrument_id]
            diff = instrument.diff(other_instrument)
            if diff:
                return diff

        return None

    def _compute_checksum(self):
        # SHA1 over the port ID, state, and each instrument's checksum in
        # sorted instrument-ID order (deterministic).
        hash_obj = hashlib.sha1()
        # id:
        hash_obj.update("port_id=%s;" % self.port_id)
        # state:
        hash_obj.update("port_state=%s;" % self.state)
        # instruments:
        hash_obj.update("port_instruments:")
        for key in sorted(self.instruments.keys()):
            instrument = self.instruments[key]
            hash_obj.update(instrument.compute_checksum())
        return hash_obj.hexdigest()
class MissionNode(BaseNode):
    """Represents a mission available on a platform."""

    def __init__(self, mission_id):
        BaseNode.__init__(self)
        self._mission_id = mission_id

    def __repr__(self):
        return "MissionNode(%s)" % self._mission_id

    @property
    def mission_id(self):
        return self._mission_id

    def diff(self, other):
        """Return None if the two missions are the same; otherwise a
        message describing the difference (consistent with sibling nodes)."""
        if self._mission_id != other.mission_id:
            return "Mission IDs are different: %r != %r" % (
                self._mission_id, other.mission_id)
        return None

    def _compute_checksum(self):
        # BUG FIX: this previously returned the integer literal 2, which
        # violates the BaseNode contract (SHA1 hex-digest string) and made
        # every mission hash identically. Hash the identifying state the
        # same way the other node types do.
        hash_obj = hashlib.sha1()
        hash_obj.update("mission_id=%s;" % self._mission_id)
        return hash_obj.hexdigest()
class InstrumentNode(BaseNode):
    """Represents an instrument in a port.

    Note, also used directly in PlatformNode to capture the configuration
    for instruments.

    self._instrument_id
    self._attrs = { ... }
    self._CFG = dict

    The _CFG element is included for convenience to capture the provided
    configuration dict.
    """

    def __init__(self, instrument_id, attrs=None, CFG=None):
        BaseNode.__init__(self)
        self._instrument_id = instrument_id
        # Falsy attrs (None or empty) is replaced by a fresh dict.
        self._attrs = attrs or {}
        self._CFG = CFG

    def __repr__(self):
        return "InstrumentNode{id=%s, attrs=%s}" % (
            self.instrument_id, self.attrs)

    @property
    def instrument_id(self):
        return self._instrument_id

    @property
    def attrs(self):
        """Attributes of this instrument."""
        return self._attrs

    @property
    def CFG(self):
        return self._CFG

    def diff(self, other):
        """Return None when the two instruments are equal; otherwise a
        message describing the first difference found."""
        if self.instrument_id != other.instrument_id:
            return "Instrument IDs are different: %r != %r" % (
                self.instrument_id, other.instrument_id)
        if self.attrs != other.attrs:
            return "Instrument attributes are different: %r != %r" % (
                self.attrs, other.attrs)
        return None

    def _compute_checksum(self):
        # SHA1 over the instrument ID plus its attributes in sorted key
        # order, so the digest is deterministic.
        hasher = hashlib.sha1()
        hasher.update("instrument_id=%s;" % self.instrument_id)
        hasher.update("instrument_attributes:")
        for attr_key in sorted(self.attrs):
            hasher.update("%s=%s;" % (attr_key, self.attrs[attr_key]))
        return hasher.hexdigest()
class PlatformNode(BaseNode):
"""
Platform node for purposes of representing the network.
self._platform_id
self._platform_types = [type, ...]
self._attrs = { attr_id: AttrNode, ... }
self._ports = { port_id: PortNode, ... }
self._subplatforms = { platform_id: PlatformNode, ...}
self._parent = None | PlatformNode
self._instruments = { instrument_id: InstrumentNode, ...}
self._CFG = dict
The _CFG element included for convenience to capture the provided
configuration dict in PlatformAgent. See
create_network_definition_from_ci_config()
NOTE: because _instruments is only to capture provided instrument
configuration, this property is NOT taken into account for the
_compute_checksum operation, which is intended for the current state of
the attributes and ports (which also include instruments but in the sense
of being connected to the port).
"""
#TODO: some separation of configuration vs. state would be convenient.
def __init__(self, platform_id, platform_types=None, CFG=None):
BaseNode.__init__(self)
self._platform_id = platform_id
self._platform_types = platform_types or []
self._name = None
self._ports = {}
self._attrs = {}
self._subplatforms = {}
self._parent = None
self._instruments = {}
self._CFG = CFG
self._missions = {}
def set_name(self, name):
self._name = name
def add_port(self, port):
if port.port_id in self._ports:
raise Exception('%s: duplicate port ID' % port.port_id)
self._ports[port.port_id] = port
def add_attribute(self, attr):
if attr.attr_id in self._attrs:
raise Exception('%s: duplicate attribute ID' % attr.attr_id)
self._attrs[attr.attr_id] = attr
def add_mission(self, mission):
if mission.mission_id in self._missions:
raise Exception('%s: duplicate mission ID' % mission.mission_id)
self._missions[mission.mission_id] = mission
@property
def platform_id(self):
return self._platform_id
@property
def platform_types(self):
return self._platform_types
@property
def name(self):
return self._name
@property
def ports(self):
return self._ports
@property
def attrs(self):
return self._attrs
@property
def missions(self):
return self._missions
def get_port(self, port_id):
return self._ports[port_id]
@property
def CFG(self):
return self._CFG
@property
def parent(self):
return self._parent
@property
def subplatforms(self):
return self._subplatforms
def add_subplatform(self, pn):
if pn.platform_id in self._subplatforms:
raise Exception('%s: duplicate subplatform ID' % pn.platform_id)
self._subplatforms[pn.platform_id] = pn
pn._parent = self
@property
def instruments(self):
"""
Instruments configured for this platform.
"""
return self._instruments
def add_instrument(self, instrument):
if instrument.instrument_id in self._instruments:
raise Exception('%s: duplicate instrument ID' % instrument.instrument_id)
self._instruments[instrument.instrument_id] = instrument
def __str__(self):
s = []
s.append("<%s" % self.platform_id)
if self.name:
s.append("/name=%s" % self.name)
s.append("/types=%s" % self.platform_types)
s.append(">\n")
s.append("ports=%s\n" % list(self.ports.itervalues()))
s.append("attrs=%s\n" % list(self.attrs.itervalues()))
s.append("missions=%s\n" % list(self.missions.itervalues()))
s.append("#subplatforms=%d\n" % len(self.subplatforms))
s.append("#instruments=%d\n" % len(self.instruments))
return ''.join(s)
def get_map(self, pairs):
"""
Helper for getting the list of (platform_id, parent_platform_id) pairs.
"""
if self._parent:
pairs.append((self.platform_id, self.parent.platform_id))
for sub_platform in self.subplatforms.itervalues():
sub_platform.get_map(pairs)
return pairs
    def diff(self, other):
        """
        Returns None if the two PlatformNode's represent the same topology and
        same attributes and ports.
        Otherwise, returns a message describing the first difference.

        Comparison order: IDs/names/types, parents, attributes, ports, then
        sub-platforms (recursively), so the returned message always reports
        the first mismatch found in that order.
        """
        if self.platform_id != other.platform_id:
            return "platform IDs are different: %r != %r" % (
                self.platform_id, other.platform_id)
        if self.name != other.name:
            return "platform names are different: %r != %r" % (
                self.name, other.name)
        if self.platform_types != other.platform_types:
            return "platform types are different: %r != %r" % (
                self.platform_types, other.platform_types)
        # compare parents:
        # first None-ness (one root, one non-root), then the parent IDs.
        if (self.parent is None) != (other.parent is None):
            return "platform parents are different: %r != %r" % (
                self.parent, other.parent)
        if self.parent is not None and self.parent.platform_id != other.parent.platform_id:
            return "platform parents are different: %r != %r" % (
                self.parent.platform_id, other.parent.platform_id)
        # compare attributes:
        # key sets first, then delegate per-attribute comparison to attr.diff.
        attr_ids = set(self.attrs.iterkeys())
        other_attr_ids = set(other.attrs.iterkeys())
        if attr_ids != other_attr_ids:
            return "platform_id=%r: attribute IDs are different: %r != %r" % (
                self.platform_id, attr_ids, other_attr_ids)
        for attr_id, attr in self.attrs.iteritems():
            other_attr = other.attrs[attr_id]
            diff = attr.diff(other_attr)
            if diff:
                return diff
        # compare ports:
        port_ids = set(self.ports.iterkeys())
        other_port_ids = set(other.ports.iterkeys())
        if port_ids != other_port_ids:
            return "platform_id=%r: port IDs are different: %r != %r" % (
                self.platform_id, port_ids, other_port_ids)
        for port_id, port in self.ports.iteritems():
            other_port = other.ports[port_id]
            diff = port.diff(other_port)
            if diff:
                return diff
        # compare sub-platforms:
        # same scheme: key sets, then recursive diff per child.
        subplatform_ids = set(self.subplatforms.iterkeys())
        other_subplatform_ids = set(other.subplatforms.iterkeys())
        if subplatform_ids != other_subplatform_ids:
            return "platform_id=%r: subplatform IDs are different: %r != %r" % (
                self.platform_id,
                subplatform_ids, other_subplatform_ids)
        for platform_id, node in self.subplatforms.iteritems():
            other_node = other.subplatforms[platform_id]
            diff = node.diff(other_node)
            if diff:
                return diff
        return None
    def _compute_checksum(self):
        """SHA1 over a canonical serialization of this subtree.

        Children are folded in first, then this node's ID, types, attributes
        and ports. All loops iterate in sorted key order so the digest does
        not depend on dict iteration order. Feeding str into update() is
        Python 2 behavior; py3 would require bytes.
        """
        hash_obj = hashlib.sha1()
        # update with checksum of sub-platforms:
        hash_obj.update("subplatforms:")
        for key in sorted(self.subplatforms.keys()):
            subplatform = self.subplatforms[key]
            hash_obj.update(subplatform.compute_checksum())
        # now, with info about the platform itself.
        # id:
        hash_obj.update("platform_id=%s;" % self.platform_id)
        # platform_types:
        hash_obj.update("platform_types:")
        for platform_type in sorted(self.platform_types):
            hash_obj.update("%s;" % platform_type)
        # attributes:
        hash_obj.update("platform_attributes:")
        for key in sorted(self.attrs.keys()):
            attr = self.attrs[key]
            hash_obj.update(attr.compute_checksum())
        # ports:
        hash_obj.update("platform_ports:")
        for key in sorted(self.ports.keys()):
            port = self.ports[key]
            hash_obj.update(port.compute_checksum())
        return hash_obj.hexdigest()
class NetworkDefinition(BaseNode):
    """
    Represents a platform network definition in terms of platform types and
    topology, including attributes and ports associated with the platforms.
    See NetworkUtil for serialization/deserialization of objects of this type
    and other associated utilities.
    """
    def __init__(self):
        BaseNode.__init__(self)
        # {platform_type : description} definitions for the network.
        self._platform_types = {}
        # {platform_id : PlatformNode} index over all nodes in the network.
        self._pnodes = {}
        # _dummy_root is a dummy PlatformNode having as children the actual roots in
        # the network.
        self._dummy_root = None
    @property
    def platform_types(self):
        """
        Returns the platform types in the network.
        @return {platform_type : description} dict
        """
        return self._platform_types
    @property
    def pnodes(self):
        """
        Returns a dict of all PlatformNodes in the network indexed by the platform ID.
        @return {platform_id : PlatformNode} map
        """
        return self._pnodes
    @property
    def root(self):
        """
        Returns the root PlatformNode. Can be None if there is no such root or
        there are multiple root PlatformNode's. The expected normal situation
        is to have single root.
        @return the root PlatformNode.
        """
        root = None
        if self._dummy_root and len(self._dummy_root.subplatforms) == 1:
            # dict.values()[0] is Python 2 only (py3 dict views are not indexable).
            root = self._dummy_root.subplatforms.values()[0]
        return root
    def get_map(self):
        """
        Helper for getting the list of (platform_id, parent_platform_id) pairs.

        NOTE(review): assumes _dummy_root has been set; raises AttributeError
        otherwise — confirm all callers build the topology first.
        """
        return self._dummy_root.get_map([])
    def diff(self, other):
        """
        Returns None if the two objects represent the same network definition.
        Otherwise, returns a message describing the first difference.
        """
        # compare platform_type definitions:
        if set(self.platform_types.items()) != set(other.platform_types.items()):
            return "platform types are different: %r != %r" % (
                self.platform_types, other.platform_types)
        # compare topology
        # None-ness first, then recursively delegate to the root nodes' diff.
        if (self.root is None) != (other.root is None):
            return "roots are different: %r != %r" % (
                self.root, other.root)
        if self.root is not None:
            return self.root.diff(other.root)
        else:
            return None
    def _compute_checksum(self):
        """SHA1 over the sorted platform_type pairs plus the root's checksum."""
        hash_obj = hashlib.sha1()
        # platform_types:
        hash_obj.update("platform_types:")
        for key in sorted(self.platform_types.keys()):
            platform_type = self.platform_types[key]
            hash_obj.update("%s=%s;" % (key, platform_type))
        # root PlatformNode:
        hash_obj.update("root_platform=%s;" % self.root.compute_checksum())
        return hash_obj.hexdigest()
| bsd-2-clause |
LethusTI/supportcenter | vendor/django/django/db/backends/creation.py | 81 | 18044 | import sys
import time
from django.conf import settings
from django.db.utils import load_backend
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation(object):
"""
This class encapsulates all backend-specific differences that pertain to
database *creation*, such as the column types to use for particular Django
Fields, the SQL used to create and destroy tables, and the creation and
destruction of test databases.
"""
data_types = {}
    def __init__(self, connection):
        """Store the database connection wrapper this helper operates on."""
        self.connection = connection
    def _digest(self, *args):
        """
        Generates a 32-bit digest of a set of arguments that can be used to
        shorten identifying names.
        """
        # NOTE: relies on Python 2's hash() (no default hash randomization)
        # being stable across runs so generated names are reproducible.
        return '%x' % (abs(hash(args)) % 4294967296L) # 2**32
    def sql_create_model(self, model, style, known_models=set()):
        """
        Returns the SQL required to create a single model, as a tuple of:
            (list_of_sql, pending_references_dict)
        """
        # NOTE(review): mutable default for known_models is safe only because
        # it is never mutated here — confirm no subclass mutates it.
        opts = model._meta
        if not opts.managed or opts.proxy:
            # Unmanaged and proxy models own no table.
            return [], {}
        final_output = []
        table_output = []
        pending_references = {}
        qn = self.connection.ops.quote_name
        for f in opts.local_fields:
            col_type = f.db_type(connection=self.connection)
            tablespace = f.db_tablespace or opts.db_tablespace
            if col_type is None:
                # Skip ManyToManyFields, because they're not represented as
                # database columns in this table.
                continue
            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
            field_output = [style.SQL_FIELD(qn(f.column)),
                style.SQL_COLTYPE(col_type)]
            if not f.null:
                field_output.append(style.SQL_KEYWORD('NOT NULL'))
            if f.primary_key:
                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
            elif f.unique:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            if tablespace and f.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                tablespace_sql = self.connection.ops.tablespace_sql(
                    tablespace, inline=True)
                if tablespace_sql:
                    field_output.append(tablespace_sql)
            if f.rel:
                ref_output, pending = self.sql_for_inline_foreign_key_references(
                    f, known_models, style)
                if pending:
                    # Referenced table not created yet; emitted later via
                    # sql_for_pending_references() as ALTER TABLE statements.
                    pending_references.setdefault(f.rel.to, []).append(
                        (model, f))
                else:
                    field_output.extend(ref_output)
            table_output.append(' '.join(field_output))
        for field_constraints in opts.unique_together:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
                ", ".join(
                    [style.SQL_FIELD(qn(opts.get_field(f).column))
                    for f in field_constraints]))
        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
                          style.SQL_TABLE(qn(opts.db_table)) + ' (']
        for i, line in enumerate(table_output): # Combine and add commas.
            full_statement.append(
                ' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
        full_statement.append(')')
        if opts.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(
                opts.db_tablespace)
            if tablespace_sql:
                full_statement.append(tablespace_sql)
        full_statement.append(';')
        final_output.append('\n'.join(full_statement))
        if opts.has_auto_field:
            # Add any extra SQL needed to support auto-incrementing primary
            # keys.
            auto_column = opts.auto_field.db_column or opts.auto_field.name
            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
                                                          auto_column)
            if autoinc_sql:
                for stmt in autoinc_sql:
                    final_output.append(stmt)
        return final_output, pending_references
    def sql_for_inline_foreign_key_references(self, field, known_models, style):
        """
        Return the SQL snippet defining the foreign key reference for a field.

        Returns (output, pending): *output* is a list with at most one
        REFERENCES clause; *pending* is True when the referenced table has
        not been created yet and the constraint must be added later.
        """
        qn = self.connection.ops.quote_name
        if field.rel.to in known_models:
            output = [style.SQL_KEYWORD('REFERENCES') + ' ' +
                style.SQL_TABLE(qn(field.rel.to._meta.db_table)) + ' (' +
                style.SQL_FIELD(qn(field.rel.to._meta.get_field(
                    field.rel.field_name).column)) + ')' +
                self.connection.ops.deferrable_sql()
            ]
            pending = False
        else:
            # We haven't yet created the table to which this field
            # is related, so save it for later.
            output = []
            pending = True
        return output, pending
    def sql_for_pending_references(self, model, style, pending_references):
        """
        Returns any ALTER TABLE statements to add constraints after the fact.

        Consumes (and deletes) the *model* entry from *pending_references*.
        """
        from django.db.backends.util import truncate_name
        if not model._meta.managed or model._meta.proxy:
            return []
        qn = self.connection.ops.quote_name
        final_output = []
        opts = model._meta
        if model in pending_references:
            for rel_class, f in pending_references[model]:
                rel_opts = rel_class._meta
                r_table = rel_opts.db_table
                r_col = f.column
                table = opts.db_table
                col = opts.get_field(f.rel.field_name).column
                # For MySQL, r_name must be unique in the first 64 characters.
                # So we are careful with character usage here.
                r_name = '%s_refs_%s_%s' % (
                    r_col, col, self._digest(r_table, table))
                final_output.append(style.SQL_KEYWORD('ALTER TABLE') +
                    ' %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s (%s)%s;' %
                    (qn(r_table), qn(truncate_name(
                        r_name, self.connection.ops.max_name_length())),
                    qn(r_col), qn(table), qn(col),
                    self.connection.ops.deferrable_sql()))
            # Entry fully processed; remove it so it is not emitted twice.
            del pending_references[model]
        return final_output
def sql_indexes_for_model(self, model, style):
"""
Returns the CREATE INDEX SQL statements for a single model.
"""
if not model._meta.managed or model._meta.proxy:
return []
output = []
for f in model._meta.local_fields:
output.extend(self.sql_indexes_for_field(model, f, style))
return output
    def sql_indexes_for_field(self, model, f, style):
        """
        Return the CREATE INDEX SQL statements for a single model field.
        """
        from django.db.backends.util import truncate_name
        if f.db_index and not f.unique:
            # Unique fields already get an index from their UNIQUE
            # constraint, so only plain db_index fields need one here.
            qn = self.connection.ops.quote_name
            tablespace = f.db_tablespace or model._meta.db_tablespace
            if tablespace:
                tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
                if tablespace_sql:
                    tablespace_sql = ' ' + tablespace_sql
            else:
                tablespace_sql = ''
            i_name = '%s_%s' % (model._meta.db_table, self._digest(f.column))
            output = [style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                style.SQL_TABLE(qn(truncate_name(
                    i_name, self.connection.ops.max_name_length()))) + ' ' +
                style.SQL_KEYWORD('ON') + ' ' +
                style.SQL_TABLE(qn(model._meta.db_table)) + ' ' +
                "(%s)" % style.SQL_FIELD(qn(f.column)) +
                "%s;" % tablespace_sql]
        else:
            output = []
        return output
    def sql_destroy_model(self, model, references_to_delete, style):
        """
        Return the DROP TABLE and restraint dropping statements for a single
        model.
        """
        if not model._meta.managed or model._meta.proxy:
            return []
        # Drop the table now
        qn = self.connection.ops.quote_name
        output = ['%s %s;' % (style.SQL_KEYWORD('DROP TABLE'),
                              style.SQL_TABLE(qn(model._meta.db_table)))]
        if model in references_to_delete:
            # FK constraints pointing at this table must be dropped first.
            output.extend(self.sql_remove_table_constraints(
                model, references_to_delete, style))
        if model._meta.has_auto_field:
            # Some backends pair the auto field with a sequence to drop.
            ds = self.connection.ops.drop_sequence_sql(model._meta.db_table)
            if ds:
                output.append(ds)
        return output
    def sql_remove_table_constraints(self, model, references_to_delete, style):
        """Return ALTER TABLE statements dropping FK constraints that point
        at *model*'s table; consumes the model's entry from
        *references_to_delete*."""
        from django.db.backends.util import truncate_name
        if not model._meta.managed or model._meta.proxy:
            return []
        output = []
        qn = self.connection.ops.quote_name
        for rel_class, f in references_to_delete[model]:
            table = rel_class._meta.db_table
            col = f.column
            r_table = model._meta.db_table
            r_col = model._meta.get_field(f.rel.field_name).column
            # Name must match the one generated in sql_for_pending_references.
            r_name = '%s_refs_%s_%s' % (
                col, r_col, self._digest(table, r_table))
            output.append('%s %s %s %s;' % \
                (style.SQL_KEYWORD('ALTER TABLE'),
                style.SQL_TABLE(qn(table)),
                style.SQL_KEYWORD(self.connection.ops.drop_foreignkey_sql()),
                style.SQL_FIELD(qn(truncate_name(
                    r_name, self.connection.ops.max_name_length())))))
        del references_to_delete[model]
        return output
    def create_test_db(self, verbosity=1, autoclobber=False):
        """
        Creates a test database, prompting the user for confirmation if the
        database already exists. Returns the name of the test database created.
        """
        # Don't import django.core.management if it isn't needed.
        from django.core.management import call_command
        test_database_name = self._get_test_db_name()
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print "Creating test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr)
        self._create_test_db(verbosity, autoclobber)
        self.connection.close()
        # Point this connection at the freshly created test database.
        self.connection.settings_dict["NAME"] = test_database_name
        # Confirm the feature set of the test database
        self.connection.features.confirm()
        # Report syncdb messages at one level lower than that requested.
        # This ensures we don't get flooded with messages during testing
        # (unless you really ask to be flooded)
        call_command('syncdb',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias,
            load_initial_data=False)
        # We need to then do a flush to ensure that any data installed by
        # custom SQL has been removed. The only test data should come from
        # test fixtures, or autogenerated from post_syncdb triggers.
        # This has the side effect of loading initial data (which was
        # intentionally skipped in the syncdb).
        call_command('flush',
            verbosity=max(verbosity - 1, 0),
            interactive=False,
            database=self.connection.alias)
        from django.core.cache import get_cache
        from django.core.cache.backends.db import BaseDatabaseCache
        # Create the cache tables for any database-backed cache aliases.
        for cache_alias in settings.CACHES:
            cache = get_cache(cache_alias)
            if isinstance(cache, BaseDatabaseCache):
                call_command('createcachetable', cache._table,
                    database=self.connection.alias)
        # Get a cursor (even though we don't need one yet). This has
        # the side effect of initializing the test database.
        self.connection.cursor()
        return test_database_name
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
or 'TEST_NAME' settings.
"""
if self.connection.settings_dict['TEST_NAME']:
return self.connection.settings_dict['TEST_NAME']
return TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
    def _create_test_db(self, verbosity, autoclobber):
        """
        Internal implementation - creates the test db tables.

        On a CREATE DATABASE failure, either recreates the database
        automatically (autoclobber) or asks the user for confirmation first;
        exits the process on refusal or on a second failure.
        """
        suffix = self.sql_table_creation_suffix()
        test_database_name = self._get_test_db_name()
        qn = self.connection.ops.quote_name
        # Create the test database and connect to it. We need to autocommit
        # if the database supports it because PostgreSQL doesn't allow
        # CREATE/DROP DATABASE statements within transactions.
        cursor = self.connection.cursor()
        self._prepare_for_test_db_ddl()
        try:
            cursor.execute(
                "CREATE DATABASE %s %s" % (qn(test_database_name), suffix))
        except Exception, e:
            sys.stderr.write(
                "Got an error creating the test database: %s\n" % e)
            if not autoclobber:
                confirm = raw_input(
                    "Type 'yes' if you would like to try deleting the test "
                    "database '%s', or 'no' to cancel: " % test_database_name)
            # 'confirm' is only bound when not autoclobber; the short-circuit
            # 'or' keeps this safe when autoclobber is True.
            if autoclobber or confirm == 'yes':
                try:
                    if verbosity >= 1:
                        print ("Destroying old test database '%s'..."
                               % self.connection.alias)
                    cursor.execute(
                        "DROP DATABASE %s" % qn(test_database_name))
                    cursor.execute(
                        "CREATE DATABASE %s %s" % (qn(test_database_name),
                                                   suffix))
                except Exception, e:
                    sys.stderr.write(
                        "Got an error recreating the test database: %s\n" % e)
                    sys.exit(2)
            else:
                print "Tests cancelled."
                sys.exit(1)
        return test_database_name
    def destroy_test_db(self, old_database_name, verbosity=1):
        """
        Destroy a test database, prompting the user for confirmation if the
        database already exists.
        """
        self.connection.close()
        test_database_name = self.connection.settings_dict['NAME']
        if verbosity >= 1:
            test_db_repr = ''
            if verbosity >= 2:
                test_db_repr = " ('%s')" % test_database_name
            print "Destroying test database for alias '%s'%s..." % (
                self.connection.alias, test_db_repr)
        # Temporarily use a new connection and a copy of the settings dict.
        # This prevents the production database from being exposed to potential
        # child threads while (or after) the test database is destroyed.
        # Refs #10868 and #17786.
        settings_dict = self.connection.settings_dict.copy()
        settings_dict['NAME'] = old_database_name
        backend = load_backend(settings_dict['ENGINE'])
        new_connection = backend.DatabaseWrapper(
            settings_dict,
            alias='__destroy_test_db__',
            allow_thread_sharing=False)
        new_connection.creation._destroy_test_db(test_database_name, verbosity)
    def _destroy_test_db(self, test_database_name, verbosity):
        """
        Internal implementation - remove the test db tables.
        """
        # Remove the test database to clean up after
        # ourselves. Connect to the previous database (not the test database)
        # to do so, because it's not allowed to delete a database while being
        # connected to it.
        cursor = self.connection.cursor()
        self._prepare_for_test_db_ddl()
        # Wait to avoid "database is being accessed by other users" errors.
        time.sleep(1)
        cursor.execute("DROP DATABASE %s"
            % self.connection.ops.quote_name(test_database_name))
        self.connection.close()
    def set_autocommit(self):
        """
        Make sure a connection is in autocommit mode. - Deprecated, not used
        anymore by Django code. Kept for compatibility with user code that
        might use it.
        """
        # Intentionally a no-op in the base class.
        pass
    def _prepare_for_test_db_ddl(self):
        """
        Internal implementation - Hook for tasks that should be performed
        before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
        testing code to create/ destroy test databases. Needed e.g. in
        PostgreSQL to rollback and close any active transaction.
        """
        # Base implementation does nothing; backends override as needed.
        pass
    def sql_table_creation_suffix(self):
        """
        SQL to append to the end of the test table creation statements.
        """
        # No suffix by default; presumably overridden by backends that need
        # e.g. charset or owner options — confirm per backend.
        return ''
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME']
)
| gpl-3.0 |
dednal/chromium.src | third_party/markdown/util.py | 109 | 5981 | # -*- coding: utf-8 -*-
# markdown is released under the BSD license
# Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
# Copyright 2004 Manfred Stienstra (the original version)
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import re
import sys
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3:
    # On Python 3 the str/unicode split is gone and chr covers all code points.
    string_type = str
    text_type = str
    int2str = chr
else:
    # Python 2 counterparts of the aliases above.
    string_type = basestring
    text_type = unicode
    int2str = unichr
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video)$", re.IGNORECASE)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]{4})')
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F'), # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
try: # Is the C implemenation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError):
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def isBlockLevel(tag):
    """Check if the tag is a block level HTML tag."""
    if not isinstance(tag, string_type):
        # Some ElementTree tags are not strings, so return False.
        return False
    # Truthy match object when block-level, None otherwise.
    return BLOCK_LEVEL_ELEMENTS.match(tag)
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
    """A string which should not be further processed."""
    # Marker subclass of text_type (unicode on Py2, str on Py3); adds no
    # behavior of its own.
    pass
class Processor(object):
    """Minimal base class that optionally binds the owning Markdown instance.

    Note: ``self.markdown`` is only set when a truthy instance is passed, so
    it may be absent on instances constructed with no argument.
    """
    def __init__(self, markdown_instance=None):
        if markdown_instance:
            self.markdown = markdown_instance
class HtmlStash(object):
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__(self):
        """ Create a HtmlStash. """
        self.html_counter = 0    # number of inline html segments stashed so far
        self.rawHtmlBlocks = []  # list of (html, safe) tuples

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion. Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string

        """
        placeholder = self.get_placeholder(self.html_counter)
        self.rawHtmlBlocks.append((html, safe))
        self.html_counter += 1
        return placeholder

    def reset(self):
        """Discard all stashed segments and restart the counter."""
        self.html_counter = 0
        self.rawHtmlBlocks = []

    def get_placeholder(self, key):
        """Return the placeholder token for stash slot *key*."""
        return "%swzxhzdk:%d%s" % (STX, key, ETX)
| bsd-3-clause |
Versent/ansible | test/units/parsing/test_splitter.py | 204 | 4425 | # coding: utf-8
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from nose import tools
from ansible.compat.tests import unittest
from ansible.parsing.splitter import split_args, parse_kv
# Tests using nose's test generators cannot use unittest base class.
# http://nose.readthedocs.org/en/latest/writing_tests.html#test-generators
class TestSplitter_Gen:
SPLIT_DATA = (
(u'a',
[u'a'],
{u'_raw_params': u'a'}),
(u'a=b',
[u'a=b'],
{u'a': u'b'}),
(u'a="foo bar"',
[u'a="foo bar"'],
{u'a': u'foo bar'}),
(u'"foo bar baz"',
[u'"foo bar baz"'],
{u'_raw_params': '"foo bar baz"'}),
(u'foo bar baz',
[u'foo', u'bar', u'baz'],
{u'_raw_params': u'foo bar baz'}),
(u'a=b c="foo bar"',
[u'a=b', u'c="foo bar"'],
{u'a': u'b', u'c': u'foo bar'}),
(u'a="echo \\"hello world\\"" b=bar',
[u'a="echo \\"hello world\\""', u'b=bar'],
{u'a': u'echo "hello world"', u'b': u'bar'}),
(u'a="multi\nline"',
[u'a="multi\nline"'],
{u'a': u'multi\nline'}),
(u'a="blank\n\nline"',
[u'a="blank\n\nline"'],
{u'a': u'blank\n\nline'}),
(u'a="blank\n\n\nlines"',
[u'a="blank\n\n\nlines"'],
{u'a': u'blank\n\n\nlines'}),
(u'a="a long\nmessage\\\nabout a thing\n"',
[u'a="a long\nmessage\\\nabout a thing\n"'],
{u'a': u'a long\nmessage\\\nabout a thing\n'}),
(u'a="multiline\nmessage1\\\n" b="multiline\nmessage2\\\n"',
[u'a="multiline\nmessage1\\\n"', u'b="multiline\nmessage2\\\n"'],
{u'a': 'multiline\nmessage1\\\n', u'b': u'multiline\nmessage2\\\n'}),
(u'a={{jinja}}',
[u'a={{jinja}}'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}',
[u'a={{ jinja }}'],
{u'a': u'{{ jinja }}'}),
(u'a="{{jinja}}"',
[u'a="{{jinja}}"'],
{u'a': u'{{jinja}}'}),
(u'a={{ jinja }}{{jinja2}}',
[u'a={{ jinja }}{{jinja2}}'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a="{{ jinja }}{{jinja2}}"',
[u'a="{{ jinja }}{{jinja2}}"'],
{u'a': u'{{ jinja }}{{jinja2}}'}),
(u'a={{jinja}} b={{jinja2}}',
[u'a={{jinja}}', u'b={{jinja2}}'],
{u'a': u'{{jinja}}', u'b': u'{{jinja2}}'}),
(u'a="{{jinja}}\n" b="{{jinja2}}\n"',
[u'a="{{jinja}}\n"', u'b="{{jinja2}}\n"'],
{u'a': u'{{jinja}}\n', u'b': u'{{jinja2}}\n'}),
(u'a="café eñyei"',
[u'a="café eñyei"'],
{u'a': u'café eñyei'}),
(u'a=café b=eñyei',
[u'a=café', u'b=eñyei'],
{u'a': u'café', u'b': u'eñyei'}),
)
    def check_split_args(self, args, expected):
        """Assert that split_args(args) produces exactly *expected*."""
        tools.eq_(split_args(args), expected)
    def test_split_args(self):
        # Nose test generator: one split_args() check per datapoint.
        for datapoint in self.SPLIT_DATA:
            yield self.check_split_args, datapoint[0], datapoint[1]
    def check_parse_kv(self, args, expected):
        """Assert that parse_kv(args) produces exactly *expected*."""
        tools.eq_(parse_kv(args), expected)
def test_parse_kv(self):
for datapoint in self.SPLIT_DATA:
try:
yield self.check_parse_kv, datapoint[0], datapoint[2]
except: pass
| gpl-3.0 |
jmcorgan/gnuradio | gr-audio/examples/python/dial_tone.py | 58 | 2150 | #!/usr/bin/env python
#
# Copyright 2004,2005,2007,2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
    from gnuradio import analog
except ImportError:
    # Bug fix: ``sys`` was used here without ever being imported in this
    # module, so a missing gr-analog raised NameError instead of printing
    # the intended message. Import it locally before use.
    import sys
    sys.stderr.write("Error: Program requires gr-analog.\n")
    sys.exit(1)
class my_top_block(gr.top_block):
    """Flow graph playing a 350 Hz / 440 Hz tone pair on the audio sink."""
    def __init__(self):
        gr.top_block.__init__(self)
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-O", "--audio-output", type="string", default="",
                          help="pcm output device name. E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args()
        if len(args) != 0:
            parser.print_help()
            # Python 2 raise syntax; exits with status 1 on stray arguments.
            raise SystemExit, 1
        sample_rate = int(options.sample_rate)
        ampl = 0.1
        # One sine source per audio sink channel (0 and 1).
        src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
        src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
        dst = audio.sink(sample_rate, options.audio_output)
        self.connect(src0, (dst, 0))
        self.connect(src1, (dst, 1))
if __name__ == '__main__':
    try:
        my_top_block().run()
    except KeyboardInterrupt:
        # Ctrl-C stops the flow graph quietly.
        pass
| gpl-3.0 |
liorvh/phantomjs | src/qt/qtwebkit/Source/WebInspectorUI/Scripts/cssmin.py | 120 | 2021 | #!/usr/bin/python
# Copyright (C) 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import re
def cssminify(css):
    """Minify *css*: drop comments and newlines, collapse whitespace,
    remove spaces around punctuation and trailing semicolons before '}'.

    The substitutions are applied in order; order matters (e.g. comments
    must go before newline removal, ';}' cleanup must run last).
    """
    substitutions = (
        (r"\/\*.*?\*\/", ""),            # delete comments
        (r"\n", ""),                     # delete new lines
        (r"\s+", " "),                   # change multiple spaces to one space
        (r"\s?([;:{},+>])\s?", r"\1"),   # delete space where it is not needed
        (r";}", "}"),                    # ';' before '}' is not needed
    )
    flags = re.MULTILINE | re.UNICODE | re.DOTALL
    minified = css.replace("\r\n", "\n")
    for pattern, replacement in substitutions:
        minified = re.compile(pattern, flags).sub(replacement, minified)
    return minified
if __name__ == "__main__":
    # Filter mode: minify CSS read from stdin and write it to stdout.
    import sys
    sys.stdout.write(cssminify(sys.stdin.read()))
| bsd-3-clause |
tyler-smith/OpenBazaar-Server | market/tests/test_protocol.py | 6 | 1579 | from twisted.trial import unittest
from twisted.python import log
from dht.node import Node
from dht.utils import digest
from dht.routing import RoutingTable
from market.protocol import MarketProtocol
from dht.tests.utils import mknode
class MarketProtocolTest(unittest.TestCase):
    """Unit tests for market.protocol.MarketProtocol."""

    def setUp(self):
        # Capture twisted log output so tests can assert on warning messages.
        self.catcher = []
        observer = self.catcher.append
        log.addObserver(observer)
        self.addCleanup(log.removeObserver, observer)
        self.node = Node(digest("test"), "127.0.0.1", 1234)
        self.router = RoutingTable(self, 20, self.node.id)

    def test_MarketProtocol_connect_multiplexer_correctly(self):
        mp = MarketProtocol(0, 0, 0, 0)
        self.assertEqual(mp.multiplexer, None)
        mp.connect_multiplexer("3")
        self.assertEqual(mp.multiplexer, "3")

    def test_MarketProtocol_add_listener_correctly(self):
        mp = MarketProtocol(0, 0, 0, 0)
        self.assertEqual(len(mp.listeners), 0)
        mp.add_listener(3)
        self.assertEqual(len(mp.listeners), 1)

    def test_MarketProtocol_rpc_get_image_invalid_image_hash(self):
        # A hash that is not 20 bytes long should return None and log two
        # warnings (popped in reverse order of emission).
        catcher = self.catcher
        mp = MarketProtocol(self.node, self.router, 0, 0)
        self.assertEqual(None, mp.rpc_get_image(mknode(), "invalid_hash"))
        catch_exception = catcher.pop()
        exception_message = catcher.pop()
        # assertEqual replaces the deprecated assertEquals alias, matching the
        # style of the other tests in this class.
        self.assertEqual(catch_exception["message"][0], "[WARNING] could not find image 696e76616c69645f68617368")
        self.assertEqual(exception_message["message"][0], "[WARNING] Image hash is not 20 characters invalid_hash")
| mit |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/mercurial-2.2.3-py2.7-linux-x86_64-ucs4.egg/hgext/largefiles/proto.py | 1 | 6341 | # Copyright 2011 Fog Creek Software
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import os
import urllib2
from mercurial import error, httprepo, util, wireproto
from mercurial.i18n import _
import lfutil
# Out-of-band error payload sent to clients that contact a largefiles
# repository without having enabled the extension themselves (see heads()).
LARGEFILES_REQUIRED_MSG = ('\nThis repository uses the largefiles extension.'
                           '\n\nPlease enable it in your Mercurial config '
                           'file.\n')
def putlfile(repo, proto, sha):
    '''Put a largefile into a repository's local store and into the
    user cache.

    The content is streamed from the client into an atomic temp file,
    verified against the expected sha1, and only then committed to the
    store. Returns a wire protocol push result: 0 on success, 1 on
    failure.'''
    proto.redirect()
    path = lfutil.storepath(repo, sha)
    util.makedirs(os.path.dirname(path))
    tmpfp = util.atomictempfile(path, createmode=repo.store.createmode)
    try:
        try:
            proto.getfile(tmpfp)
            # rewind so the received content can be re-read for hashing
            tmpfp._fp.seek(0)
            if sha != lfutil.hexsha1(tmpfp._fp):
                raise IOError(0, _('largefile contents do not match hash'))
            # close() commits the temp file into its final location
            tmpfp.close()
            lfutil.linktousercache(repo, sha)
        except IOError, e:
            repo.ui.warn(_('largefiles: failed to put %s into store: %s') %
                         (sha, e.strerror))
            return wireproto.pushres(1)
        finally:
            # presumably a no-op after a successful close(), otherwise it
            # removes the partially written temp file -- confirm against
            # util.atomictempfile semantics
            tmpfp.discard()
    return wireproto.pushres(0)
def getlfile(repo, proto, sha):
    '''Retrieve a largefile from the repository-local cache or system
    cache.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        raise util.Abort(_('requested largefile %s not present in cache') % sha)
    fp = open(filename, 'rb')
    length = os.fstat(fp.fileno()).st_size
    # Since we can't set an HTTP content-length header here, and
    # Mercurial core provides no way to give the length of a streamres
    # (and reading the entire file into RAM would be ill-advised), we
    # just send the length on the first line of the response, like the
    # ssh proto does for string responses.
    def stream():
        yield '%d\n' % length
        for chunk in fp:
            yield chunk
    return wireproto.streamres(stream())
def statlfile(repo, proto, sha):
    '''Report the state of a largefile on the server: '2\n' when it is
    missing, '1\n' when its checksum does not match, '0\n' when it is in
    good condition.'''
    filename = lfutil.findfile(repo, sha)
    if not filename:
        return '2\n'
    fd = open(filename, 'rb')
    try:
        if lfutil.hexsha1(fd) == sha:
            return '0\n'
        return '1\n'
    finally:
        fd.close()
def wirereposetup(ui, repo):
    '''Swap the remote (wire) repository's class for a subclass that adds
    client-side support for the largefiles RPCs.'''
    class lfileswirerepository(repo.__class__):
        def putlfile(self, sha, fd):
            # Upload the largefile open in *fd*; returns 0 on success, 1 on
            # failure.
            # unfortunately, httprepository._callpush tries to convert its
            # input file-like into a bundle before sending it, so we can't use
            # it ...
            if issubclass(self.__class__, httprepo.httprepository):
                res = None
                try:
                    res = self._call('putlfile', data=fd, sha=sha,
                        headers={'content-type':'application/mercurial-0.1'})
                    # response format: '<code>\n<remote output>'
                    d, output = res.split('\n', 1)
                    for l in output.splitlines(True):
                        self.ui.warn(_('remote: '), l, '\n')
                    return int(d)
                except (ValueError, urllib2.HTTPError):
                    self.ui.warn(_('unexpected putlfile response: %s') % res)
                    return 1
            # ... but we can't use sshrepository._call because the data=
            # argument won't get sent, and _callpush does exactly what we want
            # in this case: send the data straight through
            else:
                try:
                    ret, output = self._callpush("putlfile", fd, sha=sha)
                    if ret == "":
                        raise error.ResponseError(_('putlfile failed:'),
                                                  output)
                    return int(ret)
                except IOError:
                    return 1
                except ValueError:
                    raise error.ResponseError(
                        _('putlfile failed (unexpected response):'), ret)
        def getlfile(self, sha):
            # Fetch a largefile; returns (expected byte length, stream). The
            # length travels on the first line of the stream (see the
            # server-side getlfile in this module).
            stream = self._callstream("getlfile", sha=sha)
            length = stream.readline()
            try:
                length = int(length)
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"),
                                                length))
            return (length, stream)
        def statlfile(self, sha):
            # Ask the server about a largefile: 0 good, 1 bad hash, 2 missing.
            try:
                return int(self._call("statlfile", sha=sha))
            except (ValueError, urllib2.HTTPError):
                # If the server returns anything but an integer followed by a
                # newline, it's not speaking our language; if we get an HTTP
                # error, we can't be sure the largefile is present; either
                # way, consider it missing.
                return 2
    repo.__class__ = lfileswirerepository
# advertise the largefiles=serve capability
def capabilities(repo, proto):
    '''Wrap the original capabilities handler so clients see that this
    server can serve largefiles.'''
    return '%s largefiles=serve' % capabilitiesorig(repo, proto)
# duplicate what Mercurial's new out-of-band errors mechanism does, because
# clients old and new alike both handle it well
def webprotorefuseclient(self, message):
    # HTTP transport: reply with the dedicated error content type so the
    # client surfaces *message* instead of a generic protocol failure.
    self.req.header([('Content-Type', 'application/hg-error')])
    return message
def sshprotorefuseclient(self, message):
    # ssh transport: write the error followed by the '-\n' marker to the
    # error channel, and send an empty response on the output channel.
    self.ui.write_err('%s\n-\n' % message)
    self.fout.write('\n')
    self.fout.flush()
    return ''
def heads(repo, proto):
    '''Intercept the heads command: repositories that use largefiles refuse
    clients that have not enabled the extension.'''
    if not lfutil.islfilesrepo(repo):
        return wireproto.heads(repo, proto)
    return wireproto.ooberror(LARGEFILES_REQUIRED_MSG)
def _rewritelargefilescmd(self, cmd, args):
    '''Shared helper for the _callstream wrappers below: when the remote
    supports largefiles, rewrite 'heads' to 'lheads' (also inside batch
    command strings) and return the possibly rewritten (cmd, args) pair.'''
    if cmd == 'heads' and self.capable('largefiles'):
        cmd = 'lheads'
    if cmd == 'batch' and self.capable('largefiles'):
        args['cmds'] = args['cmds'].replace('heads ', 'lheads ')
    return cmd, args

def sshrepocallstream(self, cmd, **args):
    '''ssh flavour of the heads->lheads rewriting _callstream wrapper.'''
    cmd, args = _rewritelargefilescmd(self, cmd, args)
    return ssholdcallstream(self, cmd, **args)

def httprepocallstream(self, cmd, **args):
    '''http flavour of the heads->lheads rewriting _callstream wrapper.'''
    cmd, args = _rewritelargefilescmd(self, cmd, args)
    return httpoldcallstream(self, cmd, **args)
| gpl-3.0 |
Venturi/cms | env/lib/python2.7/site-packages/pip/commands/list.py | 393 | 6814 | from pip.basecommand import Command
from pip.exceptions import DistributionNotFound, BestVersionAlreadyInstalled
from pip.index import PackageFinder
from pip.log import logger
from pip.req import InstallRequirement
from pip.util import get_installed_distributions, dist_is_editable
from pip.cmdoptions import make_option_group, index_group
class ListCommand(Command):
    """List installed packages, including editables."""
    name = 'list'
    usage = """
      %prog [options]"""
    summary = 'List installed packages.'
    # distributions to skip (python itself is reported by pkg_resources.working_set)
    skip = ['python']
    def __init__(self, *args, **kw):
        """Register the command-line options for 'pip list'."""
        super(ListCommand, self).__init__(*args, **kw)
        cmd_opts = self.cmd_opts
        cmd_opts.add_option(
            '-o', '--outdated',
            action='store_true',
            default=False,
            help='List outdated packages (excluding editables)')
        cmd_opts.add_option(
            '-u', '--uptodate',
            action='store_true',
            default=False,
            help='List uptodate packages (excluding editables)')
        cmd_opts.add_option(
            '-e', '--editable',
            action='store_true',
            default=False,
            help='List editable projects.')
        cmd_opts.add_option(
            '-l', '--local',
            action='store_true',
            default=False,
            help='If in a virtualenv that has global access, do not list globally-installed packages.')
        cmd_opts.add_option(
            '--pre',
            action='store_true',
            default=False,
            help="Include pre-release and development versions. By default, pip only finds stable versions.")
        index_opts = make_option_group(index_group, self.parser)
        # inserting at position 0 each time puts cmd_opts before index_opts
        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, cmd_opts)
    def _build_package_finder(self, options, index_urls, session):
        """
        Create a package finder appropriate to this list command.
        """
        return PackageFinder(find_links=options.find_links,
                             index_urls=index_urls,
                             allow_external=options.allow_external,
                             allow_unverified=options.allow_unverified,
                             allow_all_external=options.allow_all_external,
                             allow_all_prereleases=options.pre,
                             process_dependency_links=
                             options.process_dependency_links,
                             session=session,
        )
    def run(self, options, args):
        """Dispatch to the listing variant selected by the option flags."""
        if options.outdated:
            self.run_outdated(options)
        elif options.uptodate:
            self.run_uptodate(options)
        elif options.editable:
            self.run_editables(options)
        else:
            self.run_listing(options)
    def run_outdated(self, options):
        """Print installed packages for which a newer version is available."""
        for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options):
            if remote_version_parsed > dist.parsed_version:
                logger.notify('%s (Current: %s Latest: %s)' % (dist.project_name,
                                                               dist.version, remote_version_raw))
    def find_packages_latests_versions(self, options):
        """Yield (dist, raw_version, parsed_version) for the latest version of
        each installed, non-editable package found on the configured indexes."""
        index_urls = [options.index_url] + options.extra_index_urls
        if options.no_index:
            logger.notify('Ignoring indexes: %s' % ','.join(index_urls))
            index_urls = []
        if options.use_mirrors:
            logger.deprecated("1.7",
                              "--use-mirrors has been deprecated and will be removed"
                              " in the future. Explicit uses of --index-url and/or "
                              "--extra-index-url is suggested.")
        if options.mirrors:
            logger.deprecated("1.7",
                              "--mirrors has been deprecated and will be removed in "
                              " the future. Explicit uses of --index-url and/or "
                              "--extra-index-url is suggested.")
            index_urls += options.mirrors
        # collect dependency links declared by installed distributions
        dependency_links = []
        for dist in get_installed_distributions(local_only=options.local, skip=self.skip):
            if dist.has_metadata('dependency_links.txt'):
                dependency_links.extend(
                    dist.get_metadata_lines('dependency_links.txt'),
                )
        session = self._build_session(options)
        finder = self._build_package_finder(options, index_urls, session)
        finder.add_dependency_links(dependency_links)
        installed_packages = get_installed_distributions(local_only=options.local, include_editables=False, skip=self.skip)
        for dist in installed_packages:
            req = InstallRequirement.from_line(dist.key, None)
            try:
                link = finder.find_requirement(req, True)
                # If link is None, means installed version is most up-to-date
                if link is None:
                    continue
            except DistributionNotFound:
                continue
            except BestVersionAlreadyInstalled:
                remote_version = req.installed_version
            else:
                # It might be a good idea that link or finder had a public method
                # that returned version
                remote_version = finder._link_package_versions(link, req.name)[0]
            # NOTE(review): remote_version is indexed as (parsed, ?, raw) --
            # confirm against _link_package_versions' return shape.
            remote_version_raw = remote_version[2]
            remote_version_parsed = remote_version[0]
            yield dist, remote_version_raw, remote_version_parsed
    def run_listing(self, options):
        """List every installed package (optionally local-only)."""
        installed_packages = get_installed_distributions(local_only=options.local, skip=self.skip)
        self.output_package_listing(installed_packages)
    def run_editables(self, options):
        """List only editable (development-mode) installs."""
        installed_packages = get_installed_distributions(local_only=options.local, editables_only=True)
        self.output_package_listing(installed_packages)
    def output_package_listing(self, installed_packages):
        """Print one 'name (version[, location])' line per package, sorted
        case-insensitively by project name."""
        installed_packages = sorted(installed_packages, key=lambda dist: dist.project_name.lower())
        for dist in installed_packages:
            if dist_is_editable(dist):
                line = '%s (%s, %s)' % (dist.project_name, dist.version, dist.location)
            else:
                line = '%s (%s)' % (dist.project_name, dist.version)
            logger.notify(line)
    def run_uptodate(self, options):
        """List packages whose installed version matches the latest one."""
        uptodate = []
        for dist, remote_version_raw, remote_version_parsed in self.find_packages_latests_versions(options):
            if dist.parsed_version == remote_version_parsed:
                uptodate.append(dist)
        self.output_package_listing(uptodate)
| gpl-2.0 |
frontendphil/analyzr | parsr/migrations/0055_auto__add_packagemetric.py | 1 | 13796 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the 'PackageMetric' model (per-package
    code-metric snapshots per revision and author)."""
    def forwards(self, orm):
        """Apply the migration: create the parsr_packagemetric table."""
        # Adding model 'PackageMetric'
        db.create_table(u'parsr_packagemetric', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('package', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['parsr.Package'])),
            ('revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['parsr.Revision'])),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['parsr.Author'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')()),
            ('cyclomatic_complexity', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('cyclomatic_complexity_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_volume', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_volume_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_difficulty', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_difficulty_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_effort', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('halstead_effort_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('fan_in', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('fan_in_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('fan_out', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('fan_out_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('sloc', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('sloc_delta', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('hk', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
            ('hk_delta', self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=15, decimal_places=2)),
        ))
        db.send_create_signal(u'parsr', ['PackageMetric'])
    def backwards(self, orm):
        """Revert the migration: drop the parsr_packagemetric table."""
        # Deleting model 'PackageMetric'
        db.delete_table(u'parsr_packagemetric')
    # Frozen ORM state auto-generated by South -- do not edit by hand.
    models = {
        u'parsr.author': {
            'Meta': {'object_name': 'Author'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'parsr.branch': {
            'Meta': {'object_name': 'Branch'},
            'analyzed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'analyzed_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'analyzing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_analyze_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'last_measure_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'measured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'measuring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Repo']", 'null': 'True'}),
            'revision_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'parsr.file': {
            'Meta': {'object_name': 'File'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Author']", 'null': 'True'}),
            'change_type': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True'}),
            'copy_of': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.File']", 'null': 'True'}),
            'cyclomatic_complexity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'cyclomatic_complexity_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'fan_in': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_in_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_out': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_out_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_difficulty': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_difficulty_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_effort': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_effort_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_volume': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_volume_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'hk': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'hk_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lines_added': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'lines_removed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'package': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'null': 'True', 'to': u"orm['parsr.Package']"}),
            'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Revision']"}),
            'sloc': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sloc_delta': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'parsr.package': {
            'Meta': {'object_name': 'Package'},
            'branch': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Branch']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'left': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': u"orm['parsr.Package']"}),
            'right': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'parsr.packagemetric': {
            'Meta': {'object_name': 'PackageMetric'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Author']"}),
            'cyclomatic_complexity': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'cyclomatic_complexity_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'fan_in': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_in_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_out': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'fan_out_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_difficulty': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_difficulty_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_effort': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_effort_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_volume': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'halstead_volume_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'hk': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            'hk_delta': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '15', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'package': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Package']"}),
            'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Revision']"}),
            'sloc': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sloc_delta': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'parsr.repo': {
            'Meta': {'object_name': 'Repo'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignored_files': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'ignored_folders': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'timezone': ('timezone_field.fields.TimeZoneField', [], {'default': "'Europe/Berlin'"}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'parsr.revision': {
            'Meta': {'object_name': 'Revision'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Author']", 'null': 'True'}),
            'branch': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Branch']", 'null': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'day': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'hour': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'measured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'minute': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'month': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'next': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'previous'", 'null': 'True', 'to': u"orm['parsr.Revision']"}),
            'weekday': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'year': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        }
    }
complete_apps = ['parsr'] | mit |
kimoonkim/spark | python/pyspark/mllib/stat/KernelDensity.py | 118 | 1997 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys

# Python 3 removed the ``xrange`` builtin; alias it to ``range`` so the rest
# of this module can use ``xrange`` on both major versions. Comparing
# sys.version_info is more robust than the previous string comparison of
# sys.version (which embeds the build date and compiler).
if sys.version_info[0] >= 3:
    xrange = range
import numpy as np
from pyspark.mllib.common import callMLlibFunc
from pyspark.rdd import RDD
class KernelDensity(object):
    """
    Estimate probability density at required points given an RDD of samples
    from the population.

    >>> kd = KernelDensity()
    >>> sample = sc.parallelize([0.0, 1.0])
    >>> kd.setSample(sample)
    >>> kd.estimate([0.0, 1.0])
    array([ 0.12938758, 0.12938758])
    """
    def __init__(self):
        # Defaults: unit bandwidth, no sample attached yet.
        self._bandwidth = 1.0
        self._sample = None

    def setBandwidth(self, bandwidth):
        """Set bandwidth of each sample. Defaults to 1.0"""
        self._bandwidth = bandwidth

    def setSample(self, sample):
        """Set sample points from the population. Should be a RDD"""
        if isinstance(sample, RDD):
            self._sample = sample
        else:
            raise TypeError("samples should be a RDD, received %s" % type(sample))

    def estimate(self, points):
        """Estimate the probability density at points"""
        pts = list(points)
        densities = callMLlibFunc(
            "estimateKernelDensity", self._sample, self._bandwidth, pts)
        return np.asarray(densities)
| apache-2.0 |
azureplus/hue | desktop/libs/libsolr/src/libsolr/api.py | 5 | 26836 | #!/usr/bin/env python
# -- coding: utf-8 --
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import urllib
from itertools import groupby
from django.utils.translation import ugettext as _
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_str
from desktop.lib.rest.http_client import HttpClient, RestException
from desktop.lib.rest import resource
from search.conf import EMPTY_QUERY, SECURITY_ENABLED
from search.api import _compute_range_facet
import re
LOG = logging.getLogger(__name__)
# Identity sent as 'user.name' on non-secure clusters; the real requesting
# user is always forwarded through the 'doAs' parameter (see _get_params).
DEFAULT_USER = 'hue'
def utf_quoter(what):
  """Percent-encode *what* (coerced to a UTF-8 byte string) for embedding in
  a Solr URL, leaving Solr query punctuation unescaped."""
  encoded = unicode(what).encode('utf-8')
  return urllib.quote(encoded, safe='~@#$&()*!+=;,.?/\'')
class SolrApi(object):
"""
http://wiki.apache.org/solr/CoreAdmin#CoreAdminHandler
"""
  def __init__(self, solr_url, user, security_enabled=SECURITY_ENABLED.get()):
    # NOTE(review): the security_enabled default is evaluated once at import
    # time, not per instantiation -- confirm this is intended.
    self._url = solr_url
    self._user = user
    self._client = HttpClient(self._url, logger=LOG)
    self.security_enabled = security_enabled
    if self.security_enabled:
      # secure clusters authenticate via Kerberos (SPNEGO)
      self._client.set_kerberos_auth()
    self._root = resource.Resource(self._client)
def _get_params(self):
if self.security_enabled:
return (('doAs', self._user ),)
return (('user.name', DEFAULT_USER), ('doAs', self._user),)
  def _get_q(self, query):
    """Build the Solr 'q' parameter by OR-ing the collection's query strings
    together (an empty string falls back to EMPTY_QUERY), UTF-8 encoded."""
    q_template = '(%s)' if len(query['qs']) >= 2 else '%s'
    # NOTE(review): operands are joined with 'OR' and no surrounding spaces;
    # this only parses because each operand is parenthesized when there are
    # two or more -- confirm ' OR ' was not intended.
    return 'OR'.join([q_template % (q['q'] or EMPTY_QUERY.get()) for q in query['qs']]).encode('utf-8')
def _get_aggregate_function(self, facet):
props = {
'field': facet['field'],
'aggregate': facet['properties']['aggregate'] if 'properties' in facet else facet['aggregate']
}
if props['aggregate'] == 'median':
return 'percentile(%(field)s,50)' % props
else:
return '%(aggregate)s(%(field)s)' % props
  def _get_range_borders(self, collection, query):
    """Compute the properties of the collection's active time filter
    (field, from/to bounds and per-widget gap) used by time facets."""
    props = {}
    # Gap table per rolling-window size: histogram/bucket widgets get
    # fine-grained gaps (~100 slots), facet widgets coarser ones (~10 slots).
    GAPS = {
        '5MINUTES': {
            'histogram-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
            'bucket-widget': {'coeff': '+3', 'unit': 'SECONDS'}, # ~100 slots
            'facet-widget': {'coeff': '+1', 'unit': 'MINUTES'}, # ~10 slots
        },
        '30MINUTES': {
            'histogram-widget': {'coeff': '+20', 'unit': 'SECONDS'},
            'bucket-widget': {'coeff': '+20', 'unit': 'SECONDS'},
            'facet-widget': {'coeff': '+5', 'unit': 'MINUTES'},
        },
        '1HOURS': {
            'histogram-widget': {'coeff': '+30', 'unit': 'SECONDS'},
            'bucket-widget': {'coeff': '+30', 'unit': 'SECONDS'},
            'facet-widget': {'coeff': '+10', 'unit': 'MINUTES'},
        },
        '12HOURS': {
            'histogram-widget': {'coeff': '+7', 'unit': 'MINUTES'},
            'bucket-widget': {'coeff': '+7', 'unit': 'MINUTES'},
            'facet-widget': {'coeff': '+1', 'unit': 'HOURS'},
        },
        '1DAYS': {
            'histogram-widget': {'coeff': '+15', 'unit': 'MINUTES'},
            'bucket-widget': {'coeff': '+15', 'unit': 'MINUTES'},
            'facet-widget': {'coeff': '+3', 'unit': 'HOURS'},
        },
        '2DAYS': {
            'histogram-widget': {'coeff': '+30', 'unit': 'MINUTES'},
            'bucket-widget': {'coeff': '+30', 'unit': 'MINUTES'},
            'facet-widget': {'coeff': '+6', 'unit': 'HOURS'},
        },
        '7DAYS': {
            'histogram-widget': {'coeff': '+3', 'unit': 'HOURS'},
            'bucket-widget': {'coeff': '+3', 'unit': 'HOURS'},
            'facet-widget': {'coeff': '+1', 'unit': 'DAYS'},
        },
        '1MONTHS': {
            'histogram-widget': {'coeff': '+12', 'unit': 'HOURS'},
            'bucket-widget': {'coeff': '+12', 'unit': 'HOURS'},
            'facet-widget': {'coeff': '+5', 'unit': 'DAYS'},
        },
        '3MONTHS': {
            'histogram-widget': {'coeff': '+1', 'unit': 'DAYS'},
            'bucket-widget': {'coeff': '+1', 'unit': 'DAYS'},
            'facet-widget': {'coeff': '+30', 'unit': 'DAYS'},
        },
        '1YEARS': {
            'histogram-widget': {'coeff': '+3', 'unit': 'DAYS'},
            'bucket-widget': {'coeff': '+3', 'unit': 'DAYS'},
            'facet-widget': {'coeff': '+12', 'unit': 'MONTHS'},
        },
        '2YEARS': {
            'histogram-widget': {'coeff': '+7', 'unit': 'DAYS'},
            'bucket-widget': {'coeff': '+7', 'unit': 'DAYS'},
            'facet-widget': {'coeff': '+3', 'unit': 'MONTHS'},
        },
        '10YEARS': {
            'histogram-widget': {'coeff': '+1', 'unit': 'MONTHS'},
            'bucket-widget': {'coeff': '+1', 'unit': 'MONTHS'},
            'facet-widget': {'coeff': '+1', 'unit': 'YEARS'},
        }
    }
    time_field = collection['timeFilter'].get('field')
    if time_field and (collection['timeFilter']['value'] != 'all' or collection['timeFilter']['type'] == 'fixed'):
      # fqs overrides main time filter
      fq_time_ids = [fq['id'] for fq in query['fqs'] if fq['field'] == time_field]
      props['time_filter_overrides'] = fq_time_ids
      props['time_field'] = time_field
      if collection['timeFilter']['type'] == 'rolling':
        # rolling window anchored at NOW
        props['field'] = collection['timeFilter']['field']
        props['from'] = 'NOW-%s' % collection['timeFilter']['value']
        props['to'] = 'NOW'
        props['gap'] = GAPS.get(collection['timeFilter']['value'])
      elif collection['timeFilter']['type'] == 'fixed':
        # explicit absolute window
        props['field'] = collection['timeFilter']['field']
        props['from'] = collection['timeFilter']['from']
        props['to'] = collection['timeFilter']['to']
        props['fixed'] = True
    return props
  def _get_time_filter_query(self, timeFilter, facet):
    """Translate *timeFilter* (from _get_range_borders) into the start/end/gap
    parameters of a Solr range facet for *facet*'s widget type."""
    if 'fixed' in timeFilter:
      # fixed window: derive a gap from the span via _compute_range_facet
      props = {}
      stat_facet = {'min': timeFilter['from'], 'max': timeFilter['to']}
      _compute_range_facet(facet['widgetType'], stat_facet, props, stat_facet['min'], stat_facet['max'])
      gap = props['gap']
      # gap looks like '+<n><UNIT>'; keep only the unit for the '/<unit>'
      # suffix (presumably Solr date-math rounding -- confirm)
      unit = re.split('\d+', gap)[1]
      return {
        'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': unit},
        'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': unit},
        'gap': '%(gap)s' % props, # add a 'auto'
      }
    else:
      # rolling window: use the precomputed gap for this widget type
      gap = timeFilter['gap'][facet['widgetType']]
      return {
        'start': '%(from)s/%(unit)s' % {'from': timeFilter['from'], 'unit': gap['unit']},
        'end': '%(to)s/%(unit)s' % {'to': timeFilter['to'], 'unit': gap['unit']},
        'gap': '%(coeff)s%(unit)s/%(unit)s' % gap, # add a 'auto'
      }
def _get_fq(self, collection, query):
    """Build the tuple of Solr 'fq' (filter query) parameters for a dashboard query.

    Applies the collection's main time filter (unless a facet filter on the
    same field overrides it), then merges and serializes the user's facet
    filters by type: 'field', 'range', 'range-up' and 'map'.
    """
    params = ()
    timeFilter = {}

    if collection:
        timeFilter = self._get_range_borders(collection, query)
    # Main time filter only applies when no fq on the same field overrides it.
    if timeFilter and not timeFilter.get('time_filter_overrides'):
        params += (('fq', urllib.unquote(utf_quoter('%(field)s:[%(from)s TO %(to)s]' % timeFilter))),)

    # Merge facets queries on same fields
    grouped_fqs = groupby(query['fqs'], lambda x: (x['type'], x['field']))
    merged_fqs = []
    for key, group in grouped_fqs:
        # Keep the first fq of each (type, field) group and fold the other
        # groups' filter values into it.
        field_fq = next(group)
        for fq in group:
            for f in fq['filter']:
                field_fq['filter'].append(f)
        merged_fqs.append(field_fq)

    for fq in merged_fqs:
        if fq['type'] == 'field':
            fields = fq['field'] if type(fq['field']) == list else [fq['field']]  # 2D facets support
            for field in fields:
                f = []
                for _filter in fq['filter']:
                    values = _filter['value'] if type(_filter['value']) == list else [_filter['value']]  # 2D facets support
                    if fields.index(field) < len(values):  # Lowest common field denominator
                        value = values[fields.index(field)]
                        exclude = '-' if _filter['exclude'] else ''
                        if value is not None and ' ' in smart_str(value):
                            # Quote (and escape embedded quotes in) multi-word values.
                            value = smart_str(value).replace('"', '\\"')
                            f.append('%s%s:"%s"' % (exclude, field, value))
                        else:
                            # {!field} query parser handles exact single-token match.
                            f.append('%s{!field f=%s}%s' % (exclude, field, value))
                # Tag the fq with the facet id so facets can exclude it (ex=...).
                _params = '{!tag=%(id)s}' % fq + ' '.join(f)
                params += (('fq', urllib.unquote(utf_quoter(_params))),)
        elif fq['type'] == 'range':
            params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
                utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'], f['to']))) for field, f in zip(fq['filter'], fq['properties'])])),)
        elif fq['type'] == 'range-up':
            # Open-ended range: [from TO *] when is_up, [* TO from] otherwise.
            params += (('fq', '{!tag=%(id)s}' % fq + ' '.join([urllib.unquote(
                utf_quoter('%s%s:[%s TO %s}' % ('-' if field['exclude'] else '', fq['field'], f['from'] if fq['is_up'] else '*', '*' if fq['is_up'] else f['from'])))
                for field, f in zip(fq['filter'], fq['properties'])])),)
        elif fq['type'] == 'map':
            # Bounding-box filter on the lat/lon fields of a map widget.
            _keys = fq.copy()
            _keys.update(fq['properties'])
            params += (('fq', '{!tag=%(id)s}' % fq + urllib.unquote(
                utf_quoter('%(lat)s:[%(lat_sw)s TO %(lat_ne)s} AND %(lon)s:[%(lon_sw)s TO %(lon_ne)s}' % _keys))),)

    return params
def query(self, collection, query):
    """Execute a dashboard search and return the parsed JSON response from Solr.

    Translates the dashboard `collection` definition and the user `query`
    into a Solr /select request: pagination, classic facets (query / range /
    field / pivot), JSON Facet API facets (nested / function), filter
    queries, field list, highlighting and sorting.
    """
    solr_query = {}
    solr_query['collection'] = collection['name']

    # Pagination: downloads fetch one big fixed page; interactive queries
    # follow the template page size and the requested offset.
    if query.get('download'):
        solr_query['rows'] = 1000
        solr_query['start'] = 0
    else:
        solr_query['rows'] = int(collection['template']['rows'] or 10)
        solr_query['start'] = int(query['start'])

    # Safety caps on page size and offset.
    solr_query['rows'] = min(solr_query['rows'], 1000)
    solr_query['start'] = min(solr_query['start'], 10000)

    params = self._get_params() + (
        ('q', self._get_q(query)),
        ('wt', 'json'),
        ('rows', solr_query['rows']),
        ('start', solr_query['start']),
    )

    if any(collection['facets']):
        params += (
            ('facet', 'true'),
            ('facet.mincount', 0),
            ('facet.limit', 10),
        )
        json_facets = {}

        timeFilter = self._get_range_borders(collection, query)

        for facet in collection['facets']:
            if facet['type'] == 'query':
                params += (('facet.query', '%s' % facet['field']),)
            elif facet['type'] == 'range' or facet['type'] == 'range-up':
                keys = {
                    'id': '%(id)s' % facet,
                    'field': facet['field'],
                    'key': '%(field)s-%(id)s' % facet,
                    'start': facet['properties']['start'],
                    'end': facet['properties']['end'],
                    'gap': facet['properties']['gap'],
                    'mincount': int(facet['properties']['mincount'])
                }

                # Let the global time filter override this facet's borders,
                # unless the facet itself is the one filtering on time.
                if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'histogram-widget'):
                    keys.update(self._get_time_filter_query(timeFilter, facet))

                params += (
                    ('facet.range', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.range.start=%(start)s f.%(field)s.facet.range.end=%(end)s f.%(field)s.facet.range.gap=%(gap)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
                )
            elif facet['type'] == 'field':
                keys = {
                    'id': '%(id)s' % facet,
                    'field': facet['field'],
                    'key': '%(field)s-%(id)s' % facet,
                    # facet-widget fetches one extra row to detect "more" pages.
                    'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0),
                    'mincount': int(facet['properties']['mincount'])
                }

                params += (
                    ('facet.field', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s}%(field)s' % keys),
                )
            elif facet['type'] == 'nested':
                _f = {
                    'field': facet['field'],
                    'limit': int(facet['properties'].get('limit', 10)) + (1 if facet['widgetType'] == 'facet-widget' else 0),
                    'mincount': int(facet['properties']['mincount'])
                }

                if 'start' in facet['properties']:
                    _f.update({
                        'type': 'range',
                        'start': facet['properties']['start'],
                        'end': facet['properties']['end'],
                        'gap': facet['properties']['gap'],
                    })
                    if timeFilter and timeFilter['time_field'] == facet['field'] and (facet['id'] not in timeFilter['time_filter_overrides'] or facet['widgetType'] != 'bucket-widget'):
                        _f.update(self._get_time_filter_query(timeFilter, facet))
                else:
                    _f.update({
                        'type': 'terms',
                        'field': facet['field'],
                        'excludeTags': facet['id']
                    })

                if facet['properties']['facets']:
                    if facet['properties']['facets'][0]['aggregate'] == 'count':
                        _f['facet'] = {
                            'd2': {
                                'type': 'terms',
                                'field': '%(field)s' % facet['properties']['facets'][0],
                                'limit': int(facet['properties']['facets'][0].get('limit', 10)),
                                'mincount': int(facet['properties']['facets'][0]['mincount'])
                            }
                        }
                        if len(facet['properties']['facets']) > 1:  # Get 3rd dimension calculation
                            _f['facet']['d2']['facet'] = {
                                'd2': self._get_aggregate_function(facet['properties']['facets'][1])
                            }
                    else:
                        _f['facet'] = {
                            'd2': self._get_aggregate_function(facet['properties']['facets'][0])
                        }

                json_facets[facet['id']] = _f
            elif facet['type'] == 'function':
                json_facets[facet['id']] = self._get_aggregate_function(facet)
                json_facets['processEmpty'] = True
            elif facet['type'] == 'pivot':
                if facet['properties']['facets'] or facet['widgetType'] == 'map-widget':
                    fields = facet['field']
                    fields_limits = []
                    for f in facet['properties']['facets']:
                        fields_limits.append('f.%s.facet.limit=%s' % (f['field'], f['limit']))
                        fields_limits.append('f.%s.facet.mincount=%s' % (f['field'], f['mincount']))
                        fields += ',' + f['field']
                    keys = {
                        'id': '%(id)s' % facet,
                        'key': '%(field)s-%(id)s' % facet,
                        'field': facet['field'],
                        'fields': fields,
                        'limit': int(facet['properties'].get('limit', 10)),
                        'mincount': int(facet['properties']['mincount']),
                        'fields_limits': ' '.join(fields_limits)
                    }
                    params += (
                        ('facet.pivot', '{!key=%(key)s ex=%(id)s f.%(field)s.facet.limit=%(limit)s f.%(field)s.facet.mincount=%(mincount)s %(fields_limits)s}%(fields)s' % keys),
                    )

        if json_facets:
            params += (
                ('json.facet', json.dumps(json_facets)),
            )

    params += self._get_fq(collection, query)

    if collection['template']['fieldsSelected'] and collection['template']['isGridLayout']:
        # Bug fix: parenthesize the conditional expression. It previously
        # parsed as `(fieldsSelected + [idField]) if idField else []`, which
        # silently dropped every selected field when the collection had no
        # idField (leaving an empty 'fl' parameter).
        fields = set(collection['template']['fieldsSelected'] + ([collection['idField']] if collection['idField'] else []))
        # Add field if needed
        if collection['template']['leafletmap'].get('latitudeField'):
            fields.add(collection['template']['leafletmap']['latitudeField'])
        if collection['template']['leafletmap'].get('longitudeField'):
            fields.add(collection['template']['leafletmap']['longitudeField'])
        if collection['template']['leafletmap'].get('labelField'):
            fields.add(collection['template']['leafletmap']['labelField'])
        params += (('fl', urllib.unquote(utf_quoter(','.join(list(fields))))),)
    else:
        params += (('fl', '*'),)

    params += (
        ('hl', 'true'),
        ('hl.fl', '*'),
        ('hl.snippets', 5),
        ('hl.fragsize', 1000),
    )

    if collection['template']['fieldsSelected']:
        fields = []
        for field in collection['template']['fieldsSelected']:
            attribute_field = filter(lambda attribute: field == attribute['name'], collection['template']['fieldsAttributes'])
            if attribute_field:
                if attribute_field[0]['sort']['direction']:
                    fields.append('%s %s' % (field, attribute_field[0]['sort']['direction']))
        if fields:
            params += (
                ('sort', ','.join(fields)),
            )

    response = self._root.get('%(collection)s/select' % solr_query, params)
    return self._get_json(response)
def suggest(self, solr_query, hue_core):
    """Fetch term suggestions from the collection's /suggest handler."""
    try:
        request_params = self._get_params()
        request_params += (
            ('q', solr_query['q']),
            ('wt', 'json'),
        )
        resp = self._root.get('%(collection)s/suggest' % solr_query, request_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def collections(self):
    """Return the SolrCloud cluster state (collections and shards) from ZooKeeper."""
    try:
        zk_params = self._get_params() + (
            ('detail', 'true'),
            ('path', '/clusterstate.json'),
        )
        znode = self._root.get('zookeeper', params=zk_params)['znode']
        return json.loads(znode.get('data', '{}'))
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def aliases(self):
    """Return the mapping of collection aliases defined in ZooKeeper."""
    try:
        zk_params = self._get_params() + (
            ('detail', 'true'),
            ('path', '/aliases.json'),
        )
        znode = self._root.get('zookeeper', params=zk_params)['znode']
        return json.loads(znode.get('data', '{}')).get('collection', {})
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def collection_or_core(self, hue_collection):
    """Dispatch to core() or collection() depending on how the index is stored."""
    if hue_collection.is_core_only:
        return self.core(hue_collection.name)
    return self.collection(hue_collection.name)
def collection(self, name):
    """Look up a single collection's cluster-state entry by name."""
    try:
        return self.collections()[name]
    except Exception as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def create_collection(self, name, shards=1, replication=1):
    """Create a SolrCloud collection; returns True on success, False otherwise."""
    try:
        create_params = self._get_params() + (
            ('action', 'CREATE'),
            ('name', name),
            ('numShards', shards),
            ('replicationFactor', replication),
            ('collection.configName', name),
            ('wt', 'json')
        )
        response = self._root.post('admin/collections', params=create_params, contenttype='application/json')
        if 'success' not in response:
            LOG.error("Could not create collection. Check response:\n%s" % json.dumps(response, indent=2))
            return False
        return True
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def create_core(self, name, instance_dir, shards=1, replication=1):
    """Create a standalone (non-cloud) core.

    Returns True on success and False on failure, including the case
    where a core of that name already exists.
    """
    try:
        core_params = self._get_params() + (
            ('action', 'CREATE'),
            ('name', name),
            ('instanceDir', instance_dir),
            ('wt', 'json'),
        )
        response = self._root.post('admin/cores', params=core_params, contenttype='application/json')
        if response.get('responseHeader', {}).get('status', -1) == 0:
            return True
        LOG.error("Could not create core. Check response:\n%s" % json.dumps(response, indent=2))
        return False
    except RestException as e:
        if 'already exists' in e.message:
            LOG.warn("Could not create collection.", exc_info=True)
            return False
        raise PopupException(e, title=_('Error while accessing Solr'))
def remove_collection(self, name, replication=1):
    """Delete a SolrCloud collection; returns True on success, False otherwise."""
    try:
        delete_params = self._get_params() + (
            ('action', 'DELETE'),
            ('name', name),
            ('replicationFactor', replication),
            ('wt', 'json')
        )
        response = self._root.post('admin/collections', params=delete_params, contenttype='application/json')
        if 'success' not in response:
            LOG.error("Could not remove collection. Check response:\n%s" % json.dumps(response, indent=2))
            return False
        return True
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def remove_core(self, name):
    """Unload a core and delete its index; returns True on success, False otherwise."""
    try:
        unload_params = self._get_params() + (
            ('action', 'UNLOAD'),
            ('name', name),
            ('deleteIndex', 'true'),
            ('wt', 'json')
        )
        response = self._root.post('admin/cores', params=unload_params, contenttype='application/json')
        if 'success' not in response:
            LOG.error("Could not remove core. Check response:\n%s" % json.dumps(response, indent=2))
            return False
        return True
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def add_fields(self, collection, fields):
    """POST new field definitions to the collection's Schema API."""
    try:
        return self._root.post('%s/schema/fields' % collection,
                               params=self._get_params(),
                               data=json.dumps(fields),
                               contenttype='application/json')
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def cores(self):
    """List the status of every core known to this Solr instance."""
    try:
        core_params = self._get_params() + (('wt', 'json'),)
        return self._root.get('admin/cores', params=core_params)['status']
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def core(self, core):
    """Return the admin status information of a single core."""
    try:
        core_params = self._get_params() + (
            ('wt', 'json'),
            ('core', core),
        )
        return self._root.get('admin/cores', params=core_params)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def schema(self, core):
    """Download the raw schema.xml of `core` through the admin file handler."""
    try:
        file_params = self._get_params() + (
            ('wt', 'json'),
            ('file', 'schema.xml'),
        )
        return self._root.get('%(core)s/admin/file' % {'core': core}, params=file_params)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def fields(self, core, dynamic=False):
    """Introspect the fields of `core` via the Luke handler.

    With dynamic=False only declared schema fields are reported;
    otherwise dynamically created fields are included as well.
    """
    try:
        luke_params = self._get_params() + (
            ('wt', 'json'),
            ('fl', '*'),
        )
        if not dynamic:
            luke_params += (('show', 'schema'),)
        resp = self._root.get('%(core)s/admin/luke' % {'core': core}, params=luke_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def luke(self, core):
    """Return full index metadata for `core` from the Luke handler."""
    try:
        luke_params = self._get_params() + (('wt', 'json'),)
        resp = self._root.get('%(core)s/admin/luke' % {'core': core}, params=luke_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def schema_fields(self, core):
    """List the schema field definitions of `core` via the Schema API."""
    try:
        schema_params = self._get_params() + (('wt', 'json'),)
        resp = self._root.get('%(core)s/schema/fields' % {'core': core}, params=schema_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def stats(self, core, fields, query=None, facet=''):
    """Run a stats query over `fields` on `core`, optionally faceted and filtered."""
    try:
        q = self._get_q(query) if query is not None else EMPTY_QUERY.get()
        stats_params = self._get_params() + (
            ('q', q),
            ('wt', 'json'),
            ('rows', 0),
            ('stats', 'true'),
        )
        if query is not None:
            stats_params += self._get_fq(None, query)
        if facet:
            stats_params += (('stats.facet', facet),)
        stats_params += tuple(('stats.field', field) for field in fields)
        resp = self._root.get('%(core)s/select' % {'core': core}, params=stats_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def terms(self, core, field, properties=None):
    """Fetch term listings for `field` from the /terms handler of `core`.

    Extra handler options can be passed via the `properties` dict.
    """
    try:
        term_params = self._get_params() + (
            ('wt', 'json'),
            ('rows', 0),
            ('terms.fl', field),
        )
        if properties:
            for key, val in properties.iteritems():
                term_params += ((key, val),)
        resp = self._root.get('%(core)s/terms' % {'core': core}, params=term_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def get(self, core, doc_id):
    """Retrieve a single document by id via Solr's real-time /get handler."""
    try:
        get_params = self._get_params() + (
            ('id', doc_id),
            ('wt', 'json'),
        )
        resp = self._root.get('%(core)s/get' % {'core': core}, params=get_params)
        return self._get_json(resp)
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
@classmethod
def _get_json(cls, response):
    """Coerce a REST response into a dict, tolerating text payloads with NUL bytes."""
    if type(response) is dict:
        return response
    # Got 'plain/text' mimetype instead of 'application/json'
    try:
        return json.loads(response)
    except ValueError as e:
        # Got some null bytes in the response
        LOG.error('%s: %s' % (unicode(e), repr(response)))
        return json.loads(response.replace('\x00', ''))
def uniquekey(self, collection):
    """Return the name of the collection's unique key field from the Schema API."""
    try:
        key_params = self._get_params() + (('wt', 'json'),)
        resp = self._root.get('%s/schema/uniquekey' % collection, params=key_params)
        return self._get_json(resp)['uniqueKey']
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
def update(self, collection_or_core_name, data, content_type='csv', version=None):
    """Index `data` into the given collection or core via the /update handler.

    Supports 'csv' and 'json' payloads; returns True on success and False
    for an unsupported content type. Passing `version` enables Solr
    optimistic concurrency (_version_/versions parameters).
    """
    try:
        if content_type == 'csv':
            content_type = 'application/csv'
        elif content_type == 'json':
            content_type = 'application/json'
        else:
            # Fixed message: 'json' is also accepted, the old text only mentioned csv.
            LOG.error("Could not update index for %s. Unsupported content type %s. Allowed content types: csv, json" % (collection_or_core_name, content_type))
            return False

        params = self._get_params() + (
            ('wt', 'json'),
            ('overwrite', 'true'),
        )
        if version is not None:
            params += (
                ('_version_', version),
                ('versions', 'true')
            )
        self._root.post('%s/update' % collection_or_core_name, contenttype=content_type, params=params, data=data)
        return True
    except RestException as e:
        raise PopupException(e, title=_('Error while accessing Solr'))
| apache-2.0 |
pcwalton/rust | src/etc/extract_grammar.py | 9 | 3433 | #!/usr/bin/env python
# xfail-license
# This script is for extracting the grammar from the rust docs.
import fileinput
collections = { "gram": [],
"keyword": [],
"reserved": [],
"binop": [],
"unop": [] }
in_coll = False
coll = ""
for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
if in_coll:
if line.startswith("~~~~"):
in_coll = False
else:
if coll in ["keyword", "reserved", "binop", "unop"]:
for word in line.split():
if word not in collections[coll]:
collections[coll].append(word)
else:
collections[coll].append(line)
else:
if line.startswith("~~~~"):
for cname in collections:
if ("." + cname) in line:
coll = cname
in_coll = True
break
# Define operator symbol-names here
tokens = ["non_star", "non_slash", "non_eol",
"non_single_quote", "non_double_quote", "ident" ]
symnames = {
".": "dot",
"+": "plus",
"-": "minus",
"/": "slash",
"*": "star",
"%": "percent",
"~": "tilde",
"@": "at",
"!": "not",
"&": "and",
"|": "or",
"^": "xor",
"<<": "lsl",
">>": "lsr",
">>>": "asr",
"&&": "andand",
"||": "oror",
"<" : "lt",
"<=" : "le",
"==" : "eqeq",
">=" : "ge",
">" : "gt",
"=": "eq",
"+=": "plusequal",
"-=": "minusequal",
"/=": "divequal",
"*=": "starequal",
"%=": "percentequal",
"&=": "andequal",
"|=": "orequal",
"^=": "xorequal",
">>=": "lsrequal",
">>>=": "asrequal",
"<<=": "lslequal",
"::": "coloncolon",
"->": "rightarrow",
"<-": "leftarrow",
"<->": "swaparrow",
"//": "linecomment",
"/*": "openblockcomment",
"*/": "closeblockcomment",
"macro_rules": "macro_rules",
"=>" : "eg",
".." : "dotdot",
"," : "comma"
}
lines = []
for line in collections["gram"]:
line2 = ""
for word in line.split():
# replace strings with keyword-names or symbol-names from table
if word.startswith("\""):
word = word[1:-1]
if word in symnames:
word = symnames[word]
else:
for ch in word:
if not ch.isalpha():
raise Exception("non-alpha apparent keyword: "
+ word)
if word not in tokens:
if (word in collections["keyword"] or
word in collections["reserved"]):
tokens.append(word)
else:
raise Exception("unknown keyword/reserved word: "
+ word)
line2 += " " + word
lines.append(line2)
for word in collections["keyword"] + collections["reserved"]:
if word not in tokens:
tokens.append(word)
for sym in collections["unop"] + collections["binop"] + symnames.keys():
word = symnames[sym]
if word not in tokens:
tokens.append(word)
print("%start parser, token;")
print("%%token %s ;" % ("\n\t, ".join(tokens)))
for coll in ["keyword", "reserved"]:
print("%s: %s ; " % (coll, "\n\t| ".join(collections[coll])));
for coll in ["binop", "unop"]:
print("%s: %s ; " % (coll, "\n\t| ".join([symnames[x]
for x in collections[coll]])));
print("\n".join(lines));
| apache-2.0 |
vyrus/wubi | src/bittorrent/NatCheck.py | 13 | 2650 | # Written by Bram Cohen
# see LICENSE.txt for license information
from cStringIO import StringIO
from socket import error as socketerror
protocol_name = 'BitTorrent protocol'
# header, reserved, download id, my id, [length, message]
class NatCheck:
    """Checks whether a peer at (ip, port) is reachable from outside (not NATed).

    Connects to the peer, sends a BitTorrent handshake for `downloadid` and
    verifies the peer answers with a matching handshake and peer id.  The
    outcome is reported exactly once through
    resultfunc(result, downloadid, peerid, ip, port).
    """

    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
        self.resultfunc = resultfunc
        self.downloadid = downloadid
        self.peerid = peerid
        self.ip = ip
        self.port = port
        self.closed = False
        self.buffer = StringIO()
        # Incremental parser state: accumulate next_len bytes, then feed next_func.
        self.next_len = 1
        self.next_func = self.read_header_len
        try:
            self.connection = rawserver.start_connection((ip, port), self)
            # Our half of the handshake: pstrlen, protocol name, 8 reserved
            # zero bytes, then the 20-byte download (info hash) id.
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                (chr(0) * 8) + downloadid)
        except socketerror:
            self.answer(False)
        except IOError:
            self.answer(False)

    def answer(self, result):
        """Report `result` once and tear down the connection if it was opened."""
        self.closed = True
        try:
            self.connection.close()
        except AttributeError:
            # start_connection raised before self.connection was ever assigned.
            pass
        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)

    # The read_* methods form the handshake parser: each consumes the chunk
    # it asked for and returns (next_length, next_parser), or None on mismatch.

    def read_header_len(self, s):
        if ord(s) != len(protocol_name):
            return None
        return len(protocol_name), self.read_header

    def read_header(self, s):
        if s != protocol_name:
            return None
        return 8, self.read_reserved

    def read_reserved(self, s):
        # Reserved/extension bits are ignored.
        return 20, self.read_download_id

    def read_download_id(self, s):
        if s != self.downloadid:
            return None
        return 20, self.read_peer_id

    def read_peer_id(self, s):
        if s != self.peerid:
            return None
        # Full handshake matched: the peer is reachable.
        self.answer(True)
        return None

    def data_came_in(self, connection, s):
        """Feed incoming bytes through the state machine, buffering partial chunks."""
        while True:
            if self.closed:
                return
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                # Not enough data yet for the current state; stash and wait.
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            self.buffer.reset()
            self.buffer.truncate()
            x = self.next_func(m)
            if x is None:
                # Mismatch, or the handshake completed inside read_peer_id.
                if not self.closed:
                    self.answer(False)
                return
            self.next_len, self.next_func = x

    def connection_lost(self, connection):
        # Peer dropped the connection before the handshake completed.
        if not self.closed:
            self.closed = True
            self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)

    def connection_flushed(self, connection):
        # No-op: this probe only reads.
        pass
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.