| code (stringlengths 3-1.05M) | repo_name (stringlengths 5-104) | path (stringlengths 4-251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3-1.05M) |
|---|---|---|---|---|---|
import cv2
import numpy as np
def hsv_hist(roi,
lower_color_bound,
upper_color_bound,
color_channel,
bin_nb,
channel_range):
    # Convert the BGR region of interest to HSV and keep only the pixels
    # inside the requested color bounds.
    hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_roi, np.array(lower_color_bound), np.array(upper_color_bound))
    # Histogram over the selected channel(s), normalized to [0, 255].
    roi_hist = cv2.calcHist([hsv_roi], color_channel, mask, bin_nb, channel_range)
    cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    return roi_hist
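# Usage sketch (hypothetical frame and ROI values, not part of the original
# module): build a hue histogram suitable for back-projection tracking,
# e.g. with cv2.calcBackProject / cv2.CamShift:
#   frame = cv2.imread('frame.png')
#   roi = frame[100:200, 150:250]
#   roi_hist = hsv_hist(roi, (0., 60., 32.), (180., 255., 255.), [0], [180], [0, 180])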
| vwrobel/cvtools | cvtools/processes/featurer/hsv_hist.py | Python | mit | 499 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Comunitea Servicios Tecnológicos S.L.
# $Omar Castiñeira Saavedra$ <omar@comunitea.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api
class MrpProduction(models.Model):
_inherit = 'mrp.production'
@api.multi
def action_production_end(self):
res = super(MrpProduction, self).action_production_end()
under_min = self.env['product.stock.unsafety']
for production in self:
domain = [
('state', '=', 'in_action'),
('production_id', '=', production.id)
]
under_min_objs = under_min.search(domain)
if under_min_objs:
under_min_objs.write({'state': 'finalized'})
return res
@api.multi
def unlink(self):
under_min_obj = self.env['product.stock.unsafety']
for production in self:
under_mins = under_min_obj.search([('production_id', '=',
production.id)])
if under_mins:
under_mins.write({"state": "in_progress",
"production_id": False})
return super(MrpProduction, self).unlink()
| jgmanzanas/CMNT_004_15 | project-addons/product_stock_unsafety/mrp.py | Python | agpl-3.0 | 2,058 |
import os
import uuid
import httplib
import datetime
import jwe
import jwt
import furl
from flask import request
from flask import redirect
from flask import make_response
from modularodm.exceptions import NoResultsFound
from modularodm import Q
from framework import sentry
from framework.auth import cas
from framework.auth import Auth
from framework.auth import oauth_scopes
from framework.routing import json_renderer
from framework.sentry import log_exception
from framework.exceptions import HTTPError
from framework.transactions.context import TokuTransaction
from framework.transactions.handlers import no_auto_transaction
from framework.auth.decorators import must_be_logged_in, must_be_signed, collect_auth
from website import mails
from website import settings
from website.files.models import FileNode, TrashedFileNode, StoredFileNode
from website.project import decorators
from website.addons.base import exceptions
from website.addons.base import signals as file_signals
from website.addons.base import StorageAddonBase
from website.models import User, Node, NodeLog
from website.project.model import DraftRegistration, MetaSchema
from website.util import rubeus
from website.profile.utils import get_gravatar
from website.project.decorators import must_be_valid_project, must_be_contributor_or_public
from website.project.utils import serialize_node
# import so that associated listener is instantiated and gets emails
from website.notifications.events.files import FileEvent # noqa
FILE_GONE_ERROR_MESSAGE = u'''
<style>
.file-download{{display: none;}}
.file-share{{display: none;}}
.file-delete{{display: none;}}
</style>
<div class="alert alert-info" role="alert">
This link to the file "{file_name}" is no longer valid.
</div>'''
WATERBUTLER_JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
@decorators.must_have_permission('write')
@decorators.must_not_be_registration
def disable_addon(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
deleted = node.delete_addon(addon_name, auth)
return {'deleted': deleted}
@must_be_logged_in
def get_addon_user_config(**kwargs):
user = kwargs['auth'].user
addon_name = kwargs.get('addon')
if addon_name is None:
raise HTTPError(httplib.BAD_REQUEST)
addon = user.get_addon(addon_name)
if addon is None:
raise HTTPError(httplib.BAD_REQUEST)
return addon.to_json(user)
permission_map = {
'create_folder': 'write',
'revisions': 'read',
'metadata': 'read',
'download': 'read',
'upload': 'write',
'delete': 'write',
'copy': 'write',
'move': 'write',
'copyto': 'write',
'moveto': 'write',
'copyfrom': 'read',
'movefrom': 'write',
}
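# Note: unknown actions deliberately map to None here; check_access()
# below rejects them with a 400 rather than guessing a permission level.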
def check_access(node, auth, action, cas_resp):
"""Verify that user can perform requested action on resource. Raise appropriate
error code if action cannot proceed.
"""
permission = permission_map.get(action, None)
if permission is None:
raise HTTPError(httplib.BAD_REQUEST)
if cas_resp:
if permission == 'read':
if node.is_public:
return True
required_scope = oauth_scopes.CoreScopes.NODE_FILE_READ
else:
required_scope = oauth_scopes.CoreScopes.NODE_FILE_WRITE
if not cas_resp.authenticated \
or required_scope not in oauth_scopes.normalize_scopes(cas_resp.attributes['accessTokenScope']):
raise HTTPError(httplib.FORBIDDEN)
if permission == 'read' and node.can_view(auth):
return True
if permission == 'write' and node.can_edit(auth):
return True
# Users attempting to register projects with components might not have
# `write` permissions for all components. This will result in a 403 for
# all `copyto` actions as well as `copyfrom` actions if the component
# in question is not public. To get around this, we have to recursively
# check the node's parent node to determine if they have `write`
# permissions up the stack.
# TODO(hrybacki): is there a way to tell if this is for a registration?
# All nodes being registered that receive the `copyto` action will have
# `node.is_registration` == True. However, we have no way of telling if
# `copyfrom` actions are originating from a node being registered.
    # TODO: This raises UNAUTHORIZED for registrations that have not been archived yet
if action == 'copyfrom' or (action == 'copyto' and node.is_registration):
parent = node.parent_node
while parent:
if parent.can_edit(auth):
return True
parent = parent.parent_node
# Users with the PREREG_ADMIN_TAG should be allowed to download files
# from prereg challenge draft registrations.
try:
prereg_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
allowed_nodes = [node] + node.parents
prereg_draft_registration = DraftRegistration.find(
Q('branched_from', 'in', [n._id for n in allowed_nodes]) &
Q('registration_schema', 'eq', prereg_schema)
)
if action == 'download' and \
auth.user is not None and \
prereg_draft_registration.count() > 0 and \
settings.PREREG_ADMIN_TAG in auth.user.system_tags:
return True
except NoResultsFound:
pass
raise HTTPError(httplib.FORBIDDEN if auth.user else httplib.UNAUTHORIZED)
def make_auth(user):
if user is not None:
return {
'id': user._id,
'email': '{}@osf.io'.format(user._id),
'name': user.fullname,
}
return {}
@collect_auth
def get_auth(auth, **kwargs):
cas_resp = None
if not auth.user:
# Central Authentication Server OAuth Bearer Token
authorization = request.headers.get('Authorization')
if authorization and authorization.startswith('Bearer '):
client = cas.get_client()
try:
access_token = cas.parse_auth_header(authorization)
cas_resp = client.profile(access_token)
except cas.CasError as err:
sentry.log_exception()
# NOTE: We assume that the request is an AJAX request
return json_renderer(err)
if cas_resp.authenticated:
auth.user = User.load(cas_resp.user)
try:
data = jwt.decode(
jwe.decrypt(request.args.get('payload', '').encode('utf-8'), WATERBUTLER_JWE_KEY),
settings.WATERBUTLER_JWT_SECRET,
options={'require_exp': True},
algorithm=settings.WATERBUTLER_JWT_ALGORITHM
)['data']
except (jwt.InvalidTokenError, KeyError):
raise HTTPError(httplib.FORBIDDEN)
if not auth.user:
auth.user = User.from_cookie(data.get('cookie', ''))
try:
action = data['action']
node_id = data['nid']
provider_name = data['provider']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
node = Node.load(node_id)
if not node:
raise HTTPError(httplib.NOT_FOUND)
check_access(node, auth, action, cas_resp)
provider_settings = node.get_addon(provider_name)
if not provider_settings:
raise HTTPError(httplib.BAD_REQUEST)
try:
credentials = provider_settings.serialize_waterbutler_credentials()
waterbutler_settings = provider_settings.serialize_waterbutler_settings()
except exceptions.AddonError:
log_exception()
raise HTTPError(httplib.BAD_REQUEST)
return {'payload': jwe.encrypt(jwt.encode({
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
'data': {
'auth': make_auth(auth.user), # A waterbutler auth dict not an Auth object
'credentials': credentials,
'settings': waterbutler_settings,
'callback_url': node.api_url_for(
('create_waterbutler_log' if not node.is_registration else 'registration_callbacks'),
_absolute=True,
),
}
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), WATERBUTLER_JWE_KEY)}
LOG_ACTION_MAP = {
'move': NodeLog.FILE_MOVED,
'copy': NodeLog.FILE_COPIED,
'rename': NodeLog.FILE_RENAMED,
'create': NodeLog.FILE_ADDED,
'update': NodeLog.FILE_UPDATED,
'delete': NodeLog.FILE_REMOVED,
'create_folder': NodeLog.FOLDER_CREATED,
}
@must_be_signed
@no_auto_transaction
@must_be_valid_project
def create_waterbutler_log(payload, **kwargs):
with TokuTransaction():
try:
auth = payload['auth']
action = LOG_ACTION_MAP[payload['action']]
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
user = User.load(auth['id'])
if user is None:
raise HTTPError(httplib.BAD_REQUEST)
auth = Auth(user=user)
node = kwargs['node'] or kwargs['project']
if action in (NodeLog.FILE_MOVED, NodeLog.FILE_COPIED):
for bundle in ('source', 'destination'):
for key in ('provider', 'materialized', 'name', 'nid'):
if key not in payload[bundle]:
raise HTTPError(httplib.BAD_REQUEST)
dest = payload['destination']
src = payload['source']
if src is not None and dest is not None:
dest_path = dest['materialized']
src_path = src['materialized']
if dest_path.endswith('/') and src_path.endswith('/'):
dest_path = os.path.dirname(dest_path)
src_path = os.path.dirname(src_path)
if (
os.path.split(dest_path)[0] == os.path.split(src_path)[0] and
dest['provider'] == src['provider'] and
dest['nid'] == src['nid'] and
dest['name'] != src['name']
):
action = LOG_ACTION_MAP['rename']
destination_node = node # For clarity
source_node = Node.load(payload['source']['nid'])
source = source_node.get_addon(payload['source']['provider'])
destination = node.get_addon(payload['destination']['provider'])
payload['source'].update({
'materialized': payload['source']['materialized'].lstrip('/'),
'addon': source.config.full_name,
'url': source_node.web_url_for(
'addon_view_or_download_file',
path=payload['source']['path'].lstrip('/'),
provider=payload['source']['provider']
),
'node': {
'_id': source_node._id,
'url': source_node.url,
'title': source_node.title,
}
})
payload['destination'].update({
'materialized': payload['destination']['materialized'].lstrip('/'),
'addon': destination.config.full_name,
'url': destination_node.web_url_for(
'addon_view_or_download_file',
path=payload['destination']['path'].lstrip('/'),
provider=payload['destination']['provider']
),
'node': {
'_id': destination_node._id,
'url': destination_node.url,
'title': destination_node.title,
}
})
payload.update({
'node': destination_node._id,
'project': destination_node.parent_id,
})
if not payload.get('errors'):
destination_node.add_log(
action=action,
auth=auth,
params=payload
)
if payload.get('email') is True or payload.get('errors'):
mails.send_mail(
user.username,
mails.FILE_OPERATION_FAILED if payload.get('errors')
else mails.FILE_OPERATION_SUCCESS,
action=payload['action'],
source_node=source_node,
destination_node=destination_node,
source_path=payload['source']['materialized'],
                destination_path=payload['destination']['materialized'],
source_addon=payload['source']['addon'],
destination_addon=payload['destination']['addon'],
)
if payload.get('error'):
# Action failed but our function succeeded
# Bail out to avoid file_signals
return {'status': 'success'}
else:
try:
metadata = payload['metadata']
node_addon = node.get_addon(payload['provider'])
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
if node_addon is None:
raise HTTPError(httplib.BAD_REQUEST)
metadata['path'] = metadata['path'].lstrip('/')
node_addon.create_waterbutler_log(auth, action, metadata)
with TokuTransaction():
file_signals.file_updated.send(node=node, user=user, event_type=action, payload=payload)
return {'status': 'success'}
@file_signals.file_updated.connect
def addon_delete_file_node(self, node, user, event_type, payload):
""" Get addon StoredFileNode(s), move it into the TrashedFileNode collection
and remove it from StoredFileNode.
Required so that the guids of deleted addon files are not re-pointed when an
addon file or folder is moved or renamed.
"""
if event_type == 'file_removed' and payload.get('provider', None) != 'osfstorage':
provider = payload['provider']
path = payload['metadata']['path']
materialized_path = payload['metadata']['materialized']
if path.endswith('/'):
folder_children = FileNode.resolve_class(provider, FileNode.ANY).find(
Q('provider', 'eq', provider) &
Q('node', 'eq', node) &
Q('materialized_path', 'startswith', materialized_path)
)
for item in folder_children:
if item.kind == 'file' and not TrashedFileNode.load(item._id):
item.delete(user=user)
elif item.kind == 'folder':
StoredFileNode.remove_one(item.stored_object)
else:
try:
file_node = FileNode.resolve_class(provider, FileNode.FILE).find_one(
Q('node', 'eq', node) &
Q('materialized_path', 'eq', materialized_path)
)
except NoResultsFound:
file_node = None
if file_node and not TrashedFileNode.load(file_node._id):
file_node.delete(user=user)
@must_be_valid_project
def addon_view_or_download_file_legacy(**kwargs):
query_params = request.args.to_dict()
node = kwargs.get('node') or kwargs['project']
action = query_params.pop('action', 'view')
provider = kwargs.get('provider', 'osfstorage')
if kwargs.get('path'):
path = kwargs['path']
elif kwargs.get('fid'):
path = kwargs['fid']
if 'download' in request.path or request.path.startswith('/api/v1/'):
action = 'download'
if kwargs.get('vid'):
query_params['version'] = kwargs['vid']
# If provider is OSFstorage, check existence of requested file in the filetree
# This prevents invalid GUIDs from being created
if provider == 'osfstorage':
node_settings = node.get_addon('osfstorage')
try:
path = node_settings.get_root().find_child_by_name(path)._id
except NoResultsFound:
raise HTTPError(
404, data=dict(
message_short='File not found',
message_long='You requested a file that does not exist.'
)
)
return redirect(
node.web_url_for(
'addon_view_or_download_file',
path=path,
provider=provider,
action=action,
**query_params
),
code=httplib.MOVED_PERMANENTLY
)
@must_be_valid_project
@must_be_contributor_or_public
def addon_deleted_file(auth, node, **kwargs):
"""Shows a nice error message to users when they try to view
a deleted file
"""
# Allow file_node to be passed in so other views can delegate to this one
trashed = kwargs.get('file_node') or TrashedFileNode.load(kwargs.get('trashed_id'))
if not trashed:
raise HTTPError(httplib.NOT_FOUND, {
'message_short': 'Not Found',
'message_long': 'This file does not exist'
})
ret = serialize_node(node, auth, primary=True)
ret.update(rubeus.collect_addon_assets(node))
ret.update({
'urls': {
'render': None,
'sharejs': None,
'mfr': settings.MFR_SERVER_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
},
'extra': {},
        'size': 9966699,  # Prevent file from being edited, just in case
'sharejs_uuid': None,
'file_name': trashed.name,
'file_path': trashed.path,
'provider': trashed.provider,
'materialized_path': trashed.materialized_path,
'error': FILE_GONE_ERROR_MESSAGE.format(file_name=trashed.name),
'private': getattr(node.get_addon(trashed.provider), 'is_private', False),
'file_id': trashed._id,
# For the off chance that there is no GUID
'file_guid': getattr(trashed.get_guid(create=False), '_id', None),
'file_tags': [tag._id for tag in trashed.tags],
'file_name_ext': os.path.splitext(trashed.name)[1],
'file_name_title': os.path.splitext(trashed.name)[0],
'allow_comments': trashed.provider in settings.ADDONS_COMMENTABLE,
})
return ret, httplib.GONE
@must_be_valid_project
@must_be_contributor_or_public
def addon_view_or_download_file(auth, path, provider, **kwargs):
extras = request.args.to_dict()
extras.pop('_', None) # Clean up our url params a bit
action = extras.get('action', 'view')
node = kwargs.get('node') or kwargs['project']
node_addon = node.get_addon(provider)
if not path:
raise HTTPError(httplib.BAD_REQUEST)
if not isinstance(node_addon, StorageAddonBase):
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer connected to the {}.'.format(node.project_or_component)
})
if not node_addon.has_auth:
raise HTTPError(httplib.UNAUTHORIZED, {
'message_short': 'Unauthorized',
'message_long': 'The add-on containing this file is no longer authorized.'
})
if not node_addon.complete:
raise HTTPError(httplib.BAD_REQUEST, {
'message_short': 'Bad Request',
'message_long': 'The add-on containing this file is no longer configured.'
})
file_node = FileNode.resolve_class(provider, FileNode.FILE).get_or_create(node, path)
# Note: Cookie is provided for authentication to waterbutler
    # it is overridden to force authentication as the current user
    # the auth header is also passed to support basic auth
version = file_node.touch(
request.headers.get('Authorization'),
**dict(
extras,
cookie=request.cookies.get(settings.COOKIE_NAME)
)
)
if version is None:
if file_node.get_guid():
            # If this file has been successfully viewed before but no longer exists
# Move file to trashed file node
if not TrashedFileNode.load(file_node._id):
file_node.delete()
# Show a nice error message
return addon_deleted_file(file_node=file_node, **kwargs)
raise HTTPError(httplib.NOT_FOUND, {
'message_short': 'Not Found',
'message_long': 'This file does not exist'
})
# TODO clean up these urls and unify what is used as a version identifier
if request.method == 'HEAD':
return make_response(('', 200, {
'Location': file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier))
}))
if action == 'download':
return redirect(file_node.generate_waterbutler_url(**dict(extras, direct=None, version=version.identifier)))
if len(request.path.strip('/').split('/')) > 1:
guid = file_node.get_guid(create=True)
return redirect(furl.furl('/{}/'.format(guid._id)).set(args=extras).url)
return addon_view_file(auth, node, file_node, version)
def addon_view_file(auth, node, file_node, version):
# TODO: resolve circular import issue
from website.addons.wiki import settings as wiki_settings
if isinstance(version, tuple):
version, error = version
error = error.replace('\n', '').strip()
else:
error = None
ret = serialize_node(node, auth, primary=True)
if file_node._id not in node.file_guid_to_share_uuids:
node.file_guid_to_share_uuids[file_node._id] = uuid.uuid4()
node.save()
if ret['user']['can_edit']:
sharejs_uuid = str(node.file_guid_to_share_uuids[file_node._id])
else:
sharejs_uuid = None
download_url = furl.furl(request.url.encode('utf-8')).set(args=dict(request.args, **{
'direct': None,
'mode': 'render',
'action': 'download',
}))
render_url = furl.furl(settings.MFR_SERVER_URL).set(
path=['render'],
args={'url': download_url.url}
)
ret.update({
'urls': {
'render': render_url.url,
'mfr': settings.MFR_SERVER_URL,
'sharejs': wiki_settings.SHAREJS_URL,
'gravatar': get_gravatar(auth.user, 25),
'files': node.web_url_for('collect_file_trees'),
},
'error': error,
'file_name': file_node.name,
'file_name_title': os.path.splitext(file_node.name)[0],
'file_name_ext': os.path.splitext(file_node.name)[1],
'file_path': file_node.path,
'sharejs_uuid': sharejs_uuid,
'provider': file_node.provider,
'materialized_path': file_node.materialized_path,
'extra': version.metadata.get('extra', {}),
'size': version.size if version.size is not None else 9966699,
'private': getattr(node.get_addon(file_node.provider), 'is_private', False),
'file_tags': [tag._id for tag in file_node.tags],
'file_guid': file_node.get_guid()._id,
'file_id': file_node._id,
'allow_comments': file_node.provider in settings.ADDONS_COMMENTABLE
})
ret.update(rubeus.collect_addon_assets(node))
return ret
| kch8qx/osf.io | website/addons/base/views.py | Python | apache-2.0 | 23,401 |
def max_contig_sum(L):
""" L, a list of integers, at least one positive
Returns the maximum sum of a contiguous subsequence in L """
max_contiguous_sum = 0
point = 0
for subsequence in range(len(L)):
current_subsequence = []
for index, number in enumerate(L[point:]):
if not current_subsequence and number <= 0:
continue
else:
try:
if sum(current_subsequence) + number > sum(current_subsequence) or sum(current_subsequence) + L[index + 1] > sum(current_subsequence) + number:
current_subsequence.append(number)
if sum(current_subsequence) > max_contiguous_sum:
max_contiguous_sum = sum(current_subsequence)
else:
current_subsequence = []
except IndexError:
if sum(current_subsequence) + number > sum(current_subsequence):
current_subsequence.append(number)
if sum(current_subsequence) > max_contiguous_sum:
max_contiguous_sum = sum(current_subsequence)
point += 1
return max_contiguous_sum
assert max_contig_sum([1]) == 1
assert max_contig_sum([1, -1]) == 1
assert max_contig_sum([10, 9, 8, -1]) == 27
assert max_contig_sum([0, -2, -5, -1, 5]) == 5
assert max_contig_sum([3, 4, -1, 5, -4]) == 11
assert max_contig_sum([3, 4, -8, 15, -1, 2]) == 16
assert max_contig_sum([3, 4, -8, 3, 3, 1, -7, 15, -1, 2]) == 16
assert max_contig_sum([0, -2, -7, 3, 3, -7, 15, 2]) == 17
print('win')
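# For reference: Kadane's algorithm computes the same quantity in O(n).
# A minimal sketch, not part of the original quiz solution; like the code
# above it relies on L containing at least one positive number.
def kadane_max_contig_sum(L):
    best = current = 0
    for number in L:
        current = max(0, current + number)  # drop any negative-sum prefix
        best = max(best, current)
    return best
assert kadane_max_contig_sum([3, 4, -8, 3, 3, 1, -7, 15, -1, 2]) == 16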
| Mdlkxzmcp/various_python | MITx/6002x/quiz_max_contig_sum.py | Python | mit | 1,639 |
"""
WSGI config for spa_movies project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spa_movies.settings")
application = get_wsgi_application()
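# Deployment sketch (assumption, not part of this repo): any WSGI server can
# serve the 'application' object above, for example with gunicorn installed:
#   gunicorn spa_movies.wsgi:application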
| davgibbs/movies-spa | apps/spa_movies/wsgi.py | Python | gpl-3.0 | 397 |
import xml.etree.cElementTree as ET
from whoosh.fields import Schema, TEXT, KEYWORD
from whoosh.index import create_in
import shelve, os, sys
import cPickle
"""
Internal Stack Exchange - Indexer
---------------------------------
This module scans the available datadumps in the /Datadumps directory,
and parses the .xml files it finds there.
It then builds a database that holds 'docs'. A 'doc' is a full
question + answers dataset, and is referenced by its question Id.
It also creates a search engine index (using the Whoosh library), that
allows the user to index and search for docs using tags, keywords, etc.
"""
#####################################################################
#####################################################################
def parse_xmls(path_to_xmls, site_name):
""" Create a file <site_name>.db under the folder /db/.
The file is a shelve, containing full docs of the named site. Key is the question id.
    Note: this function's usage of Shelve is optimized for memory use, since the S.E. datadumps
can be huge.
All the .xml files are parsed into Shelves, so that their content will
not need to stay in memory. The shelve is opened and read only when needed.
"""
###################################################################################
#Create a shelve to hold users' info: {user id : user info}
shlv = shelve.open('../temp_db/tmp_users.db', 'n', protocol = -1, writeback = False)
#Memory efficient method, allows for clearing the root.
context = ET.iterparse(path_to_xmls + 'Users.xml', events = ('start', 'end'))
context = iter(context)
event, root = context.next() #get root
print ("******* Starting Users.xml parsing ******")
i = 0
for (event, user) in context:
if event == 'end' and user.tag=='row':
shlv[user.attrib['Id']] = user.attrib
#Log out progress to the caller.
i += 1
if i%5000==0:
shlv.sync() #Syncing the shelve clears the cache, and frees the memory.
print ("Processed {0} users so far.".format(i))
root.clear()
shlv.close()
###################################################################################
#Create a shelve to hold RelatedPosts info: {post id (id of relevant post) : list of related post id}
shlv = shelve.open('../temp_db/tmp_related_posts.db', 'n', protocol = -1, writeback = False)
#Memory efficient method
context = ET.iterparse(path_to_xmls + 'PostLinks.xml', events = ('start', 'end'))
context = iter(context)
event, root = context.next() #get root
print ("******* Starting PostLinks.xml parsing ******")
i = 0
for (event, postlink) in context:
if event == 'end' and postlink.tag=='row':
#Check if the shelve already has the post_id key, and if not - create a new one.
post_id = postlink.attrib['PostId']
list_of_related_links = shlv.get(post_id, [])
list_of_related_links.append(postlink.attrib)
shlv.update({post_id: list_of_related_links})
#Log out progress to the user.
i += 1
if i%5000==0:
shlv.sync()
print ("Processed {0} PostLinks so far.".format(i))
root.clear()
shlv.close()
###################################################################################
#Create a shelve to hold comments info: {post id : list of comments}
shlv = shelve.open('../temp_db/tmp_comments.db', 'n', protocol = -1, writeback = True)
#This shlv holds the user data.
tmp_users_shlv = shelve.open('../temp_db/tmp_users.db', 'r', protocol = -1)
#Memory efficient method
context = ET.iterparse(path_to_xmls + 'Comments.xml', events = ('start', 'end'))
context = iter(context)
event, root = context.next()
print ("******* Starting Comments.xml parsing ******")
i = 0
for (event, comment) in context:
if event == 'end' and comment.tag=='row':
if 'UserId' in comment.attrib.keys():
#If the comment has a userId, we try to find the user's details
#in the tmp_users_shlv. If there is none, we keep the field empty.
user_id = comment.attrib.get('UserId', '')
user_data = tmp_users_shlv.get(user_id, '')
comment.attrib.update({'User': user_data})
post_id = comment.attrib['PostId']
list_of_comments = shlv.get(post_id, [])
list_of_comments.append(comment.attrib)
shlv.update({post_id:list_of_comments})
#Log out progress to the user.
i += 1
if i%10000==0:
shlv.sync()
print ("Processed {0} Comments so far.".format(i))
root.clear()
tmp_users_shlv.close()
shlv.close()
###################################################################################
#Create a shelve to hold questions info only: {post id: post info}
#Same for answers, but structure is: {parent id: list of posts}
tmp_questions_shlv = shelve.open('../temp_db/tmp_questions.db', 'n', protocol = -1, writeback = True)
tmp_answers_shlv = shelve.open('../temp_db/tmp_answers.db', 'n', protocol = -1, writeback = True)
tmp_comments_shlv = shelve.open('../temp_db/tmp_comments.db', 'r', protocol = -1)
tmp_users_shlv = shelve.open('../temp_db/tmp_users.db', 'r', protocol = -1)
tmp_postlinks_shlv = shelve.open('../temp_db/tmp_related_posts.db', 'r', protocol = -1)
context = ET.iterparse(path_to_xmls + 'Posts.xml', events = ('start', 'end'))
context = iter(context)
event, root = context.next()
print ("******* Starting Posts.xml parsing ******")
i = 0
for (event, post) in context:
if event == 'end' and post.tag == 'row':
if (post.attrib['PostTypeId']=='1'):#A question
tmp_questions_shlv[post.attrib['Id']] = post.attrib
elif (post.attrib['PostTypeId']=='2'):#An Answer
if 'OwnerUserId' in post.attrib.keys():
#If we have the user details, add them to the answer.
user_id = post.attrib['OwnerUserId']
user_data = tmp_users_shlv.get(user_id, '')
post.attrib.update({'User': user_data})
post_id = post.attrib['Id']
list_of_postlinks = tmp_postlinks_shlv.get(post_id, [])
post.attrib.update({'PostLinks': list_of_postlinks})
list_of_comments = tmp_comments_shlv.get(post_id, [])
post.attrib.update({'Comments': list_of_comments})
parent_id = post.attrib['ParentId']
list_of_answers = tmp_answers_shlv.get(parent_id, [])
list_of_answers.append(post.attrib)
tmp_answers_shlv.update({parent_id:list_of_answers})
i += 1
if i%5000==0:
tmp_questions_shlv.sync()
tmp_answers_shlv.sync()
print ("Processed {0} Posts so far.".format(i))
root.clear()
tmp_postlinks_shlv.close()
tmp_users_shlv.close()
tmp_comments_shlv.close()
tmp_questions_shlv.close()
tmp_answers_shlv.close()
####################################################################################
# Create the shelve that will hold the full documents. {question id : doc}
full_docs_shlv = shelve.open('../db/' + site_name +'.db', 'n', protocol = -1, writeback = True)
tmp_posts_shlv = shelve.open('../temp_db/tmp_questions.db', 'r', protocol = -1)
tmp_users_shlv = shelve.open('../temp_db/tmp_users.db', 'r', protocol = -1)
tmp_answers_shlv = shelve.open('../temp_db/tmp_answers.db', 'r', protocol = -1)
tmp_comments_shlv = shelve.open('../temp_db/tmp_comments.db', 'r', protocol = -1)
tmp_postlinks_shlv = shelve.open('../temp_db/tmp_related_posts.db', 'r', protocol = -1)
print ("******* Now creating full docs ******")
i = 0
num_of_docs = len(tmp_posts_shlv.keys())
for id in tmp_posts_shlv.keys():
doc_template = {'Comments' : [],
'PostLinks' : [],
'Answers' : [],
'User' : '',
'AcceptedAnswerId' : '',
'Body' : '',
'OwnerUserId' : '',
'Title' : '',
'Tags' : '',
'Score' : ''
}
doc_template['Title'] = tmp_posts_shlv[id]['Title']
doc_template['Tags'] = tmp_posts_shlv[id]['Tags']
doc_template['Body'] = tmp_posts_shlv[id]['Body']
doc_template['Score'] = tmp_posts_shlv[id]['Score']
#return default value '' if none
doc_template['AcceptedAnswerId'] = tmp_posts_shlv[id].get('AcceptedAnswerId', '')
doc_template['OwnerUserId'] = tmp_posts_shlv[id].get('OwnerUserId', '')
doc_template['User'] = tmp_users_shlv.get(doc_template['OwnerUserId'], '')
#get all the comments, answers and postlinks. Return empty list if none.
doc_template['Comments'] = tmp_comments_shlv.get(id, [])
doc_template['Answers'] = tmp_answers_shlv.get(id, [])
doc_template['PostLinks'] = tmp_postlinks_shlv.get(id, [])
full_docs_shlv[id] = doc_template
i += 1
if i%1000==0:
full_docs_shlv.sync()
print ("Processed {0} Full Docs out of {1}.".format(i, num_of_docs))
tmp_posts_shlv.close()
tmp_users_shlv.close()
full_docs_shlv.close()
tmp_answers_shlv.close()
tmp_comments_shlv.close()
tmp_postlinks_shlv.close()
#####################################################################
#####################################################################
def create_schema(path_to_index_folder, db_name):
""" Create a schema for the whoosh index. Return a pointer to the created index.
"""
#The schema will hold the texts of a full document,
    #the tags (as a comma-separated list) and the id of the question.
db_docs_schema = Schema(doc_texts = TEXT(),
doc_tags = KEYWORD(commas = True, scorable = True),
question_id = TEXT(stored = True))
db_docs_ix_pointer = create_in(path_to_index_folder,
schema = db_docs_schema,
indexname = db_name + '_index')
return db_docs_ix_pointer
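#Query sketch (illustrative, not part of the original module): once a site
#has been indexed, it can be searched by keyword, e.g.:
#   from whoosh.index import open_dir
#   from whoosh.qparser import QueryParser
#   ix = open_dir('../Index', indexname='academia_index')
#   with ix.searcher() as searcher:
#       query = QueryParser("doc_texts", ix.schema).parse(u"memory leak")
#       for hit in searcher.search(query):
#           print (hit['question_id'])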
#####################################################################
#####################################################################
#@profile
def index_data(db_docs_ix_pointer, site_name):
""" Do the search engine indexing of a data.
"""
doc_writer = db_docs_ix_pointer.writer(limitmb = 512, procs = 2)
full_docs_shlv = shelve.open('../db/' + site_name +'.db', 'r', protocol = -1)
num_of_docs = len(full_docs_shlv.keys())
i = 0
print ("Now Indexing {0}".format(site_name))
for qid in full_docs_shlv.keys():
#Extract all the texts from a document.
tmp_text = ''
        tmp_text += full_docs_shlv[qid]['Title'] + ' '
        tmp_text += full_docs_shlv[qid]['Body'] + ' '
tmp_text += ' '.join([comment['Text'] for comment in full_docs_shlv[qid]['Comments']]) + ' '
tmp_text += ' '.join([answer['Body'] for answer in full_docs_shlv[qid]['Answers']]) + ' '
for answer in full_docs_shlv[qid]['Answers']:
tmp_text += ' '.join([ans_comment['Text'] for ans_comment in answer['Comments']]) + ' '
#Convert the tags from the form <aa><bb> to ['aa','bb']
tmp_tags = full_docs_shlv[qid]['Tags']
l = tmp_tags.split("><")
fixed_tags = [tag.replace("<", "").replace(">","") for tag in l]
fixed_tags = unicode(",".join(fixed_tags))
doc_writer.add_document(doc_texts = unicode(tmp_text),
doc_tags = fixed_tags,
question_id = unicode(qid))
#Display a progress report to the user.
i+=1
if (i%100 == 0):
print ("Indexed doc {0} out of {1}".format(i,num_of_docs))
    #Commit the writer before releasing the index and the shelve.
    doc_writer.commit()
    full_docs_shlv.close()
    db_docs_ix_pointer.close()
return
#####################################################################
#####################################################################
def get_tags_information(path_to_datadumps, site_name):
""" Get the tags of a single site, and their count.
Return a list of the form: [(tag name, tag count)..]
"""
tags_info = []
tags_root = ET.parse(path_to_datadumps + site_name + '/Tags.xml').getroot()
for tag in tags_root:
tag_name = tag.attrib['TagName']
count = tag.attrib['Count']
tags_info.append((tag_name, count))
return tags_info
#####################################################################
#####################################################################
#####################################################################
def main(is_debug_mode):
""" Iterate over all the avaiable datadumps, index them all and create a metadata file.
in debug_mode, allow the user to select which sites to index.
"""
if not os.path.exists('../Index'):
os.mkdir('../Index')
if not os.path.exists('../db'):
os.mkdir('../db')
if not os.path.exists('../temp_db'):
os.mkdir('../temp_db')
if not os.path.exists('../Metadata'):
os.mkdir('../Metadata')
if not os.path.exists('../Data'):
os.mkdir('../Data')
#Clean old metadata files, if present.
files = os.listdir('../Metadata/')
for file in files:
os.remove('../Metadata/'+file)
path_to_datadumps = '../Datadumps/'
site_names = os.listdir(path_to_datadumps)
num_of_sites = len(site_names)
#create a shelv to hold the metadata
metadata_shelve = shelve.open('../Metadata/metadata.db', 'n', protocol = -1, writeback = False)
j=0
for site_name in site_names:
j += 1
print ("----> Now Parsing {0} ({1}/{2})<-----".format(site_name, j, num_of_sites))
if is_debug_mode:
#Allow the user to skip indexing of a datadump
user_input = raw_input('Skip {0}?'.format(site_name))
if user_input=='y':
                #The user does not want to index - but if the site's data was already indexed
#we want it to appear in the metadata
if os.path.isfile('../db/'+site_name+'.db'):
#get the tags information
tags_info = get_tags_information(path_to_datadumps, site_name) #[(tag name, size), ..]
#Store metadata as shelve dict: {db_name, (number of docs, list of tags)}
full_docs_shlv = shelve.open('../db/' + site_name +'.db', 'r', protocol = -1)
metadata_shelve[site_name] = (str(len(full_docs_shlv.keys())), tags_info)
full_docs_shlv.close()
continue
#delete the temp_dbs
temp_files = os.listdir('../temp_db/')
for temp_file in temp_files:
os.remove('../temp_db/'+temp_file)
if os.path.isfile('../db/'+site_name+'.db'):
os.remove('../db/'+site_name+'.db')
#Parse the xmls, and index the documents
parse_xmls(path_to_datadumps + site_name + '/', site_name)
db_docs_ix_pointer = create_schema('../Index', site_name)
index_data(db_docs_ix_pointer, site_name)
#get the tags information
tags_info = get_tags_information(path_to_datadumps, site_name) #[(tag name, size), ..]
#Store metadata as shelve dict: {db_name, (number of docs, list of tags)}
full_docs_shlv = shelve.open('../db/' + site_name +'.db', 'r', protocol = -1)
metadata_shelve[site_name] = (str(len(full_docs_shlv.keys())), tags_info)
full_docs_shlv.close()
metadata_shelve.close()
import shutil
shutil.rmtree('../temp_db', ignore_errors=True)
if __name__ == "__main__":
try:
debug_selector = sys.argv[1]
except IndexError:
debug_selector = None
if debug_selector == "debug":
debug_mode = True
else:
debug_mode = False
main(debug_mode)
| Ranlevi/InternalSE | Sources/Indexer.py | Python | gpl-2.0 | 16,807 |
"""The Quantitative Imaging Profile (*QiPr*) REST server."""
__version__ = '6.2.12'
"""
The one-based major.minor.patch version. Minor and patch
version numbers begin at 1.
"""
| ohsu-qin/qirest | qirest/__init__.py | Python | bsd-2-clause | 178 |
# Copyright (c) 2018, Henrique Miranda
# All rights reserved.
#
# This file is part of the yambopy project
#
from yambopy import *
from math import sqrt
from time import time
from yambopy.tools.string import marquee
from yambopy.tools.funcs import abs2, lorentzian, gaussian
class YamboDipolesDB():
"""
Class to read the dipoles databases from the ``ndb.dip*`` files
    Can be used, for example, to plot the imaginary part of the dielectric
    function, which corresponds to the optical absorption
"""
def __init__(self,lattice,save='SAVE',filename='ndb.dip_iR_and_P',dip_type='iR',field_dir=[1,0,0],field_dir3=[0,0,1]):
self.lattice = lattice
self.filename = "%s/%s"%(save,filename)
#read dipoles
try:
database = Dataset(self.filename, 'r')
        except Exception:
raise IOError("Error opening %s in YamboDipolesDB"%self.filename)
self.nq_ibz, self.nq_bz, self.nk_ibz, self.nk_bz = database.variables['HEAD_R_LATT'][:].astype(int)
self.spin = database.variables['SPIN_VARS'][1].astype(int)
# indexv is the maximum partially occupied band
# indexc is the minimum partially empty band
self.min_band, self.max_band, self.indexv, self.indexc = database.variables['PARS'][:4].astype(int)
database.close()
# determine the number of bands
self.nbands = self.max_band-self.min_band+1
self.nbandsv = self.indexv-self.min_band+1
self.nbandsc = self.max_band-self.indexc+1
#read the database
self.dipoles = self.readDB(dip_type)
#expand the dipoles to the full brillouin zone
self.expandDipoles(self.dipoles)
def normalize(self,electrons):
"""
Use the electrons to normalize the dipole matrix elements
"""
eiv = electrons.eigenvalues
nkpoints, nbands = eiv.shape
for nk in range(nkpoints):
eivk = eiv[nk]
#create eigenvalues differences arrays
norm = np.array([ [ec-ev for ev in eivk] for ec in eivk ])
#normalize
for i,j in product(list(range(nbands)),repeat=2):
if norm[i,j] == 0:
self.dipoles[nk,:,i,j] = 0
else:
self.dipoles[nk,:,i,j] = self.dipoles[nk,:,i,j]/norm[i,j]
        return self.dipoles
def readDB(self,dip_type):
"""
The dipole matrix has the following indexes:
[nkpoints, 3, nspin, nbands conduction, nbands valence]
"""
self.dip_type = dip_type
dipoles = np.zeros([self.nk_ibz,3,self.nbandsc,self.nbandsv],dtype=np.complex64)
#check dipole db format
filename = "%s_fragment_1"%(self.filename)
database = Dataset(filename)
tag1 = 'DIP_iR_k_0001_spin_0001'
tag2 = 'DIP_iR_k_0001_xyz_0001_spin_0001'
        if tag1 in list(database.variables.keys()):
            dipoles_format = 1
        elif tag2 in list(database.variables.keys()):
            dipoles_format = 2
        else:
            database.close()
            raise ValueError("Unknown dipoles database format in %s" % filename)
        database.close()
for nk in range(self.nk_ibz):
#open database for each k-point
filename = "%s_fragment_%d"%(self.filename,nk+1)
database = Dataset(filename)
if dipoles_format == 1:
dip = database.variables['DIP_%s_k_%04d_spin_%04d'%(dip_type,nk+1,1)]
dip = (dip[:,:,:,0]+1j*dip[:,:,:,1])
for i in range(3):
dipoles[nk,i] = dip[:,:,i].T
elif dipoles_format == 2:
for i in range(3):
dip = database.variables['DIP_%s_k_%04d_xyz_%04d_spin_%04d'%(dip_type,nk+1,i+1,1)][:]
dipoles[nk,i] = dip[0].T+dip[1].T*1j
#close database
database.close()
return dipoles
def expandDipoles(self,dipoles=None,field_dir=[1,0,0],field_dir3=[0,0,1]):
"""
        Expand dipoles from the IBZ to the FBZ
"""
if dipoles is None:
dipoles = self.dipoles
#check if we need to expand the dipoles to the full BZ
lattice = self.lattice
kpts = lattice.car_kpoints
nks = lattice.kpoints_indexes
nss = lattice.symmetry_indexes
#normalize the fields
field_dir = np.array(field_dir)
field_dir = field_dir/np.linalg.norm(field_dir)
field_dir3 = np.array(field_dir3)
field_dir3 = field_dir3/np.linalg.norm(field_dir3)
#calculate polarization directions
field_dirx = field_dir
field_diry = np.cross(field_dir3,field_dirx)
field_dirz = field_dir3
#get band indexes
nkpoints = len(nks)
indexv = self.min_band-1
indexc = self.indexc-1
nbands = self.min_band+self.nbands-1
#Note that P is Hermitian and iR anti-hermitian.
if self.dip_type == 'P':
factor = 1.0
else:
factor = -1.0
#save dipoles in the ibz
self.dipoles_ibz = dipoles
        #get dipoles in the full Brillouin zone
self.dipoles = np.zeros([nkpoints,3,nbands,nbands],dtype=np.complex64)
for nk_fbz,nk_ibz,ns in zip(list(range(nkpoints)),nks,nss):
#if time rev we conjugate
if lattice.time_rev_list[ns]:
dip = np.conjugate(dipoles[nk_ibz,:,:,:])
else:
dip = dipoles[nk_ibz,:,:,:]
            #get symmetry operation
sym = lattice.sym_car[ns].T
#get projection operation
pro = np.array([field_dirx,field_diry,field_dirz])
#transformation
tra = np.dot(pro,sym)
for c,v in product(list(range(self.nbandsc)),list(range(self.nbandsv))):
#rotate dipoles
self.dipoles[nk_fbz,:,indexc+c,indexv+v] = np.dot(tra,dip[:,c,v])
#make hermitian
for c,v in product(list(range(self.nbandsc)),list(range(self.nbandsv))):
self.dipoles[nk_fbz,:,indexv+v,indexc+c] = factor*np.conjugate(self.dipoles[nk_fbz,:,indexc+c,indexv+v])
self.field_dirx = field_dirx
self.field_diry = field_diry
self.field_dirz = field_dirz
return dipoles, kpts
def plot(self,ax,kpoint=0,dir=0,func=abs2):
return ax.matshow(func(self.dipoles[kpoint,dir]))
def ip_eps2(self,electrons,pol=1,ntot_dip=-1,GWshift=0.,broad=0.1,broadtype='l',nbnds=[-1,-1],emin=0.,emax=10.,esteps=500):
"""
Compute independent-particle absorption (by Fulvio Paleari)
electrons -> electrons YamboElectronsDB
GWshift -> rigid GW shift in eV
broad -> broadening of peaks
broadtype -> 'l' is lorentzian, 'g' is gaussian
nbnds -> number of [valence, conduction] bands included starting from Fermi level. Default means all are included
emin,emax,esteps -> frequency range for the plot
"""
#get eigenvalues and weights of electrons
eiv = electrons.eigenvalues
print(eiv.shape)
weights = electrons.weights
nv = electrons.nbandsv
nc = electrons.nbandsc
#get dipoles
dipoles = self.dipoles
#get frequencies and im
freq = np.linspace(emin,emax,esteps)
eps2 = np.zeros([len(freq)])
#Cut bands to the maximum number used for the dipoles
if ntot_dip>0:
eiv = eiv[:,:ntot_dip]
nc=ntot_dip-nv
#Print band gap values and apply GW_shift
electrons.energy_gaps(GWshift)
#Check bands to include in the calculation
if nbnds[0]<0: nbnds[0]=nv
if nbnds[1]<0: nbnds[1]=nc
iv = nv-nbnds[0] #first valence
lc = nv+nbnds[1] #last conduction
#choose broadening
if "l" in broadtype:
broadening = lorentzian
else:
broadening = gaussian
na = np.newaxis
#calculate epsilon
for c,v in product(list(range(nv,lc)),list(range(iv,nv))):
#get electron-hole energy and dipoles
ecv = eiv[:,c]-eiv[:,v]
dip2 = abs2(dipoles[:,pol,c-nv,v])
#make dimensions match
dip2a = dip2[na,:]
ecva = ecv[na,:]
freqa = freq[:,na]
wa = weights[na,:]
#calculate the lorentzians
broadw = broadening(freqa,ecva,broad)
#scale broadening with dipoles and weights
epsk = wa*dip2a*broadw
#integrate over kpoints
eps2 += np.sum(epsk,axis=1)
return freq, eps2
def __str__(self):
lines = []; app = lines.append
app(marquee(self.__class__.__name__))
app("kpoints:")
app("nk_ibz : %d"%self.nk_ibz)
app("nk_bz : %d"%self.nk_bz)
app("bands:")
app("nbands : %d" % self.nbands)
app("nbandsv: %d" % self.nbandsv)
app("nbandsc: %d" % self.nbandsc)
app("indexv : %d" % (self.min_band-1))
app("indexc : %d" % (self.indexc-1))
app("field_dirx: %10.6lf %10.6lf %10.6lf"%tuple(self.field_dirx))
app("field_diry: %10.6lf %10.6lf %10.6lf"%tuple(self.field_diry))
app("field_dirz: %10.6lf %10.6lf %10.6lf"%tuple(self.field_dirz))
return "\n".join(lines)
if __name__ == "__main__":
    # Minimal smoke test; 'lattice' is assumed to be a YamboLatticeDB
    # instance built beforehand from a Yambo SAVE folder.
    ddb = YamboDipolesDB(lattice)
print(ddb)
| henriquemiranda/yambo-py | yambopy/dbs/dipolesdb.py | Python | bsd-3-clause | 9,640 |
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from testrunner.local import testsuite
from testrunner.objects import testcase
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class TestSuite(testsuite.TestSuite):
def ListTests(self):
tests = []
for dirname, dirs, files in os.walk(self.root):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if (filename.endswith(".js") and filename != "test-api.js"):
fullpath = os.path.join(dirname, filename)
relpath = fullpath[len(self.root) + 1 : -3]
testname = relpath.replace(os.path.sep, "/")
test = self._create_test(testname)
tests.append(test)
return tests
def _test_class(self):
return TestCase
class TestCase(testcase.D8TestCase):
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
source = self.get_source()
self._source_files = self._parse_source_files(source)
self._source_flags = self._parse_source_flags(source)
def _parse_source_files(self, source):
files_list = [] # List of file names to append to command arguments.
    files_match = FILES_PATTERN.search(source)
# Accept several lines of 'Files:'.
while True:
if files_match:
files_list += files_match.group(1).strip().split()
files_match = FILES_PATTERN.search(source, files_match.end())
else:
break
files = []
files.append(os.path.normpath(os.path.join(
self.suite.root, "..", "mjsunit", "mjsunit.js")))
files.append(os.path.join(self.suite.root, "test-api.js"))
files.extend([os.path.normpath(os.path.join(self.suite.root, '..', '..', f))
for f in files_list])
if MODULE_PATTERN.search(source):
files.append("--module")
files.append(os.path.join(self.suite.root, self.path + self._get_suffix()))
return files
def _get_files_params(self):
files = self._source_files
if self._test_config.isolates:
files = files + ['--isolate'] + files
return files
def _get_source_flags(self):
return self._source_flags
def _get_suite_flags(self):
return ['--enable-inspector', '--allow-natives-syntax']
def _get_source_path(self):
return os.path.join(self.suite.root, self.path + self._get_suffix())
def GetSuite(*args, **kwargs):
return TestSuite(*args, **kwargs)
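# For reference, a debugger test opts into extra files and module mode via
# header comments matched by FILES_PATTERN / MODULE_PATTERN above
# (illustrative paths, not from the original file):
#   // Files: test/mjsunit/mjsunit.js
#   // MODULE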
| fceller/arangodb | 3rdParty/V8/v7.1.302.28/test/debugger/testcfg.py | Python | apache-2.0 | 2,660 |
# vi:set ts=8 sts=4 sw=4 et tw=80:
#
# Copyright (C) 2007 Xavier de Gaye.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program (see the file COPYING); if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
#
"""Pyclewn event loop."""
import os
import time
import select
import errno
import asyncore
import threading
if os.name == 'nt':
from clewn.nt import PipePeek
else:
from clewn.posix import PipePeek
import clewn.asyncproc as asyncproc
import clewn.misc as misc
# set the logging methods
(critical, error, warning, info, debug) = misc.logmethods('loop')
Unused = critical
Unused = error
Unused = warning
Unused = info
Unused = debug
use_select_emulation = ('CLEWN_PIPES' in os.environ or os.name == 'nt')
def get_asyncobj(fd, file_type, socket_map):
"""Return an asyncore instance from 'socket_map' if matching 'file_type'."""
asyncobj = socket_map.get(fd)
if asyncobj and isinstance(asyncobj.socket, file_type):
return asyncobj
return None
def strip_asyncobj(wtd, file_type, socket_map):
"""Remove all 'file_type' file descriptors in 'wtd'."""
tmp_list = wtd[:]
for fd in tmp_list:
asyncobj = get_asyncobj(fd, file_type, socket_map)
if asyncobj is not None:
wtd.remove(fd)
def clewn_select(iwtd, owtd, ewtd, timeout, poll):
"""Windows select emulation on pipes and sockets.
The select_peeker thread, once created, is never destroyed.
"""
select_peeker = None
pipe_objects = []
# pipes send only read events
strip_asyncobj(owtd, asyncproc.FileWrapper, poll.socket_map)
strip_asyncobj(ewtd, asyncproc.FileWrapper, poll.socket_map)
# start the peek threads
for fd in iwtd:
asyncobj = get_asyncobj(fd, asyncproc.FileWrapper, poll.socket_map)
if asyncobj is not None:
assert hasattr(asyncobj, 'reader') and asyncobj.reader
if not hasattr(asyncobj, 'peeker'):
asyncobj.peeker = PipePeek(asyncobj.socket.fileno(),
asyncobj, poll.select_event)
asyncobj.peeker.start()
pipe_objects.append(asyncobj)
iwtd.remove(fd)
asyncobj.peeker.start_thread()
if iwtd or owtd or ewtd:
select_peeker = poll.select_thread
select_peeker.set_waitable(iwtd, owtd, ewtd)
select_peeker.start_thread()
# wait for events
iwtd = []
owtd = []
ewtd = []
if select_peeker is None and not pipe_objects:
time.sleep(timeout)
else:
try:
poll.select_event.wait(timeout)
finally:
# stop the select threads
if select_peeker is not None:
iwtd, owtd, ewtd = select_peeker.stop_thread()
for asyncobj in pipe_objects:
asyncobj.peeker.stop_thread()
if asyncobj.peeker.read_event:
iwtd.append(asyncobj.socket.fileno())
poll.select_event.clear()
return iwtd, owtd, ewtd
class Poll:
"""A Poll instance manages a select thread.
Instance attributes:
socket_map: dict
the asyncore map
select_thread: Thread
the thread running the select call
select_event: Event
the Event object that the clewn_select emulation is waiting on
"""
def __init__(self, socket_map):
"""Constructor."""
self.socket_map = socket_map
if use_select_emulation:
self.select_event = threading.Event()
self.select_thread = asyncproc.SelectPeek(
self.socket_map, self.select_event)
# note that the socket_map MUST NOT be empty
self.select_thread.start()
def close(self):
"""Terminate the select thread."""
if use_select_emulation and not self.socket_map:
if self.select_thread and self.select_thread.isAlive():
self.select_thread.join()
def run(self, timeout=0.0):
"""Run the asyncore poll function."""
if self.socket_map:
r = []; w = []; e = []
for fd, obj in self.socket_map.items():
is_r = obj.readable()
is_w = obj.writable()
if is_r:
r.append(fd)
if is_w:
w.append(fd)
if is_r or is_w:
e.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
if use_select_emulation:
r, w, e = clewn_select(r, w, e, timeout, self)
else:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != errno.EINTR:
raise
else:
return
for fd in r:
obj = self.socket_map.get(fd)
if obj is None:
continue
asyncore.read(obj)
for fd in w:
obj = self.socket_map.get(fd)
if obj is None:
continue
asyncore.write(obj)
for fd in e:
obj = self.socket_map.get(fd)
if obj is None:
continue
asyncore._exception(obj)
| newclear/vim-pyclewn | pyclewn/clewn/evtloop.py | Python | gpl-2.0 | 6,023 |
#!/usr/bin/env python3
from collections import Counter
def are_anagrams(*args):
'return True if args are anagrams'
if len(args) < 2:
raise TypeError("expected 2 or more arguments")
c = Counter(args[0])
return all(c == Counter(a) for a in args[1:])
arg1 = "appel apple aplep leapp".split()
#print("check if {} are anagrams".format(arg1))
print("are_anagrams {} ? {} ".format(arg1, are_anagrams(*arg1)))
| kmad1729/python_notes | gen_progs/are_anagram.py | Python | unlicense | 432 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: cloudwatchevent_rule
short_description: Manage CloudWatch Event rules and targets
description:
- This module creates and manages CloudWatch event rules and targets.
version_added: "2.2"
extends_documentation_fragment:
- aws
- ec2
author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
requirements:
- python >= 2.6
- boto3
notes:
- A rule must contain at least an I(event_pattern) or I(schedule_expression). A
rule can have both an I(event_pattern) and a I(schedule_expression), in which
case the rule will trigger on matching events as well as on a schedule.
- When specifying targets, I(input) and I(input_path) are mutually-exclusive
and optional parameters.
options:
name:
description:
- The name of the rule you are creating, updating or deleting. No spaces
or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+))
required: true
schedule_expression:
description:
- A cron or rate expression that defines the schedule the rule will
trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes))
required: false
event_pattern:
description:
- A string pattern (in valid JSON format) that is used to match against
incoming events to determine if the rule should be triggered
required: false
state:
description:
- Whether the rule is present (and enabled), disabled, or absent
choices: ["present", "disabled", "absent"]
default: present
required: false
description:
description:
- A description of the rule
required: false
role_arn:
description:
- The Amazon Resource Name (ARN) of the IAM role associated with the rule
required: false
targets:
description:
- "A dictionary array of targets to add to or update for the rule, in the
form C({ id: [string], arn: [string], role_arn: [string], input: [valid JSON string],
input_path: [valid JSONPath string], ecs_parameters: {task_definition_arn: [string], task_count: [int]}}).
I(id) [required] is the unique target assignment ID. I(arn) (required)
is the Amazon Resource Name associated with the target. I(role_arn) (optional) is The Amazon Resource Name
of the IAM role to be used for this target when the rule is triggered. I(input)
(optional) is a JSON object that will override the event data when
passed to the target. I(input_path) (optional) is a JSONPath string
(e.g. C($.detail)) that specifies the part of the event data to be
passed to the target. If neither I(input) nor I(input_path) is
specified, then the entire event is passed to the target in JSON form.
I(task_definition_arn) [optional] is ecs task definition arn.
I(task_count) [optional] is ecs task count."
required: false
'''
EXAMPLES = '''
- cloudwatchevent_rule:
name: MyCronTask
schedule_expression: "cron(0 20 * * ? *)"
description: Run my scheduled task
targets:
- id: MyTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
- cloudwatchevent_rule:
name: MyDisabledCronTask
schedule_expression: "cron(5 minutes)"
description: Run my disabled scheduled task
state: disabled
targets:
- id: MyOtherTargetId
arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
input: '{"foo": "bar"}'
- cloudwatchevent_rule:
name: MyCronTask
state: absent
'''
RETURN = '''
rule:
description: CloudWatch Event rule data
returned: success
type: dict
sample:
arn: 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask'
description: 'Run my scheduled task'
name: 'MyCronTask'
schedule_expression: 'cron(0 20 * * ? *)'
state: 'ENABLED'
targets:
description: CloudWatch Event target(s) assigned to the rule
returned: success
type: list
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
'''
try:
import botocore
except ImportError:
pass # handled by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, camel_dict_to_snake_dict
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
class CloudWatchEventRule(object):
def __init__(self, module, name, client, schedule_expression=None,
event_pattern=None, description=None, role_arn=None):
self.name = name
self.client = client
self.changed = False
self.schedule_expression = schedule_expression
self.event_pattern = event_pattern
self.description = description
self.role_arn = role_arn
self.module = module
def describe(self):
"""Returns the existing details of the rule in AWS"""
try:
rule_info = self.client.describe_rule(Name=self.name)
except botocore.exceptions.ClientError as e:
error_code = e.response.get('Error', {}).get('Code')
if error_code == 'ResourceNotFoundException':
return {}
self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
except botocore.exceptions.BotoCoreError as e:
self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name)
return self._snakify(rule_info)
def put(self, enabled=True):
"""Creates or updates the rule in AWS"""
request = {
'Name': self.name,
'State': "ENABLED" if enabled else "DISABLED",
}
if self.schedule_expression:
request['ScheduleExpression'] = self.schedule_expression
if self.event_pattern:
request['EventPattern'] = self.event_pattern
if self.description:
request['Description'] = self.description
if self.role_arn:
request['RoleArn'] = self.role_arn
try:
response = self.client.put_rule(**request)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name)
self.changed = True
return response
def delete(self):
"""Deletes the rule in AWS"""
self.remove_all_targets()
try:
response = self.client.delete_rule(Name=self.name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name)
self.changed = True
return response
def enable(self):
"""Enables the rule in AWS"""
try:
response = self.client.enable_rule(Name=self.name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name)
self.changed = True
return response
def disable(self):
"""Disables the rule in AWS"""
try:
response = self.client.disable_rule(Name=self.name)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name)
self.changed = True
return response
def list_targets(self):
"""Lists the existing targets for the rule in AWS"""
try:
targets = self.client.list_targets_by_rule(Rule=self.name)
except botocore.exceptions.ClientError as e:
error_code = e.response.get('Error', {}).get('Code')
if error_code == 'ResourceNotFoundException':
return []
self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
except botocore.exceptions.BotoCoreError as e:
self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name)
return self._snakify(targets)['targets']
def put_targets(self, targets):
"""Creates or updates the provided targets on the rule in AWS"""
if not targets:
return
request = {
'Rule': self.name,
'Targets': self._targets_request(targets),
}
try:
response = self.client.put_targets(**request)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name)
self.changed = True
return response
def remove_targets(self, target_ids):
"""Removes the provided targets from the rule in AWS"""
if not target_ids:
return
request = {
'Rule': self.name,
'Ids': target_ids
}
try:
response = self.client.remove_targets(**request)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name)
self.changed = True
return response
def remove_all_targets(self):
"""Removes all targets on rule"""
targets = self.list_targets()
return self.remove_targets([t['id'] for t in targets])
def _targets_request(self, targets):
"""Formats each target for the request"""
targets_request = []
for target in targets:
target_request = {
'Id': target['id'],
'Arn': target['arn']
}
if 'input' in target:
target_request['Input'] = target['input']
if 'input_path' in target:
target_request['InputPath'] = target['input_path']
if 'role_arn' in target:
target_request['RoleArn'] = target['role_arn']
if 'ecs_parameters' in target:
target_request['EcsParameters'] = {}
ecs_parameters = target['ecs_parameters']
if 'task_definition_arn' in target['ecs_parameters']:
target_request['EcsParameters']['TaskDefinitionArn'] = ecs_parameters['task_definition_arn']
if 'task_count' in target['ecs_parameters']:
target_request['EcsParameters']['TaskCount'] = ecs_parameters['task_count']
targets_request.append(target_request)
return targets_request
def _snakify(self, dict):
"""Converts cammel case to snake case"""
return camel_dict_to_snake_dict(dict)
class CloudWatchEventRuleManager(object):
RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']
def __init__(self, rule, targets):
self.rule = rule
self.targets = targets
def ensure_present(self, enabled=True):
"""Ensures the rule and targets are present and synced"""
rule_description = self.rule.describe()
if rule_description:
# Rule exists so update rule, targets and state
self._sync_rule(enabled)
self._sync_targets()
self._sync_state(enabled)
else:
# Rule does not exist, so create new rule and targets
self._create(enabled)
def ensure_disabled(self):
"""Ensures the rule and targets are present, but disabled, and synced"""
self.ensure_present(enabled=False)
def ensure_absent(self):
"""Ensures the rule and targets are absent"""
rule_description = self.rule.describe()
if not rule_description:
# Rule doesn't exist so don't need to delete
return
self.rule.delete()
def fetch_aws_state(self):
"""Retrieves rule and target state from AWS"""
aws_state = {
'rule': {},
'targets': [],
'changed': self.rule.changed
}
rule_description = self.rule.describe()
if not rule_description:
return aws_state
# Don't need to include response metadata noise in response
del rule_description['response_metadata']
aws_state['rule'] = rule_description
aws_state['targets'].extend(self.rule.list_targets())
return aws_state
def _sync_rule(self, enabled=True):
"""Syncs local rule state with AWS"""
if not self._rule_matches_aws():
self.rule.put(enabled)
def _sync_targets(self):
"""Syncs local targets with AWS"""
# Identify and remove extraneous targets on AWS
target_ids_to_remove = self._remote_target_ids_to_remove()
if target_ids_to_remove:
self.rule.remove_targets(target_ids_to_remove)
# Identify targets that need to be added or updated on AWS
targets_to_put = self._targets_to_put()
if targets_to_put:
self.rule.put_targets(targets_to_put)
def _sync_state(self, enabled=True):
"""Syncs local rule state with AWS"""
remote_state = self._remote_state()
if enabled and remote_state != 'ENABLED':
self.rule.enable()
elif not enabled and remote_state != 'DISABLED':
self.rule.disable()
def _create(self, enabled=True):
"""Creates rule and targets on AWS"""
self.rule.put(enabled)
self.rule.put_targets(self.targets)
def _rule_matches_aws(self):
"""Checks if the local rule data matches AWS"""
aws_rule_data = self.rule.describe()
# The rule matches AWS only if all rule data fields are equal
# to their corresponding local value defined in the task
return all([
getattr(self.rule, field) == aws_rule_data.get(field, None)
for field in self.RULE_FIELDS
])
def _targets_to_put(self):
"""Returns a list of targets that need to be updated or added remotely"""
remote_targets = self.rule.list_targets()
return [t for t in self.targets if t not in remote_targets]
def _remote_target_ids_to_remove(self):
"""Returns a list of targets that need to be removed remotely"""
target_ids = [t['id'] for t in self.targets]
remote_targets = self.rule.list_targets()
return [
rt['id'] for rt in remote_targets if rt['id'] not in target_ids
]
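    # A hedged illustration (assumption, not from the original module): with
    # local targets [{'id': 'A', 'arn': arn_a}] and remote targets
    # [{'id': 'A', 'arn': arn_a}, {'id': 'B', 'arn': arn_b}],
    # _targets_to_put() returns [] (the 'A' dicts match exactly, so nothing is
    # re-put) and _remote_target_ids_to_remove() returns ['B'], so the sync
    # removes 'B' and leaves 'A' untouched.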
def _remote_state(self):
"""Returns the remote state from AWS"""
description = self.rule.describe()
if not description:
return
return description['state']
def get_cloudwatchevents_client(module):
"""Returns a boto3 client for accessing CloudWatch Events"""
region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module, boto3=True)
return boto3_conn(module, conn_type='client',
resource='events',
region=region, endpoint=ec2_url,
**aws_conn_kwargs)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True),
schedule_expression=dict(),
event_pattern=dict(),
state=dict(choices=['present', 'disabled', 'absent'],
default='present'),
description=dict(),
role_arn=dict(),
targets=dict(type='list', default=[]),
)
)
module = AnsibleAWSModule(argument_spec=argument_spec)
rule_data = dict(
[(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]
)
targets = module.params.get('targets')
state = module.params.get('state')
cwe_rule = CloudWatchEventRule(module,
client=get_cloudwatchevents_client(module),
**rule_data)
cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets)
if state == 'present':
cwe_rule_manager.ensure_present()
elif state == 'disabled':
cwe_rule_manager.ensure_disabled()
elif state == 'absent':
cwe_rule_manager.ensure_absent()
else:
module.fail_json(msg="Invalid state '{0}' provided".format(state))
module.exit_json(**cwe_rule_manager.fetch_aws_state())
if __name__ == '__main__':
main()
|
ravibhure/ansible
|
lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
|
Python
|
gpl-3.0
| 16,757
|
from numpy import array as ar
import numpy
class Bonus(object):
    def __init__(self, gdim, sd=0.2, nTurn=50, scoreModif=1,
                 col=(160, 160, 160, 200), colNiddle=(30, 30, 30, 200)):
        self.nTurn = nTurn            # turns between respawns
        self.gdim = gdim              # grid dimension (square grid)
        self.scoreModif = scoreModif  # score modifier granted by this bonus
        self.sd = sd                  # spread of the spawn distribution
        self.n = 0                    # turn counter since the last respawn
        self.posi = ar([0, 0], dtype=int)
        self.colour = col
        self.colNiddle = colNiddle
    def tick(self, playerList):
        """Advance one turn; respawn the bonus once nTurn turns have passed."""
        self.n += 1
        if self.n >= self.nTurn:
            self.reSpawn(playerList)
    def reSpawn(self, playerList):
        """Pick a new position, normally distributed around the grid centre,
        clamped to the grid and re-drawn while it overlaps any snake."""
        placed = False
        while not placed:
            placed = True
            XY = numpy.random.normal(self.gdim / 2, self.sd * self.gdim, 2).astype(int)
            XY[XY > (self.gdim - 1)] = self.gdim - 1
            XY[XY < 0] = 0
            for p in playerList:
                for s in p.snake:
                    if sum(s != XY) == 0:  # every coordinate equal: collision
                        placed = False
                        break
        self.posi = XY
        self.n = 0
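# A minimal usage sketch (not part of the original file): DummyPlayer and the
# grid size below are illustrative assumptions about the game's player objects.
if __name__ == '__main__':
    class DummyPlayer(object):
        def __init__(self):
            self.snake = [ar([5, 5], dtype=int)]
    bonus = Bonus(gdim=20, nTurn=3)
    players = [DummyPlayer()]
    for _ in range(5):
        bonus.tick(players)  # respawns once the counter reaches nTurn
    print(bonus.posi)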
|
qgeissmann/ouroboros
|
bonus.py
|
Python
|
gpl-3.0
| 1,195
|
from tests.modules.ffi.base import BaseFFITest
from rpython.rtyper.lltypesystem.llmemory import (
cast_ptr_to_adr as ptr2adr, cast_adr_to_int as adr2int)
class TestMemoryPointer(BaseFFITest):
def test_its_superclass_is_Pointer(self, space):
assert self.ask(
space, "FFI::MemoryPointer.superclass.equal?(FFI::Pointer)")
class TestMemoryPointer__new(BaseFFITest):
def test_it_sets_up_a_wrapped_type_object(self, space):
w_mem_ptr = space.execute("FFI::MemoryPointer.new(:int32, 1)")
assert w_mem_ptr.w_type == space.execute("FFI::Type::INT32")
def test_its_size_argument_defaults_to_1(self, space):
for t in ['char', 'short', 'int']:
w_ptr1 = space.execute("FFI::MemoryPointer.new(:%s, 1)" % t)
w_ptr2 = space.execute("FFI::MemoryPointer.new(:%s)" % t)
assert w_ptr1.sizeof_memory == w_ptr2.sizeof_memory
def test_it_also_lets_you_read_its_address(self, space):
w_results = space.execute("""
mem_ptr = FFI::MemoryPointer.new(:int8, 1)
[mem_ptr, mem_ptr.address]
""")
w_mem_ptr, w_address = space.listview(w_results)
expected = adr2int(ptr2adr(w_mem_ptr.ptr))
actual = self.unwrap(space, w_address)
assert expected == actual
def test_it_lets_you_read_and_write(self, space):
w_results = space.execute("""
str = "hel\\0lo"
sz = str.size
mem_ptr = FFI::MemoryPointer.new(:int8, sz)
mem_ptr.put_bytes(0, str, 0, sz)
[mem_ptr.get_bytes(0, sz), mem_ptr.get_string(0)]
""")
w_allbytes, w_firstbytes = space.listview(w_results)
assert self.unwrap(space, w_allbytes) == "hel\0lo"
assert self.unwrap(space, w_firstbytes) == "hel"
|
topazproject/topaz
|
tests/modules/ffi/test_memory_pointer.py
|
Python
|
bsd-3-clause
| 1,775
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "image_bank.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
kgantsov/image_bank
|
manage.py
|
Python
|
gpl-2.0
| 253
|
'''
This file contains the manually chosen admin forms, as needed for an easy-to-use
editor.
'''
from django.contrib import admin
from django.conf import settings
from metashare.repository.editor import admin_site as editor_site
from metashare.repository.editor.resource_editor import ResourceModelAdmin, \
LicenceModelAdmin
from metashare.repository.editor.superadmin import SchemaModelAdmin
from metashare.repository.models import resourceInfoType_model, \
identificationInfoType_model, metadataInfoType_model, \
communicationInfoType_model, validationInfoType_model, \
relationInfoType_model, foreseenUseInfoType_model, \
corpusMediaTypeType_model, corpusTextInfoType_model, \
corpusVideoInfoType_model, textNumericalFormatInfoType_model, \
videoClassificationInfoType_model, imageClassificationInfoType_model, \
participantInfoType_model, corpusAudioInfoType_model, \
corpusImageInfoType_model, corpusTextNumericalInfoType_model, \
corpusTextNgramInfoType_model, languageDescriptionInfoType_model, \
languageDescriptionTextInfoType_model, actualUseInfoType_model, \
languageDescriptionVideoInfoType_model, \
languageDescriptionImageInfoType_model, \
lexicalConceptualResourceInfoType_model, \
lexicalConceptualResourceTextInfoType_model, \
lexicalConceptualResourceAudioInfoType_model, \
lexicalConceptualResourceVideoInfoType_model, \
lexicalConceptualResourceImageInfoType_model, toolServiceInfoType_model, \
licenceInfoType_model, personInfoType_model, projectInfoType_model, \
documentInfoType_model, organizationInfoType_model, \
documentUnstructuredString_model
from metashare.repository.editor.related_mixin import RelatedAdminMixin
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from django.utils.decorators import method_decorator
from django.contrib.admin.util import unquote
from django.core.exceptions import PermissionDenied
from django.utils.html import escape
from django.utils.encoding import force_unicode
from django.http import Http404
from django.utils.safestring import mark_safe
from django.contrib.admin import helpers
from django.utils.translation import ugettext as _
from metashare.repository.editor.related_objects import AdminRelatedInfo
csrf_protect_m = method_decorator(csrf_protect)
# Custom admin classes
class CorpusTextInfoAdmin(SchemaModelAdmin):
hidden_fields = ('back_to_corpusmediatypetype_model', )
show_tabbed_fieldsets = True
class CorpusVideoInfoAdmin(SchemaModelAdmin):
hidden_fields = ('back_to_corpusmediatypetype_model', )
show_tabbed_fieldsets = True
class GenericTabbedAdmin(SchemaModelAdmin):
show_tabbed_fieldsets = True
class LexicalConceptualResourceInfoAdmin(SchemaModelAdmin):
readonly_fields = ('lexicalConceptualResourceMediaType', )
show_tabbed_fieldsets = True
class LanguageDescriptionInfoAdmin(SchemaModelAdmin):
readonly_fields = ('languageDescriptionMediaType', )
show_tabbed_fieldsets = True
class CorpusAudioModelAdmin(SchemaModelAdmin):
show_tabbed_fieldsets = True
class PersonModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
exclude = ('source_url', 'copy_status')
list_display = ('instance_data', 'num_related_resources', 'related_resources')
def instance_data(self, obj):
return obj.__unicode__()
instance_data.short_description = _('Person')
class OrganizationModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
exclude = ('source_url', 'copy_status')
list_display = ('instance_data', 'num_related_resources', 'related_resources')
def instance_data(self, obj):
return obj.__unicode__()
instance_data.short_description = _('Organization')
class ProjectModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
exclude = ('source_url', 'copy_status')
list_display = ('instance_data', 'num_related_resources', 'related_resources')
def instance_data(self, obj):
return obj.__unicode__()
instance_data.short_description = _('Project')
class DocumentModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
exclude = ('source_url', 'copy_status')
list_display = ('instance_data', 'num_related_resources', 'related_resources')
def instance_data(self, obj):
return obj.__unicode__()
instance_data.short_description = _('Document')
class DocumentUnstructuredStringModelAdmin(admin.ModelAdmin, RelatedAdminMixin):
def response_change(self, request, obj):
'''
Response sent after a successful submission of a change form.
We customize this to allow closing edit popups in the same way
as response_add deals with add popups.
'''
if '_popup_o2m' in request.REQUEST:
caller = None
if '_caller' in request.REQUEST:
caller = request.REQUEST['_caller']
return self.edit_response_close_popup_magic_o2m(obj, caller)
        if '_popup' in request.REQUEST:
            if "_continue" in request.POST:
                return self.save_and_continue_in_popup(obj, request)
return self.edit_response_close_popup_magic(obj)
else:
return super(DocumentUnstructuredStringModelAdmin, self).response_change(request, obj)
@csrf_protect_m
@transaction.commit_on_success
def change_view(self, request, object_id, extra_context=None):
"""
The 'change' admin view for this model.
This follows closely the base implementation from Django 1.3's
django.contrib.admin.options.ModelAdmin,
with the explicitly marked modifications.
"""
# pylint: disable-msg=C0103
model = self.model
opts = model._meta
obj = self.get_object(request, unquote(object_id))
if not self.has_change_permission(request, obj):
raise PermissionDenied
#### begin modification ####
# make sure that the user has a full session length time for the current
# edit activity
request.session.set_expiry(settings.SESSION_COOKIE_AGE)
#### end modification ####
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
if request.method == 'POST' and "_saveasnew" in request.POST:
return self.add_view(request, form_url='../add/')
ModelForm = self.get_form(request, obj)
formsets = []
if request.method == 'POST':
form = ModelForm(request.POST, request.FILES, instance=obj)
if form.is_valid():
form_validated = True
new_object = self.save_form(request, form, change=True)
else:
form_validated = False
new_object = obj
if form_validated:
#### begin modification ####
self.save_model(request, new_object, form, change=True)
#### end modification ####
change_message = self.construct_change_message(request, form, formsets)
self.log_change(request, new_object, change_message)
return self.response_change(request, new_object)
else:
form = ModelForm(instance=obj)
#### begin modification ####
media = self.media or []
#### end modification ####
inline_admin_formsets = []
#### begin modification ####
adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
self.prepopulated_fields, self.get_readonly_fields(request, obj),
model_admin=self)
media = media + adminForm.media
#### end modification ####
context = {
'title': _('Change %s') % force_unicode(opts.verbose_name),
'adminform': adminForm,
'object_id': object_id,
'original': obj,
'is_popup': "_popup" in request.REQUEST or \
"_popup_o2m" in request.REQUEST,
'media': mark_safe(media),
'inline_admin_formsets': inline_admin_formsets,
'errors': helpers.AdminErrorList(form, formsets),
'root_path': self.admin_site.root_path,
'app_label': opts.app_label,
'kb_link': settings.KNOWLEDGE_BASE_URL,
'comp_name': _('%s') % force_unicode(opts.verbose_name),
}
context.update(extra_context or {})
return self.render_change_form(request, context, change=True, obj=obj)
# Models which are always rendered inline so they don't need their own admin form:
purely_inline_models = (
actualUseInfoType_model,
identificationInfoType_model,
metadataInfoType_model,
communicationInfoType_model,
validationInfoType_model,
relationInfoType_model,
foreseenUseInfoType_model,
corpusMediaTypeType_model,
textNumericalFormatInfoType_model,
videoClassificationInfoType_model,
imageClassificationInfoType_model,
participantInfoType_model,
)
custom_admin_classes = {
resourceInfoType_model: ResourceModelAdmin,
corpusAudioInfoType_model: CorpusAudioModelAdmin,
corpusTextInfoType_model: CorpusTextInfoAdmin,
corpusVideoInfoType_model: CorpusVideoInfoAdmin,
corpusImageInfoType_model: GenericTabbedAdmin,
corpusTextNumericalInfoType_model: GenericTabbedAdmin,
corpusTextNgramInfoType_model: GenericTabbedAdmin,
languageDescriptionInfoType_model: LanguageDescriptionInfoAdmin,
languageDescriptionTextInfoType_model: GenericTabbedAdmin,
languageDescriptionVideoInfoType_model: GenericTabbedAdmin,
languageDescriptionImageInfoType_model: GenericTabbedAdmin,
lexicalConceptualResourceInfoType_model: LexicalConceptualResourceInfoAdmin,
lexicalConceptualResourceTextInfoType_model: GenericTabbedAdmin,
lexicalConceptualResourceAudioInfoType_model: GenericTabbedAdmin,
lexicalConceptualResourceVideoInfoType_model: GenericTabbedAdmin,
lexicalConceptualResourceImageInfoType_model: GenericTabbedAdmin,
toolServiceInfoType_model: GenericTabbedAdmin,
licenceInfoType_model: LicenceModelAdmin,
personInfoType_model: PersonModelAdmin,
organizationInfoType_model: OrganizationModelAdmin,
projectInfoType_model: ProjectModelAdmin,
documentInfoType_model: DocumentModelAdmin,
documentUnstructuredString_model: DocumentUnstructuredStringModelAdmin,
}
def register():
'''
Manual improvements over the automatically generated admin registration.
This presupposes the automatic parts have already been run.
'''
for model in purely_inline_models:
admin.site.unregister(model)
for modelclass, adminclass in custom_admin_classes.items():
admin.site.unregister(modelclass)
admin.site.register(modelclass, adminclass)
# And finally, make sure that our editor has the exact same model/admin pairs registered:
for modelclass, adminobject in admin.site._registry.items():
editor_site.register(modelclass, adminobject.__class__)
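# A hedged usage note (assumption, not from the original file): register() is
# meant to be called after the automatic admin registration has run, e.g.:
#
#   from metashare.repository.editor import manual_admin_registration
#   manual_admin_registration.register()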
|
JuliBakagianni/CEF-ELRC
|
metashare/repository/editor/manual_admin_registration.py
|
Python
|
bsd-3-clause
| 11,138
|
import os
from quads.model import Cloud, Host
from quads.tools.ssh_helper import SSHHelper
LSHW_OUTPUT_DIR = "/var/www/html/lshw/"
def run_lshw(hostname: str, file_path: str) -> None:
    """
    Connect to the remote host via SSHHelper and run the lshw command
    :param hostname: the remote host FQDN
    :param file_path: the full file path where the output of lshw is stored
    :return: None
    """
try:
ssh_helper = SSHHelper(hostname)
except Exception:
print(f"Something went wrong trying to connect to: {hostname}")
return
_, output = ssh_helper.run_cmd("lshw -xml")
    if output:
        with open(file_path, "w") as _file:
            _file.writelines(output)
def main() -> None:
"""
Main function
:return: None
"""
cloud = Cloud.objects(name="cloud01").first()
hosts = Host.objects(cloud=cloud, retired=False, broken=False)
for host in hosts:
file_name = f"{host.name}.xml"
file_path = os.path.join(LSHW_OUTPUT_DIR, file_name)
if os.path.exists(file_path):
if os.path.getsize(file_path) < 1:
run_lshw(host.name, file_path)
else:
run_lshw(host.name, file_path)
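# Note (added comment, not in the original file): main() re-runs lshw only when
# a host's cached XML output is missing or empty, so repeated invocations skip
# hosts that already have data under LSHW_OUTPUT_DIR.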
if __name__ == "__main__":
main()
|
redhat-performance/quads
|
quads/tools/lshw.py
|
Python
|
gpl-3.0
| 1,287
|
"""TLS Lite + asyncore."""
import asyncore
from gdata.tlslite.TLSConnection import TLSConnection
from AsyncStateMachine import AsyncStateMachine
class TLSAsyncDispatcherMixIn(AsyncStateMachine):
"""This class can be "mixed in" with an
L{asyncore.dispatcher} to add TLS support.
This class essentially sits between the dispatcher and the select
loop, intercepting events and only calling the dispatcher when
applicable.
In the case of handle_read(), a read operation will be activated,
and when it completes, the bytes will be placed in a buffer where
the dispatcher can retrieve them by calling recv(), and the
dispatcher's handle_read() will be called.
In the case of handle_write(), the dispatcher's handle_write() will
be called, and when it calls send(), a write operation will be
activated.
To use this class, you must combine it with an asyncore.dispatcher,
and pass in a handshake operation with setServerHandshakeOp().
Below is an example of using this class with medusa. This class is
mixed in with http_channel to create http_tls_channel. Note:
1. the mix-in is listed first in the inheritance list
2. the input buffer size must be at least 16K, otherwise the
dispatcher might not read all the bytes from the TLS layer,
leaving some bytes in limbo.
3. IE seems to have a problem receiving a whole HTTP response in a
single TLS record, so HTML pages containing '\\r\\n\\r\\n' won't
be displayed on IE.
Add the following text into 'start_medusa.py', in the 'HTTP Server'
section::
from tlslite.api import *
s = open("./serverX509Cert.pem").read()
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
s = open("./serverX509Key.pem").read()
privateKey = parsePEMKey(s, private=True)
class http_tls_channel(TLSAsyncDispatcherMixIn,
http_server.http_channel):
ac_in_buffer_size = 16384
def __init__ (self, server, conn, addr):
http_server.http_channel.__init__(self, server, conn, addr)
TLSAsyncDispatcherMixIn.__init__(self, conn)
self.tlsConnection.ignoreAbruptClose = True
self.setServerHandshakeOp(certChain=certChain,
privateKey=privateKey)
hs.channel_class = http_tls_channel
If the TLS layer raises an exception, the exception will be caught
in asyncore.dispatcher, which will call close() on this class. The
TLS layer always closes the TLS connection before raising an
exception, so the close operation will complete right away, causing
asyncore.dispatcher.close() to be called, which closes the socket
and removes this instance from the asyncore loop.
"""
def __init__(self, sock=None):
AsyncStateMachine.__init__(self)
if sock:
self.tlsConnection = TLSConnection(sock)
#Calculate the sibling I'm being mixed in with.
#This is necessary since we override functions
#like readable(), handle_read(), etc., but we
#also want to call the sibling's versions.
for cl in self.__class__.__bases__:
if cl != TLSAsyncDispatcherMixIn and cl != AsyncStateMachine:
self.siblingClass = cl
break
else:
raise AssertionError()
    def readable(self):
        result = self.wantsReadEvent()
        if result is not None:
            return result
        return self.siblingClass.readable(self)
    def writable(self):
        result = self.wantsWriteEvent()
        if result is not None:
            return result
        return self.siblingClass.writable(self)
def handle_read(self):
self.inReadEvent()
def handle_write(self):
self.inWriteEvent()
def outConnectEvent(self):
self.siblingClass.handle_connect(self)
def outCloseEvent(self):
asyncore.dispatcher.close(self)
def outReadEvent(self, readBuffer):
self.readBuffer = readBuffer
self.siblingClass.handle_read(self)
def outWriteEvent(self):
self.siblingClass.handle_write(self)
    def recv(self, bufferSize=16384):
        if bufferSize < 16384 or self.readBuffer is None:
            raise AssertionError()
returnValue = self.readBuffer
self.readBuffer = None
return returnValue
def send(self, writeBuffer):
self.setWriteOp(writeBuffer)
return len(writeBuffer)
def close(self):
if hasattr(self, "tlsConnection"):
self.setCloseOp()
else:
asyncore.dispatcher.close(self)
|
CollabQ/CollabQ
|
vendor/gdata/tlslite/integration/TLSAsyncDispatcherMixIn.py
|
Python
|
apache-2.0
| 4,739
|
# -*- coding: utf-8 -*-
""" Provides class to run TankCore from python """
import logging
import os
import sys
import time
import traceback
import fnmatch
from pkg_resources import resource_filename
from ..core import tankcore
class ApiWorker:
    """ Worker class that runs tank core via python """
    # Assumed option name (added): the original file referenced
    # self.IGNORE_LOCKS in configure() without defining it anywhere,
    # which would raise AttributeError at runtime.
    IGNORE_LOCKS = "ignore_locks"
    def __init__(self):
        self.core = tankcore.TankCore()
        self.baseconfigs_location = '/etc/yandex-tank'
        self.log = logging.getLogger(__name__)
def init_logging(self, log_filename="tank.log"):
""" Set up logging """
logger = logging.getLogger('')
self.log_filename = log_filename
self.core.add_artifact_file(self.log_filename)
file_handler = logging.FileHandler(self.log_filename)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(
logging.Formatter(
"%(asctime)s [%(levelname)s] %(name)s %(message)s"))
logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
stderr_hdl = logging.StreamHandler(sys.stderr)
# fmt_verbose = logging.Formatter(
# "%(asctime)s [%(levelname)s] %(name)s %(message)s")
fmt_regular = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s", "%H:%M:%S")
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(fmt_regular)
stderr_hdl.setFormatter(fmt_regular)
f_err = SingleLevelFilter(logging.ERROR, True)
f_warn = SingleLevelFilter(logging.WARNING, True)
f_crit = SingleLevelFilter(logging.CRITICAL, True)
console_handler.addFilter(f_err)
console_handler.addFilter(f_warn)
console_handler.addFilter(f_crit)
logger.addHandler(console_handler)
f_info = SingleLevelFilter(logging.INFO, True)
f_debug = SingleLevelFilter(logging.DEBUG, True)
stderr_hdl.addFilter(f_info)
stderr_hdl.addFilter(f_debug)
logger.addHandler(stderr_hdl)
def __add_user_options(self):
""" override config options with user specified options"""
if self.options.get('user_options', None):
self.core.apply_shorthand_options(self.options['user_options'])
def configure(self, options):
""" Make preparations before running Tank """
self.options = options
if self.options.get('lock_dir', None):
self.core.set_option(
self.core.SECTION, "lock_dir", self.options['lock_dir'])
while True:
try:
self.core.get_lock(self.options.get('ignore_lock', None))
break
except Exception as exc:
if self.options.get('lock_fail', None):
raise RuntimeError("Lock file present, cannot continue")
self.log.info(
"Couldn't get lock. Will retry in 5 seconds... (%s)",
str(exc))
time.sleep(5)
configs = self.get_default_configs()
if self.options.get('config', None):
configs.append(self.options['config'])
self.core.load_configs(configs)
self.__add_user_options()
self.core.load_plugins()
if self.options.get('ignore_lock', None):
self.core.set_option(self.core.SECTION, self.IGNORE_LOCKS, "1")
def perform_test(self):
""" Run the test and wait for finish """
self.log.info("Performing test...")
retcode = 1
try:
self.core.plugins_configure()
self.core.plugins_prepare_test()
if self.options.get('manual_start', None):
self.log.info(
"Manual start option specified, waiting for user actions")
raw_input("Press Enter key to start test")
self.core.plugins_start_test()
retcode = self.core.wait_for_finish()
retcode = self.core.plugins_end_test(retcode)
retcode = self.core.plugins_post_process(retcode)
except KeyboardInterrupt as ex:
self.log.info(
"Do not press Ctrl+C again, the test will be broken otherwise")
self.log.debug(
"Caught KeyboardInterrupt: %s", traceback.format_exc(ex))
try:
retcode = self.__graceful_shutdown()
except KeyboardInterrupt as ex:
self.log.debug(
"Caught KeyboardInterrupt again: %s",
traceback.format_exc(ex))
self.log.info(
"User insists on exiting, aborting graceful shutdown...")
retcode = 1
except Exception as ex:
self.log.info("Exception: %s", traceback.format_exc(ex))
self.log.error("%s", ex)
retcode = self.__graceful_shutdown()
self.core.release_lock()
self.log.info("Done performing test with code %s", retcode)
return retcode
def get_default_configs(self):
""" returns default configs list, from /etc, home dir and package_data"""
# initialize basic defaults
configs = [resource_filename(__name__, 'config/00-base.ini')]
try:
conf_files = sorted(os.listdir(self.baseconfigs_location))
for filename in conf_files:
if fnmatch.fnmatch(filename, '*.ini'):
configs += [
os.path.realpath(
self.baseconfigs_location + os.sep + filename)
]
except OSError:
self.log.warn(
self.baseconfigs_location +
' is not accessible to get configs list')
configs += [os.path.expanduser('~/.yandex-tank')]
return configs
def __graceful_shutdown(self):
""" call shutdown routines """
retcode = 1
self.log.info("Trying to shutdown gracefully...")
retcode = self.core.plugins_end_test(retcode)
retcode = self.core.plugins_post_process(retcode)
self.log.info("Done graceful shutdown")
return retcode
class SingleLevelFilter(logging.Filter):
"""Exclude or approve one msg type at a time. """
def __init__(self, passlevel, reject):
logging.Filter.__init__(self)
self.passlevel = passlevel
self.reject = reject
def filter(self, record):
if self.reject:
return record.levelno != self.passlevel
else:
return record.levelno == self.passlevel
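# A minimal usage sketch (assumption, not from the original file): passing
# reject=False lets only one level through, e.g. a WARNING-only handler:
#
#   warn_handler = logging.StreamHandler()
#   warn_handler.addFilter(SingleLevelFilter(logging.WARNING, reject=False))
#   logging.getLogger('').addHandler(warn_handler)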
|
nnugumanov/yandex-tank
|
yandextank/api/apiworker.py
|
Python
|
lgpl-2.1
| 6,566
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=wildcard-import, unused-wildcard-import, too-many-lines
"""Sparse NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
from __builtin__ import sum as py_sum
except ImportError:
from builtins import slice as py_slice
from builtins import sum as py_sum
import ctypes
import warnings
import operator
from array import array as native_array
__all__ = ["_ndarray_cls", "csr_matrix", "row_sparse_array",
"BaseSparseNDArray", "CSRNDArray", "RowSparseNDArray",
"add", "subtract", "multiply", "divide"]
import numpy as np
from ..base import NotSupportedForSparseNDArray
from ..base import _LIB, numeric_types
from ..base import c_array_buf, mx_real_t, integer_types
from ..base import mx_uint, NDArrayHandle, check_call
from ..context import Context
from . import _internal
from . import op
try:
from .gen_sparse import * # pylint: disable=redefined-builtin
except ImportError:
pass
from ._internal import _set_ndarray_class
from .ndarray import NDArray, _storage_type, _DTYPE_NP_TO_MX, _DTYPE_MX_TO_NP
from .ndarray import _STORAGE_TYPE_STR_TO_ID, _STORAGE_TYPE_ROW_SPARSE, _STORAGE_TYPE_CSR
from .ndarray import _STORAGE_TYPE_UNDEFINED, _STORAGE_TYPE_DEFAULT
from .ndarray import zeros as _zeros_ndarray
from .ndarray import array as _array
from .ndarray import _ufunc_helper
try:
import scipy.sparse as spsp
except ImportError:
spsp = None
_STORAGE_AUX_TYPES = {
'row_sparse': [np.int64],
'csr': [np.int64, np.int64]
}
def _new_alloc_handle(stype, shape, ctx, delay_alloc, dtype, aux_types, aux_shapes=None):
"""Return a new handle with specified storage type, shape, dtype and context.
Empty handle is only used to hold results
Returns
-------
handle
A new empty ndarray handle
"""
hdl = NDArrayHandle()
for aux_t in aux_types:
if np.dtype(aux_t) != np.dtype("int64"):
raise NotImplementedError("only int64 is supported for aux types")
aux_type_ids = [int(_DTYPE_NP_TO_MX[np.dtype(aux_t).type]) for aux_t in aux_types]
aux_shapes = [(0,) for aux_t in aux_types] if aux_shapes is None else aux_shapes
aux_shape_lens = [len(aux_shape) for aux_shape in aux_shapes]
aux_shapes = py_sum(aux_shapes, ())
num_aux = mx_uint(len(aux_types))
check_call(_LIB.MXNDArrayCreateSparseEx(
ctypes.c_int(int(_STORAGE_TYPE_STR_TO_ID[stype])),
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[np.dtype(dtype).type])),
num_aux,
c_array_buf(ctypes.c_int, native_array('i', aux_type_ids)),
c_array_buf(mx_uint, native_array('I', aux_shape_lens)),
c_array_buf(mx_uint, native_array('I', aux_shapes)),
ctypes.byref(hdl)))
return hdl
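# A hedged illustration (assumption, not from the original file): for a 'csr'
# array the two aux arrays are indptr and indices, so a call might look like
#
#   hdl = _new_alloc_handle('csr', (3, 3), Context('cpu', 0), False,
#                           np.float32, _STORAGE_AUX_TYPES['csr'])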
class BaseSparseNDArray(NDArray):
"""The base class of an NDArray stored in a sparse storage format.
See CSRNDArray and RowSparseNDArray for more details.
"""
def __repr__(self):
"""Returns a string representation of the sparse array."""
shape_info = 'x'.join(['%d' % x for x in self.shape])
# The data content is not displayed since the array usually has big shape
return '\n<%s %s @%s>' % (self.__class__.__name__,
shape_info, self.context)
def __add__(self, other):
return add(self, other)
def __sub__(self, other):
return subtract(self, other)
def __mul__(self, other):
return multiply(self, other)
def __div__(self, other):
return divide(self, other)
def __iadd__(self, other):
raise NotImplementedError()
def __isub__(self, other):
raise NotImplementedError()
def __imul__(self, other):
raise NotImplementedError()
def __idiv__(self, other):
raise NotImplementedError()
def __itruediv__(self, other):
raise NotImplementedError()
def _sync_copyfrom(self, source_array):
raise NotImplementedError()
def _at(self, idx):
raise NotSupportedForSparseNDArray(self._at, '[idx]', idx)
def _slice(self, start, stop):
raise NotSupportedForSparseNDArray(self._slice, None, start, stop)
def reshape(self, *shape, **kwargs):
raise NotSupportedForSparseNDArray(self.reshape, None, shape)
@property
def size(self):
# the `size` for a sparse ndarray is ambiguous, hence disabled.
raise NotImplementedError()
def _aux_type(self, i):
"""Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type.
"""
aux_type = ctypes.c_int()
check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
return _DTYPE_MX_TO_NP[aux_type.value]
@property
def _num_aux(self):
"""The number of aux data used to help store the sparse ndarray.
"""
return len(_STORAGE_AUX_TYPES[self.stype])
@property
def _aux_types(self):
"""The data types of the aux data for the BaseSparseNDArray.
"""
aux_types = []
num_aux = self._num_aux
for i in range(num_aux):
aux_types.append(self._aux_type(i))
return aux_types
def asnumpy(self):
"""Return a dense ``numpy.ndarray`` object with value copied from this array
"""
return self.tostype('default').asnumpy()
def astype(self, dtype, copy=True):
"""Returns a copy of the array after casting to a specified type.
Parameters
----------
dtype : numpy.dtype or str
The type of the returned array.
copy : bool
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2,3), dtype='float32')
>>> y = x.astype('int32')
>>> y.dtype
<type 'numpy.int32'>
"""
if not copy and np.dtype(dtype) == self.dtype:
return self
res = zeros(shape=self.shape, ctx=self.context,
dtype=dtype, stype=self.stype)
self.copyto(res)
return res
def copyto(self, other):
"""Copies the value of this array to another array.
Parameters
----------
other : NDArray or CSRNDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray or RowSparseNDArray
The copied array.
"""
# pylint: disable= no-member, protected-access
if isinstance(other, NDArray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _internal._copyto(self, out=other)
elif isinstance(other, Context):
hret = _ndarray_cls(_new_alloc_handle(self.stype, self.shape, other,
True, self.dtype, self._aux_types))
return _internal._copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
# pylint: enable= no-member, protected-access
def check_format(self, full_check=True):
"""Check whether the NDArray format is valid.
Parameters
----------
full_check : bool, optional
If `True`, rigorous check, O(N) operations. Otherwise
basic check, O(1) operations (default True).
"""
check_call(_LIB.MXNDArraySyncCheckFormat(self.handle, ctypes.c_bool(full_check)))
def _data(self):
"""A deep copy NDArray of the data array associated with the BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetDataNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(hdl)
def _aux_data(self, i):
""" Get a deep copy NDArray of the i-th aux data array associated with the
BaseSparseNDArray.
This function blocks. Do not use it in performance critical code.
"""
self.wait_to_read()
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetAuxNDArray(self.handle, i, ctypes.byref(hdl)))
return NDArray(hdl)
# pylint: disable=abstract-method
class CSRNDArray(BaseSparseNDArray):
"""A sparse representation of 2D NDArray in the Compressed Sparse Row format.
A CSRNDArray represents an NDArray as three separate arrays: `data`,
`indptr` and `indices`. It uses the CSR representation where the column indices for
row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their corresponding values are stored
in ``data[indptr[i]:indptr[i+1]]``.
The column indices for a given row are expected to be sorted in ascending order.
Duplicate column entries for the same row are not allowed.
Example
-------
>>> a = mx.nd.array([[0, 1, 0], [2, 0, 0], [0, 0, 0], [0, 0, 3]])
>>> a = a.tostype('csr')
>>> a.data.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> a.indices.asnumpy()
array([1, 0, 2])
>>> a.indptr.asnumpy()
array([0, 1, 2, 2, 3])
See Also
--------
csr_matrix: Several ways to construct a CSRNDArray
"""
def __reduce__(self):
return CSRNDArray, (None,), super(CSRNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a newly created NDArray based on the indexing key.
Parameters
----------
key : int or slice
Indexing key.
Examples
--------
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6])
>>> a = mx.nd.sparse.csr_matrix((data, indices, indptr), shape=(3, 3))
>>> a.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> a[1:2].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[1].asnumpy()
array([[ 0., 0., 3.]], dtype=float32)
>>> a[-1].asnumpy()
array([[ 4., 5., 6.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(key, int):
if key == -1:
begin = self.shape[0] - 1
else:
begin = key
return op.slice(self, begin=begin, end=begin+1)
if isinstance(key, py_slice):
if key.step is not None:
raise ValueError('CSRNDArray only supports continuous slicing on axis 0')
if key.start is not None or key.stop is not None:
begin = key.start if key.start else 0
end = key.stop if key.stop else self.shape[0]
return op.slice(self, begin=begin, end=end)
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
raise ValueError('Undefined behaviour for {}'.format(key))
# pylint: enable= no-member, protected-access
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or CSRNDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.sparse.zeros('csr', (3,3))
>>> src.asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> # assign CSRNDArray with same storage type
>>> x = mx.nd.ones((3,3)).tostype('csr')
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> # assign NDArray to CSRNDArray
>>> x[:] = mx.nd.ones((3,3)) * 2
>>> x.asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
if not self.writable:
raise ValueError('Failed to assign to a readonly CSRNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
                raise ValueError('Assignment with slice for CSRNDArray is not ' \
                                 'implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
raise ValueError("Assigning numeric types to CSRNDArray is " \
"not implemented yet.")
elif isinstance(value, (np.ndarray, np.generic)):
# TODO(haibin/anisub) check scipy.sparse and use _sync_copy_from to
# avoid the temporary copy
warnings.warn('Assigning non-NDArray object to CSRNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise Exception('CSRNDArray only supports [:] for assignment')
@property
def indices(self):
"""A deep copy NDArray of the indices array of the CSRNDArray.
This generates a deep copy of the column indices of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indices array.
"""
return self._aux_data(1)
@property
def indptr(self):
"""A deep copy NDArray of the indptr array of the CSRNDArray.
This generates a deep copy of the `indptr` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's indptr array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the CSRNDArray.
This generates a deep copy of the `data` of the current `csr` matrix.
Returns
-------
NDArray
This CSRNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@indptr.setter
def indptr(self, indptr):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or CSRNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'row_sparse':
raise ValueError("cast_storage from csr to row_sparse is not supported")
return op.cast_storage(self, stype=stype)
# pylint: enable= no-member, protected-access
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``CSRNDArray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``CSRNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or CSRNDArray or Context
The destination array or context.
Returns
-------
NDArray or CSRNDArray
The copied array. If ``other`` is an ``NDArray`` or ``CSRNDArray``, then the return
value and ``other`` will point to the same ``NDArray`` or ``CSRNDArray``.
"""
if isinstance(other, Context):
return super(CSRNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'csr':
return super(CSRNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscipy(self):
"""Returns a ``scipy.sparse.csr.csr_matrix`` object with value copied from this array
Examples
--------
>>> x = mx.nd.sparse.zeros('csr', (2,3))
>>> y = x.asscipy()
>>> type(y)
<type 'scipy.sparse.csr.csr_matrix'>
>>> y
<2x3 sparse matrix of type '<type 'numpy.float32'>'
with 0 stored elements in Compressed Sparse Row format>
"""
data = self.data.asnumpy()
indices = self.indices.asnumpy()
indptr = self.indptr.asnumpy()
        if not spsp:
            raise ImportError("scipy is not available. "
                              "Please check if the scipy python bindings are installed.")
return spsp.csr_matrix((data, indices, indptr), shape=self.shape, dtype=self.dtype)
# pylint: disable=abstract-method
class RowSparseNDArray(BaseSparseNDArray):
"""A sparse representation of a set of NDArray row slices at given indices.
A RowSparseNDArray represents a multidimensional NDArray using two separate arrays: `data` and
`indices`. The number of dimensions has to be at least 2.
- data: an NDArray of any dtype with shape [D0, D1, ..., Dn].
- indices: a 1-D int64 NDArray with shape [D0] with values sorted in ascending order.
The `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
    >>> dense = mx.nd.array([[1, 2, 3], [0, 0, 0], [4, 0, 5], [0, 0, 0], [0, 0, 0]])
    >>> dense.asnumpy()
array([[ 1., 2., 3.],
[ 0., 0., 0.],
[ 4., 0., 5.],
[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> rsp = dense.tostype('row_sparse')
>>> rsp.indices.asnumpy()
array([0, 2], dtype=int64)
>>> rsp.data.asnumpy()
array([[ 1., 2., 3.],
[ 4., 0., 5.]], dtype=float32)
A RowSparseNDArray is typically used to represent non-zero row slices of a large NDArray
of shape [LARGE0, D1, .. , Dn] where LARGE0 >> D0 and most row slices are zeros.
RowSparseNDArray is used principally in the definition of gradients for operations
that have sparse gradients (e.g. sparse dot and sparse embedding).
See Also
--------
row_sparse_array: Several ways to construct a RowSparseNDArray
"""
def __reduce__(self):
return RowSparseNDArray, (None,), super(RowSparseNDArray, self).__getstate__()
def __iadd__(self, other):
(self + other).copyto(self)
return self
def __isub__(self, other):
(self - other).copyto(self)
return self
def __imul__(self, other):
(self * other).copyto(self)
return self
def __idiv__(self, other):
(self / other).copyto(self)
return self
def __itruediv__(self, other):
(self / other).copyto(self)
return self
def __getitem__(self, key):
"""x.__getitem__(i) <=> x[i]
Returns a sliced view of this array.
Parameters
----------
key : slice
Indexing key.
Examples
--------
>>> x = mx.nd.sparse.zeros('row_sparse', (2, 3))
>>> x[:].asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
if isinstance(key, int):
raise Exception("__getitem__ with int key is not implemented for RowSparseNDArray yet")
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
raise Exception('RowSparseNDArray only supports [:] for __getitem__')
else:
return self
if isinstance(key, tuple):
raise ValueError('Multi-dimension indexing is not supported')
raise ValueError('Undefined behaviour for {}'.format(key))
def __setitem__(self, key, value):
"""x.__setitem__(i, y) <=> x[i]=y
Set self[key] to value. Only slice key [:] is supported.
Parameters
----------
key : slice
The indexing key.
value : NDArray or numpy.ndarray
The value to set.
Examples
--------
>>> src = mx.nd.row_sparse([[1, 0, 2], [4, 5, 6]], [0, 2], (3,3))
>>> src.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign RowSparseNDArray with same storage type
>>> x = mx.nd.sparse.zeros('row_sparse', (3,3))
>>> x[:] = src
>>> x.asnumpy()
array([[ 1., 0., 2.],
[ 0., 0., 0.],
[ 4., 5., 6.]], dtype=float32)
>>> # assign NDArray to RowSparseNDArray
>>> x[:] = mx.nd.ones((3,3))
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if not self.writable:
raise ValueError('Failed to assign to a readonly RowSparseNDArray')
if isinstance(key, py_slice):
if key.step is not None or key.start is not None or key.stop is not None:
                raise ValueError('Assignment with slice for RowSparseNDArray ' \
                                 'is not implemented yet.')
if isinstance(value, NDArray):
# avoid copying to itself
if value.handle is not self.handle:
value.copyto(self)
elif isinstance(value, numeric_types):
_internal._set_value(float(value), out=self)
elif isinstance(value, (np.ndarray, np.generic)):
warnings.warn('Assigning non-NDArray object to RowSparseNDArray is not efficient',
RuntimeWarning)
tmp = _array(value)
tmp.copyto(self)
else:
raise TypeError('type %s not supported' % str(type(value)))
else:
assert(isinstance(key, (int, tuple)))
raise TypeError('RowSparseNDArray only supports [:] for assignment')
# pylint: enable= no-member, protected-access
@property
def indices(self):
"""A deep copy NDArray of the indices array of the RowSparseNDArray.
This generates a deep copy of the row indices of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's indices array.
"""
return self._aux_data(0)
@property
def data(self):
"""A deep copy NDArray of the data array of the RowSparseNDArray.
This generates a deep copy of the `data` of the current `row_sparse` matrix.
Returns
-------
NDArray
This RowSparseNDArray's data array.
"""
return self._data()
@indices.setter
def indices(self, indices):
raise NotImplementedError()
@data.setter
def data(self, data):
raise NotImplementedError()
def tostype(self, stype):
"""Return a copy of the array with chosen storage type.
Returns
-------
NDArray or RowSparseNDArray
A copy of the array with the chosen storage stype
"""
# pylint: disable= no-member, protected-access
if stype == 'csr':
raise ValueError("cast_storage from row_sparse to csr is not supported")
return op.cast_storage(self, stype=stype)
# pylint: enable= no-member, protected-access
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
"""
if isinstance(other, Context):
return super(RowSparseNDArray, self).copyto(other)
elif isinstance(other, NDArray):
stype = other.stype
if stype == 'default' or stype == 'row_sparse':
return super(RowSparseNDArray, self).copyto(other)
else:
raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def retain(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`retain`.
The arguments are the same as for :py:func:`retain`, with
this array as data.
"""
return retain(self, *args, **kwargs)
def _prepare_src_array(source_array, dtype):
"""Prepare `source_array` so that it can be used to construct NDArray.
`source_array` is converted to a `np.ndarray` if it's neither an `NDArray` \
nor an `np.ndarray`.
"""
if not isinstance(source_array, NDArray) and not isinstance(source_array, np.ndarray):
        try:
            source_array = np.array(source_array, dtype=dtype)
        except Exception:
            raise TypeError('values must be array like object')
return source_array
def _prepare_default_dtype(src_array, dtype):
"""Prepare the value of dtype if `dtype` is None. If `src_array` is an NDArray, numpy.ndarray
or scipy.sparse.csr.csr_matrix, return src_array.dtype. float32 is returned otherwise."""
if dtype is None:
if isinstance(src_array, (NDArray, np.ndarray)):
dtype = src_array.dtype
elif spsp and isinstance(src_array, spsp.csr.csr_matrix):
dtype = src_array.dtype
else:
dtype = mx_real_t
return dtype
def _check_shape(s1, s2):
"""check s1 == s2 if both are not None"""
if s1 and s2 and s1 != s2:
raise ValueError("Shape mismatch detected. " + str(s1) + " v.s. " + str(s2))
def csr_matrix(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `CSRNDArray`, an 2D array with compressed sparse row (CSR) format.
The CSRNDArray can be instantiated in several ways:
- csr_matrix(D):
to construct a CSRNDArray with a dense 2D array ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix(S)
to construct a CSRNDArray with a sparse 2D array ``S``
- **S** (*CSRNDArray or scipy.sparse.csr.csr_matrix*) - A sparse matrix.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- csr_matrix((M, N))
to construct an empty CSRNDArray with shape ``(M, N)``
- **M** (*int*) - Number of rows in the matrix
- **N** (*int*) - Number of columns in the matrix
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- csr_matrix((data, indices, indptr))
to construct a CSRNDArray based on the definition of compressed sparse row format \
using three separate arrays, \
where the column indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` \
and their corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. \
The column indices for a given row are expected to be **sorted in ascending order.** \
Duplicate column entries for the same row are not allowed.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in row-major order.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the column index for each non-zero element in ``data``.
- **indptr** (*array_like*) - An object exposing the array interface, which \
      stores the offset into ``data`` of the first non-zero element of each \
row of the matrix.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the indices and indptr arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``data.dtype`` if ``data`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- csr_matrix((data, (row, col)))
to construct a CSRNDArray based on the COOrdinate format \
    using three separate arrays, \
where ``row[i]`` is the row index of the element, \
``col[i]`` is the column index of the element \
and ``data[i]`` is the data corresponding to the element. All the missing \
elements in the input are taken to be zeroes.
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero entries of the matrix in COO format.
- **row** (*array_like*) - An object exposing the array interface, which \
stores the row index for each non zero element in ``data``.
- **col** (*array_like*) - An object exposing the array interface, which \
stores the col index for each non zero element in ``data``.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
shape is inferred from the ``row`` and ``col`` arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
    arg1: tuple of int, tuple of array_like, array_like, CSRNDArray, \
    scipy.sparse.csr_matrix or scipy.sparse.coo_matrix
The argument to help instantiate the csr matrix. See above for further details.
shape : tuple of int, optional
The shape of the csr matrix.
ctx: Context, optional
Device context (default is the current default context).
dtype: str or numpy.dtype, optional
The data type of the output array.
Returns
-------
CSRNDArray
A `CSRNDArray` with the `csr` storage representation.
Example
-------
>>> a = mx.nd.sparse.csr_matrix(([1, 2, 3], [1, 0, 2], [0, 1, 2, 2, 3]), shape=(4, 3))
>>> a.asnumpy()
array([[ 0., 1., 0.],
[ 2., 0., 0.],
[ 0., 0., 0.],
[ 0., 0., 3.]], dtype=float32)
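    The same matrix can also be built from COO-style inputs; note that this
    path converts through scipy, so the sketch below assumes scipy is installed:
    >>> b = mx.nd.sparse.csr_matrix(([1, 2, 3], ([0, 1, 3], [1, 0, 2])), shape=(4, 3))
    >>> b.asnumpy()
    array([[ 0.,  1.,  0.],
           [ 2.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  3.]], dtype=float32)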
See Also
--------
CSRNDArray : MXNet NDArray in compressed sparse row format.
"""
    # construct a csr matrix from (M, N), (data, indices, indptr) or (data, (row, col))
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len == 2:
# construct a sparse csr matrix from
# scipy coo matrix if input format is coo
if isinstance(arg1[1], tuple) and len(arg1[1]) == 2:
data, (row, col) = arg1
if isinstance(data, NDArray):
data = data.asnumpy()
if isinstance(row, NDArray):
row = row.asnumpy()
if isinstance(col, NDArray):
col = col.asnumpy()
coo = spsp.coo_matrix((data, (row, col)), shape=shape)
_check_shape(coo.shape, shape)
csr = coo.tocsr()
return array(csr, ctx=ctx, dtype=dtype)
else:
# empty matrix with shape
_check_shape(arg1, shape)
return empty('csr', arg1, ctx=ctx, dtype=dtype)
elif arg_len == 3:
# data, indices, indptr
return _csr_matrix_from_definition(arg1[0], arg1[1], arg1[2], shape=shape,
ctx=ctx, dtype=dtype)
else:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
else:
# construct a csr matrix from a sparse / dense one
if isinstance(arg1, CSRNDArray) or (spsp and isinstance(arg1, spsp.csr.csr_matrix)):
# construct a csr matrix from scipy or CSRNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, RowSparseNDArray):
raise ValueError("Unexpected input type: RowSparseNDArray")
else:
# construct a csr matrix from a dense one
# prepare default ctx and dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('csr')
def _csr_matrix_from_definition(data, indices, indptr, shape=None, ctx=None,
dtype=None, indices_type=None, indptr_type=None):
"""Create a `CSRNDArray` based on data, indices and indptr"""
# pylint: disable= no-member, protected-access
storage_type = 'csr'
# context
ctx = Context.default_ctx if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indptr_type = _STORAGE_AUX_TYPES[storage_type][0] if indptr_type is None else indptr_type
indices_type = _STORAGE_AUX_TYPES[storage_type][1] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indptr = _prepare_src_array(indptr, indptr_type)
indices = _prepare_src_array(indices, indices_type)
# TODO(junwu): Convert data, indptr, and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indptr, NDArray):
indptr = _array(indptr, ctx, indptr_type)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
if indices.shape[0] == 0:
raise ValueError('invalid shape')
shape = (len(indptr) - 1, op.max(indices).asscalar() + 1)
# verify shapes
aux_shapes = [indptr.shape, indices.shape]
if data.ndim != 1 or indptr.ndim != 1 or indices.ndim != 1 or \
indptr.shape[0] == 0 or len(shape) != 2:
raise ValueError('invalid shape')
result = CSRNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indptr_type, indices_type], aux_shapes))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indptr.handle, ctypes.c_int(0)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(1)))
return result
# pylint: enable= no-member, protected-access
def row_sparse_array(arg1, shape=None, ctx=None, dtype=None):
"""Creates a `RowSparseNDArray`, a multidimensional row sparse array with a set of \
tensor slices at given indices.
The RowSparseNDArray can be instantiated in several ways:
- row_sparse_array(D):
to construct a RowSparseNDArray with a dense ndarray ``D``
- **D** (*array_like*) - An object exposing the array interface, an object whose \
`__array__` method returns an array, or any (nested) sequence.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``D.dtype`` if ``D`` is an NDArray or numpy.ndarray, \
float32 otherwise.
- row_sparse_array(S)
to construct a RowSparseNDArray with a sparse ndarray ``S``
- **S** (*RowSparseNDArray*) - A sparse ndarray.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is ``S.dtype``.
- row_sparse_array((D0, D1 .. Dn))
to construct an empty RowSparseNDArray with shape ``(D0, D1, ... Dn)``
- **D0, D1 .. Dn** (*int*) - The shape of the ndarray
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
- row_sparse_array((data, indices))
to construct a RowSparseNDArray based on the definition of row sparse format \
using two separate arrays, \
where the `indices` stores the indices of the row slices with non-zeros,
while the values are stored in `data`. The corresponding NDArray ``dense``
represented by RowSparseNDArray ``rsp`` has \
``dense[rsp.indices[i], :, :, :, ...] = rsp.data[i, :, :, :, ...]``
    The row indices are expected to be **sorted in ascending order.** \
- **data** (*array_like*) - An object exposing the array interface, which \
holds all the non-zero row slices of the array.
- **indices** (*array_like*) - An object exposing the array interface, which \
stores the row index for each row slice with non-zero elements.
- **shape** (*tuple of int, optional*) - The shape of the array. The default \
    shape is inferred from the indices and data arrays.
- **ctx** (*Context, optional*) - Device context \
(default is the current default context).
- **dtype** (*str or numpy.dtype, optional*) - The data type of the output array. \
The default dtype is float32.
Parameters
----------
arg1: NDArray, numpy.ndarray, RowSparseNDArray, tuple of int or tuple of array_like
The argument to help instantiate the row sparse ndarray. See above for further details.
shape : tuple of int, optional
The shape of the row sparse ndarray.
ctx : Context, optional
Device context (default is the current default context).
dtype : str or numpy.dtype, optional
The data type of the output array.
Returns
-------
RowSparseNDArray
        A `RowSparseNDArray` with the `row_sparse` storage representation.
Example
-------
>>> a = mx.nd.sparse.row_sparse_array(([[1, 2], [3, 4]], [1, 4]), shape=(6, 2))
>>> a.asnumpy()
array([[ 0., 0.],
[ 1., 2.],
[ 0., 0.],
[ 0., 0.],
[ 3., 4.],
[ 0., 0.]], dtype=float32)
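    A dense NDArray converts the same way (values here are illustrative):
    >>> dense = mx.nd.array([[0, 0], [1, 2], [0, 0]])
    >>> rsp = mx.nd.sparse.row_sparse_array(dense)
    >>> rsp.stype
    'row_sparse'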
See Also
--------
RowSparseNDArray : MXNet NDArray in row sparse format.
"""
# construct a row sparse array from (D0, D1 ..) or (data, indices)
if isinstance(arg1, tuple):
arg_len = len(arg1)
if arg_len < 2:
raise ValueError("Unexpected length of input tuple: " + str(arg_len))
elif arg_len > 2:
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
# len(arg1) = 2, is either shape or (data, indices)
if isinstance(arg1[0], integer_types) and isinstance(arg1[1], integer_types):
# empty ndarray with shape
_check_shape(arg1, shape)
return empty('row_sparse', arg1, ctx=ctx, dtype=dtype)
else:
                # data, indices
return _row_sparse_ndarray_from_definition(arg1[0], arg1[1], shape=shape,
ctx=ctx, dtype=dtype)
else:
# construct a row sparse ndarray from a dense / sparse array
if isinstance(arg1, RowSparseNDArray):
# construct a row sparse ndarray from RowSparseNDArray
_check_shape(arg1.shape, shape)
return array(arg1, ctx=ctx, dtype=dtype)
elif isinstance(arg1, CSRNDArray):
raise ValueError("Unexpected input type: CSRNDArray")
else:
            # construct a row sparse ndarray from a dense one
# prepare default dtype since mx.nd.array doesn't use default values
# based on source_array
dtype = _prepare_default_dtype(arg1, dtype)
# create dns array with provided dtype. ctx is not passed since copy across
# ctx requires dtype to be the same
dns = _array(arg1, dtype=dtype)
if ctx is not None and dns.context != ctx:
dns = dns.as_in_context(ctx)
_check_shape(dns.shape, shape)
return dns.tostype('row_sparse')
def _row_sparse_ndarray_from_definition(data, indices, shape=None, ctx=None,
dtype=None, indices_type=None):
"""Create a `RowSparseNDArray` based on data and indices"""
storage_type = 'row_sparse'
# context
ctx = Context.default_ctx if ctx is None else ctx
# types
dtype = _prepare_default_dtype(data, dtype)
indices_type = _STORAGE_AUX_TYPES[storage_type][0] if indices_type is None else indices_type
# prepare src array and types
data = _prepare_src_array(data, dtype)
indices = _prepare_src_array(indices, indices_type)
    # TODO(junwu): Convert data and indices to mxnet NDArrays
# if they are not for now. In the future, we should provide a c-api
# to accept np.ndarray types to copy from to result.data and aux_data
if not isinstance(data, NDArray):
data = _array(data, ctx, dtype)
if not isinstance(indices, NDArray):
indices = _array(indices, ctx, indices_type)
if shape is None:
num_indices = indices.shape[0]
if num_indices == 0:
raise ValueError('invalid shape')
dim0 = indices[num_indices - 1].asscalar() + 1
shape = (dim0, ) + data.shape[1:]
# verify shapes
if data.ndim != len(shape) or indices.ndim != 1 or np.prod(shape[1:]) == 0:
raise ValueError("invalid shape")
result = RowSparseNDArray(_new_alloc_handle(storage_type, shape, ctx, False, dtype,
[indices_type], [indices.shape]))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, data.handle, ctypes.c_int(-1)))
check_call(_LIB.MXNDArraySyncCopyFromNDArray(result.handle, indices.handle, ctypes.c_int(0)))
return result
def _ndarray_cls(handle, writable=True, stype=_STORAGE_TYPE_UNDEFINED):
if stype == _STORAGE_TYPE_UNDEFINED:
stype = _storage_type(handle)
if stype == _STORAGE_TYPE_DEFAULT:
return NDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_CSR:
return CSRNDArray(handle, writable=writable)
elif stype == _STORAGE_TYPE_ROW_SPARSE:
return RowSparseNDArray(handle, writable=writable)
else:
        raise Exception("unknown storage type: %s" % stype)
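# Register the factory above so that array handles coming back from the
# backend are wrapped as NDArray, CSRNDArray or RowSparseNDArray according
# to their storage type.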
_set_ndarray_class(_ndarray_cls)
def add(lhs, rhs):
"""Returns element-wise sum of the input arrays with broadcasting.
Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
``mx.nd.broadcast_plus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_add(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
        then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be added.
rhs : scalar or array
Second array to be added.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise sum of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a+b).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c+d).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_add,
operator.add,
_internal._plus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_add,
operator.add,
_internal._plus_scalar,
None)
# pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
"""Returns element-wise difference of the input arrays with broadcasting.
Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
``mx.nd.broadcast_minus(lhs, rhs)`` when shapes of lhs and rhs do not
match. If lhs.shape == rhs.shape, this is equivalent to
``mx.nd.elemwise_sub(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be subtracted.
rhs : scalar or array
Second array to be subtracted.
If ``lhs.shape != rhs.shape``, they must be
        broadcastable to a common shape.
Returns
-------
NDArray
The element-wise difference of the input arrays.
Examples
--------
>>> a = mx.nd.ones((2,3)).tostype('csr')
>>> b = mx.nd.ones((2,3)).tostype('csr')
>>> a.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> b.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (a-b).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> c = mx.nd.ones((2,3)).tostype('row_sparse')
>>> d = mx.nd.ones((2,3)).tostype('row_sparse')
>>> c.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> d.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> (c-d).asnumpy()
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_sub,
operator.sub,
_internal._minus_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_sub,
operator.sub,
_internal._minus_scalar,
None)
# pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
"""Returns element-wise product of the input arrays with broadcasting.
Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_mul(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array to be multiplied.
rhs : scalar or array
Second array to be multiplied.
If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise multiplication of the input arrays.
Examples
--------
>>> x = mx.nd.ones((2,3)).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1))
>>> z = mx.nd.arange(3)
>>> x.asnumpy()
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
>>> y.asnumpy()
array([[ 0.],
[ 1.]], dtype=float32)
>>> z.asnumpy()
array([ 0., 1., 2.], dtype=float32)
>>> (x*2).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x*y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, y).asnumpy()
array([[ 0., 0., 0.],
[ 1., 1., 1.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> z = z.reshape((1, 3))
>>> z.asnumpy()
array([[ 0., 1., 2.]], dtype=float32)
>>> (x*z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
>>> mx.nd.sparse.multiply(x, z).asnumpy()
array([[ 0., 1., 2.],
[ 0., 1., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_mul,
operator.mul,
_internal._mul_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_mul,
operator.mul,
_internal._mul_scalar,
None)
# pylint: enable= no-member, protected-access
def divide(lhs, rhs):
"""Returns element-wise division of the input arrays with broadcasting.
Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``
when shapes of lhs and rhs do not match. If lhs.shape == rhs.shape,
this is equivalent to ``mx.nd.elemwise_div(lhs, rhs)``
.. note::
If the corresponding dimensions of two arrays have the same size or one of them has size 1,
then the arrays are broadcastable to a common shape.
Parameters
----------
lhs : scalar or array
First array in division.
rhs : scalar or array
Second array in division.
        If ``lhs.shape != rhs.shape``, they must be
broadcastable to a common shape.
Returns
-------
NDArray
The element-wise division of the input arrays.
Examples
--------
>>> x = (mx.nd.ones((2,3))*6).tostype('csr')
>>> y = mx.nd.arange(2).reshape((2,1)) + 1
>>> z = mx.nd.arange(3) + 1
>>> x.asnumpy()
array([[ 6., 6., 6.],
[ 6., 6., 6.]], dtype=float32)
>>> y.asnumpy()
array([[ 1.],
[ 2.]], dtype=float32)
>>> z.asnumpy()
array([ 1., 2., 3.], dtype=float32)
>>> x/2
<NDArray 2x3 @cpu(0)>
>>> (x/3).asnumpy()
array([[ 2., 2., 2.],
[ 2., 2., 2.]], dtype=float32)
>>> (x/y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> mx.nd.sparse.divide(x,y).asnumpy()
array([[ 6., 6., 6.],
[ 3., 3., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
    >>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> z = z.reshape((1,3))
>>> z.asnumpy()
array([[ 1., 2., 3.]], dtype=float32)
>>> (x/z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
>>> mx.nd.sparse.divide(x,z).asnumpy()
array([[ 6., 3., 2.],
[ 6., 3., 2.]], dtype=float32)
"""
# pylint: disable= no-member, protected-access
if isinstance(lhs, NDArray) and isinstance(rhs, NDArray) and lhs.shape == rhs.shape:
return _ufunc_helper(
lhs,
rhs,
op.elemwise_div,
operator.truediv,
_internal._div_scalar,
None)
return _ufunc_helper(
lhs,
rhs,
op.broadcast_div,
operator.truediv,
_internal._div_scalar,
None)
# pylint: enable= no-member, protected-access
def zeros(stype, shape, ctx=None, dtype=None, **kwargs):
"""Return a new array of given shape and type, filled with zeros.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array
ctx : Context, optional
An optional device context (default is the current default context)
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`)
Returns
-------
RowSparseNDArray or CSRNDArray
A created array
Examples
--------
>>> mx.nd.sparse.zeros('csr', (1,2))
<CSRNDArray 1x2 @cpu(0)>
>>> mx.nd.sparse.zeros('row_sparse', (1,2), ctx=mx.cpu(), dtype='float16').asnumpy()
array([[ 0., 0.]], dtype=float16)
"""
# pylint: disable= no-member, protected-access
if stype == 'default':
return _zeros_ndarray(shape, ctx=ctx, dtype=dtype, **kwargs)
if ctx is None:
ctx = Context.default_ctx
dtype = mx_real_t if dtype is None else dtype
if stype == 'row_sparse' or stype == 'csr':
aux_types = _STORAGE_AUX_TYPES[stype]
else:
        raise ValueError("unknown storage type " + stype)
out = _ndarray_cls(_new_alloc_handle(stype, shape, ctx, True, dtype, aux_types))
return _internal._zeros(shape=shape, ctx=ctx, dtype=dtype, out=out, **kwargs)
# pylint: enable= no-member, protected-access
def empty(stype, shape, ctx=None, dtype=None):
"""Returns a new array of given shape and type, without initializing entries.
Parameters
----------
stype: string
The storage type of the empty array, such as 'row_sparse', 'csr', etc
shape : int or tuple of int
The shape of the empty array.
ctx : Context, optional
An optional device context (default is the current default context).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
Returns
-------
CSRNDArray or RowSparseNDArray
A created array.
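    Examples
    --------
    A minimal sketch:
    >>> mx.nd.sparse.empty('csr', (2, 3))
    <CSRNDArray 2x3 @cpu(0)>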
"""
if isinstance(shape, int):
shape = (shape, )
if ctx is None:
ctx = Context.default_ctx
if dtype is None:
dtype = mx_real_t
assert(stype is not None)
if stype == 'csr' or stype == 'row_sparse':
return zeros(stype, shape, ctx=ctx, dtype=dtype)
else:
        raise ValueError("unknown stype: " + str(stype))
def array(source_array, ctx=None, dtype=None):
"""Creates a sparse array from any object exposing the array interface.
Parameters
----------
source_array : RowSparseNDArray, CSRNDArray or scipy.sparse.csr.csr_matrix
The source sparse array
ctx : Context, optional
The default context is ``source_array.context`` if ``source_array`` is an NDArray. \
The current default context otherwise.
dtype : str or numpy.dtype, optional
The data type of the output array. The default dtype is ``source_array.dtype``
if `source_array` is an `NDArray`, `numpy.ndarray` or `scipy.sparse.csr.csr_matrix`, \
`float32` otherwise.
Returns
-------
RowSparseNDArray or CSRNDArray
An array with the same contents as the `source_array`.
Examples
--------
>>> import scipy.sparse as spsp
>>> csr = spsp.csr_matrix((2, 100))
>>> mx.nd.sparse.array(csr)
<CSRNDArray 2x100 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('csr', (3, 2)))
<CSRNDArray 3x2 @cpu(0)>
>>> mx.nd.sparse.array(mx.nd.sparse.zeros('row_sparse', (3, 2)))
<RowSparseNDArray 3x2 @cpu(0)>
"""
ctx = Context.default_ctx if ctx is None else ctx
if isinstance(source_array, NDArray):
assert(source_array.stype != 'default'), \
"Please use `tostype` to create RowSparseNDArray or CSRNDArray from an NDArray"
# prepare dtype and ctx based on source_array, if not provided
dtype = _prepare_default_dtype(source_array, dtype)
# if both dtype and ctx are different from source_array, we cannot copy directly
if source_array.dtype != dtype and source_array.context != ctx:
arr = empty(source_array.stype, source_array.shape, dtype=dtype)
arr[:] = source_array
arr = arr.as_in_context(ctx)
else:
arr = empty(source_array.stype, source_array.shape, dtype=dtype, ctx=ctx)
arr[:] = source_array
return arr
elif spsp and isinstance(source_array, spsp.csr.csr_matrix):
# TODO(haibin) implement `_sync_copy_from` with scipy csr object to reduce a copy
# preprocess scipy csr to canonical form
csr = source_array.sorted_indices()
csr.sum_duplicates()
dtype = _prepare_default_dtype(source_array, dtype)
return csr_matrix((csr.data, csr.indices, csr.indptr), shape=csr.shape, \
dtype=dtype, ctx=ctx)
elif isinstance(source_array, (np.ndarray, np.generic)):
        raise ValueError("Please use mx.nd.array to create an NDArray with source_array of type "
                         + str(type(source_array)))
else:
        raise ValueError("Unexpected source_array type: " + str(type(source_array)))
|
TuSimple/mxnet
|
python/mxnet/ndarray/sparse.py
|
Python
|
apache-2.0
| 62,018
|
#!/usr/bin/env python
'''
decode RCDA messages from a log and optionally play them back to a serial port.
The RCDA message captures RC input bytes when RC_OPTIONS=16 is set.
'''
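# Example invocation (log path and serial port are illustrative):
#   rcda_decode.py --port /dev/ttyUSB0 --baudrate 115200 flight.bin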
import struct
import time
from argparse import ArgumentParser
import serial
from pymavlink import mavutil
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--baudrate", type=int, default=115200, help="baudrate")
parser.add_argument("--port", type=str, default=None, help="port")
parser.add_argument("--delay-mul", type=float, default=1.0, help="delay multiplier")
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
print("Processing log %s" % args.log)
mlog = mavutil.mavlink_connection(args.log)
if args.port:
port = serial.Serial(args.port, args.baudrate, timeout=1.0)
tlast = -1
counter = 0
while True:
msg = mlog.recv_match(type=['RCDA'], condition=args.condition)
if msg is None:
mlog.rewind()
tlast = -1
continue
tnow = msg.TimeUS
if tlast == -1:
tlast = tnow
buf = struct.pack("<IIIIIIIIII",
msg.U0, msg.U1, msg.U2, msg.U3, msg.U4,
msg.U5, msg.U6, msg.U7, msg.U8, msg.U9)[0:msg.Len]
    ibuf = list(bytearray(buf))  # byte values as ints; works on Python 2 and 3
dt = tnow - tlast
tlast = tnow
print(len(ibuf), ibuf, dt)
if args.port:
time.sleep(dt*1.0e-6*args.delay_mul)
port.write(buf)
|
ArduPilot/ardupilot
|
Tools/scripts/rcda_decode.py
|
Python
|
gpl-3.0
| 1,487
|
#!/usr/bin/env python
#-
# Copyright (c) 2006 Verdens Gang AS
# Copyright (c) 2006-2015 Varnish Software AS
# All rights reserved.
#
# Author: Poul-Henning Kamp <phk@phk.freebsd.dk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Generate various .c and .h files for the VCL compiler and the interfaces
# for it.
#######################################################################
# These are our tokens
# We could drop all words such as "include", "if" etc, and use the
# ID type instead, but declaring them tokens makes them reserved words
# which hopefully makes for better error messages.
# XXX: does it actually do that ?
import sys
from os.path import join
srcroot = "../.."
buildroot = "../.."
if len(sys.argv) == 3:
srcroot = sys.argv[1]
buildroot = sys.argv[2]
tokens = {
"T_INC": "++",
"T_DEC": "--",
"T_CAND": "&&",
"T_COR": "||",
"T_LEQ": "<=",
"T_EQ": "==",
"T_NEQ": "!=",
"T_GEQ": ">=",
"T_SHR": ">>",
"T_SHL": "<<",
"T_INCR": "+=",
"T_DECR": "-=",
"T_MUL": "*=",
"T_DIV": "/=",
"T_NOMATCH": "!~",
# Single char tokens, for convenience on one line
None: "{}()*+-/%><=;!&.|~,",
# These have handwritten recognizers
"ID": None,
"CNUM": None,
"CSTR": None,
"EOI": None,
"CSRC": None,
}
#######################################################################
# Our methods and actions
returns = (
###############################################################
# Client side
('recv',
"C",
('synth', 'pass', 'pipe', 'hash', 'purge',)
),
('pipe',
"C",
('synth', 'pipe',)
),
('pass',
"C",
('synth', 'restart', 'fetch',)
),
('hash',
"C",
('lookup',)
),
('purge',
"C",
('synth', 'restart',)
),
('miss',
"C",
('synth', 'restart', 'pass', 'fetch',)
),
('hit',
"C",
('synth', 'restart', 'pass', 'fetch', 'miss', 'deliver',)
),
('deliver',
"C",
('synth', 'restart', 'deliver',)
),
('synth',
"C",
('restart', 'deliver',)
),
###############################################################
# Backend-fetch
('backend_fetch',
"B",
('fetch', 'abandon')
),
('backend_response',
"B",
('deliver', 'retry', 'abandon')
),
('backend_error',
"B",
('deliver', 'retry', 'abandon')
),
###############################################################
# Housekeeping
('init',
"H",
('ok', 'fail')
),
('fini',
"H",
('ok',)
),
)
#######################################################################
# Variables available in sessions
#
# 'all' means all methods
# 'client' means all methods tagged "C"
# 'backend' means all methods tagged "B"
# 'both' means all methods tagged "B" or "C"
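# Each entry below is a 5-tuple: (variable name, VCL type, methods that may
# read it, methods that may write it, documentation string).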
sp_variables = [
('remote.ip',
'IP',
( 'client',),
( ), """
The IP address of the other end of the TCP connection.
	This can either be the client's IP, or the outgoing IP
of a proxy server.
"""
),
('client.ip',
'IP',
( 'client',),
( ), """
The client's IP address.
"""
),
('client.identity',
'STRING',
( 'client',),
( 'client',), """
Identification of the client, used to load balance
in the client director.
"""
),
('local.ip',
'IP',
( 'client',),
( ), """
The IP address of the local end of the TCP connection.
"""
),
('server.ip',
'IP',
( 'client',),
( ), """
The IP address of the socket on which the client
connection was received.
"""
),
('server.hostname',
'STRING',
( 'all',),
( ), """
The host name of the server.
"""
),
('server.identity',
'STRING',
( 'all',),
( ), """
The identity of the server, as set by the -i
parameter. If the -i parameter is not passed to varnishd,
server.identity will be set to the name of the instance, as
specified by the -n parameter.
"""
),
('req',
'HTTP',
( 'client',),
( ), """
The entire request HTTP data structure
"""
),
('req.method',
'STRING',
( 'client',),
( 'client',), """
The request type (e.g. "GET", "HEAD").
"""
),
('req.url',
'STRING',
( 'client',),
( 'client',), """
The requested URL.
"""
),
('req.proto',
'STRING',
( 'client',),
( 'client',), """
The HTTP protocol version used by the client.
"""
),
('req.http.',
'HEADER',
( 'client',),
( 'client',), """
The corresponding HTTP header.
"""
),
('req.restarts',
'INT',
( 'client',),
( ), """
A count of how many times this request has been restarted.
"""
),
('req.esi_level',
'INT',
( 'client',),
( ), """
A count of how many levels of ESI requests we're currently at.
"""
),
('req.ttl',
'DURATION',
( 'client',),
( 'client',), """
"""
),
('req.xid',
'STRING',
( 'client',),
( ), """
Unique ID of this request.
"""
),
('req.esi',
'BOOL',
( 'client',),
( 'client',), """
Boolean. Set to false to disable ESI processing
regardless of any value in beresp.do_esi. Defaults
to true. This variable is subject to change in
future versions, you should avoid using it.
"""
),
('req.can_gzip',
'BOOL',
( 'client',),
( ), """
Does the client accept the gzip transfer encoding.
"""
),
('req.backend_hint',
'BACKEND',
( 'client', ),
( 'client',), """
Set bereq.backend to this if we attempt to fetch.
"""
),
('req.hash_ignore_busy',
'BOOL',
( 'recv',),
( 'recv',), """
Ignore any busy object during cache lookup. You
	would want to do this if you have two servers looking
up content from each other to avoid potential deadlocks.
"""
),
('req.hash_always_miss',
'BOOL',
( 'recv',),
( 'recv',), """
Force a cache miss for this request. If set to true
Varnish will disregard any existing objects and
always (re)fetch from the backend.
"""
),
('req_top.method',
'STRING',
( 'client',),
(), """
The request method of the top-level request in a tree
of ESI requests. (e.g. "GET", "HEAD").
Identical to req.method in non-ESI requests.
"""
),
('req_top.url',
'STRING',
( 'client',),
(), """
The requested URL of the top-level request in a tree
of ESI requests.
Identical to req.url in non-ESI requests.
"""
),
('req_top.http.',
'HEADER',
( 'client',),
(), """
HTTP headers of the top-level request in a tree of ESI requests.
Identical to req.http. in non-ESI requests.
"""
),
('req_top.proto',
'STRING',
( 'client',),
(), """
HTTP protocol version of the top-level request in a tree of
ESI requests.
Identical to req.proto in non-ESI requests.
"""
),
('bereq',
'HTTP',
( 'backend',),
( ), """
The entire backend request HTTP data structure
"""
),
('bereq.xid',
'STRING',
( 'backend',),
( ), """
Unique ID of this request.
"""
),
('bereq.retries',
'INT',
( 'backend',),
( ), """
A count of how many times this request has been retried.
"""
),
('bereq.backend',
'BACKEND',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
This is the backend or director we attempt to fetch from.
"""
),
('bereq.method',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The request type (e.g. "GET", "HEAD").
"""
),
('bereq.url',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The requested URL.
"""
),
('bereq.proto',
'STRING',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The HTTP protocol version used to talk to the server.
"""
),
('bereq.http.',
'HEADER',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The corresponding HTTP header.
"""
),
('bereq.uncacheable',
'BOOL',
( 'backend', ),
( ), """
Indicates whether this request is uncacheable due
to a pass in the client side or a hit on an existing
uncacheable object (aka hit-for-pass).
"""
),
('bereq.connect_timeout',
'DURATION',
( 'pipe', 'backend', ),
( 'pipe', 'backend', ), """
The time in seconds to wait for a backend connection.
"""
),
('bereq.first_byte_timeout',
'DURATION',
( 'backend', ),
( 'backend', ), """
The time in seconds to wait for the first byte from
the backend. Not available in pipe mode.
"""
),
('bereq.between_bytes_timeout',
'DURATION',
( 'backend', ),
( 'backend', ), """
The time in seconds to wait between each received byte from the
backend. Not available in pipe mode.
"""
),
('beresp',
'HTTP',
( 'backend_response', 'backend_error'),
( ), """
The entire backend response HTTP data structure
"""
),
('beresp.proto',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
	The HTTP protocol version the backend replied with.
"""
),
('beresp.status',
'INT',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The HTTP status code returned by the server.
"""
),
('beresp.reason',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The HTTP status message returned by the server.
"""
),
('beresp.http.',
'HEADER',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The corresponding HTTP header.
"""
),
('beresp.do_esi',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. ESI-process the object after fetching it.
Defaults to false. Set it to true to parse the
object for ESI directives. Will only be honored if
req.esi is true.
"""
),
('beresp.do_stream',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Deliver the object to the client directly without
fetching the whole object into varnish. If this
request is pass'ed it will not be stored in memory.
"""
),
('beresp.do_gzip',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. Gzip the object before storing it. Defaults
to false. When http_gzip_support is on Varnish will
request already compressed content from the backend
and as such compression in Varnish is not needed.
"""
),
('beresp.do_gunzip',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Boolean. Unzip the object before storing it in the
cache. Defaults to false.
"""
),
('beresp.was_304',
'BOOL',
( 'backend_response', 'backend_error'),
( ), """
	Boolean. True if this is a successful 304 response to a
	backend conditional request refreshing an existing
	cache object.
"""
),
('beresp.uncacheable',
'BOOL',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Inherited from bereq.uncacheable, see there.
Setting this variable makes the object uncacheable, which may
get stored as a hit-for-pass object in the cache.
Clearing the variable has no effect and will log the warning
"Ignoring attempt to reset beresp.uncacheable".
"""
),
('beresp.ttl',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
The object's remaining time to live, in seconds.
"""
),
('beresp.age',
'DURATION',
( 'backend_response', 'backend_error'),
( ), """
The age of the object.
"""
),
('beresp.grace',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Set to a period to enable grace.
"""
),
('beresp.keep',
'DURATION',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Set to a period to enable conditional backend requests.
The keep time is cache lifetime in addition to the ttl.
Objects with ttl expired but with keep time left may be used
to issue conditional (If-Modified-Since / If-None-Match)
requests to the backend to refresh them.
"""
),
('beresp.backend',
'BACKEND',
( 'backend_response', 'backend_error'),
( ), """
This is the backend we fetched from. If bereq.backend
was set to a director, this will be the backend selected
by the director.
"""
),
('beresp.backend.name',
'STRING',
( 'backend_response', 'backend_error'),
( ), """
Name of the backend this response was fetched from.
"""
),
('beresp.backend.ip',
'IP',
( 'backend_response', 'backend_error'),
( ), """
IP of the backend this response was fetched from.
"""
),
('beresp.storage_hint',
'STRING',
( 'backend_response', 'backend_error'),
( 'backend_response', 'backend_error'), """
Hint to Varnish that you want to save this object to a
particular storage backend.
"""
),
('obj.proto',
'STRING',
( 'hit', ),
( ), """
The HTTP protocol version used when the object was retrieved.
"""
),
('obj.status',
'INT',
( 'hit',),
( ), """
The HTTP status code returned by the server.
"""
),
('obj.reason',
'STRING',
( 'hit',),
( ), """
The HTTP status message returned by the server.
"""
),
('obj.hits',
'INT',
( 'hit', 'deliver',),
( ), """
The count of cache-hits on this object. A value of 0 indicates a
cache miss.
"""
),
('obj.http.',
'HEADER',
( 'hit', ),
( ), """
The corresponding HTTP header.
"""
),
('obj.ttl',
'DURATION',
( 'hit', ),
( ), """
The object's remaining time to live, in seconds.
"""
),
('obj.age',
'DURATION',
( 'hit', ),
( ), """
The age of the object.
"""
),
('obj.grace',
'DURATION',
( 'hit', ),
( ), """
The object's remaining grace period in seconds.
"""
),
('obj.keep',
'DURATION',
( 'hit', ),
( ), """
The object's remaining keep period in seconds.
"""
),
('obj.uncacheable',
'BOOL',
( 'deliver', ),
( ), """
Whether the object is uncacheable (pass or hit-for-pass).
"""
),
('resp',
'HTTP',
( 'deliver', 'synth'),
( ), """
The entire response HTTP data structure.
"""
),
('resp.proto',
'STRING',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP protocol version to use for the response.
"""
),
('resp.status',
'INT',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP status code that will be returned.
Assigning a HTTP standardized code to resp.status will also
set resp.reason to the corresponding status message.
"""
),
('resp.reason',
'STRING',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The HTTP status message that will be returned.
"""
),
('resp.http.',
'HEADER',
( 'deliver', 'synth', ),
( 'deliver', 'synth', ), """
The corresponding HTTP header.
"""
),
('resp.is_streaming',
'BOOL',
( 'deliver', 'synth', ),
( ), """
Returns true when the response will be streamed
from the backend.
"""
),
('now',
'TIME',
( 'all',),
( ), """
The current time, in seconds since the epoch. When
used in string context it returns a formatted string.
"""
),
]
# Backwards compatibility:
aliases = [
]
stv_variables = (
('free_space', 'BYTES', "0.", 'storage.<name>.free_space', """
Free space available in the named stevedore. Only available for
the malloc stevedore.
"""),
('used_space', 'BYTES', "0.", 'storage.<name>.used_space', """
Used space in the named stevedore. Only available for the malloc
stevedore.
"""),
('happy', 'BOOL', "0", 'storage.<name>.happy', """
Health status for the named stevedore. Not available in any of the
current stevedores.
"""),
)
#######################################################################
# VCL to C type conversion
vcltypes = {
'STRING_LIST': "void*",
}
fi = open(join(srcroot, "include/vrt.h"))
for i in fi:
    j = i.split()
if len(j) < 3:
continue
if j[0] != "typedef":
continue
if j[-1][-1] != ";":
continue
if j[-1][-2] == ")":
continue
if j[-1][:4] != "VCL_":
continue
d = " ".join(j[1:-1])
vcltypes[j[-1][4:-1]] = d
fi.close()
#######################################################################
# Nothing is easily configurable below this line.
#######################################################################
import copy
#######################################################################
# Emit a function to recognize tokens in a string
def emit_vcl_fixed_token(fo, tokens):
recog = list()
emit = dict()
for i in tokens:
j = tokens[i]
        if j is not None:
recog.append(j)
emit[j] = i
recog.sort()
rrecog = copy.copy(recog)
rrecog.sort(key = lambda x: -len(x))
fo.write("""
#define M1()\tdo {*q = p + 1; return (p[0]); } while (0)
#define M2(c,t)\tdo {if (p[1] == (c)) { *q = p + 2; return (t); }} while (0)
unsigned
vcl_fixed_token(const char *p, const char **q)
{
\tswitch (p[0]) {
""")
last_initial = None
for i in recog:
if (i[0] == last_initial):
continue
last_initial = i[0]
fo.write("\tcase '%s':\n" % last_initial)
need_ret = True
for j in rrecog:
if (j[0] != last_initial):
continue
if len(j) == 2:
fo.write("\t\tM2('%s', %s);\n" %
(j[1], emit[j]))
elif len(j) == 1:
fo.write("\t\tM1();\n")
need_ret = False
else:
fo.write("\t\tif (")
k = 1
l = len(j)
while (k < l):
fo.write("p[%d] == '%s'" % (k, j[k]))
fo.write(" &&")
if (k % 3) == 0:
fo.write("\n\t\t ")
else:
fo.write(" ")
k += 1
fo.write("!isvar(p[%d])) {\n" % l)
fo.write("\t\t\t*q = p + %d;\n" % l)
fo.write("\t\t\treturn (%s);\n" % emit[j])
fo.write("\t\t}\n")
if need_ret:
fo.write("\t\treturn (0);\n")
fo.write("\tdefault:\n\t\treturn (0);\n\t}\n}\n")
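# The generated recognizer tries longer spellings first (rrecog is sorted by
# descending length), so for input "++" it returns T_INC rather than two
# single '+' tokens.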
#######################################################################
# Emit the vcl_tnames (token->string) conversion array
def emit_vcl_tnames(fo, tokens):
fo.write("\nconst char * const vcl_tnames[256] = {\n")
l = list(tokens.keys())
l.sort()
for i in l:
j = tokens[i]
        if j is None:
j = i
if i[0] == "'":
j = i
fo.write("\t[%s] = \"%s\",\n" % (i, j))
fo.write("};\n")
#######################################################################
# Read a C-source file and spit out code that outputs it with VSB_cat()
def emit_file(fo, fd, bn):
fn = join(fd, bn)
fi = open(fn)
fc = fi.read()
fi.close()
w = 66 # Width of lines, after white space prefix
maxlen = 10240 # Max length of string literal
x = 0
l = 0
fo.write("\n\t/* %s */\n\n" % fn)
fo.write('\tVSB_cat(sb, "/* ---===### %s ###===--- */\\n\\n");\n' % bn)
for c in fc:
if l == 0:
fo.write("\tVSB_cat(sb, \"")
l += 12
x += 12
if x == 0:
fo.write("\t \"")
d = c
if c == '\n':
d = "\\n"
elif c == '\t':
d = "\\t"
elif c == '"':
d = "\\\""
elif c == '\\':
d = "\\\\"
if c == '\n' and x > w - 20:
fo.write(d + "\"\n")
x = 0
continue
if c.isspace() and x > w - 10:
fo.write(d + "\"\n")
x = 0
continue
fo.write(d)
x += len(d)
l += len(d)
if l > maxlen:
fo.write("\");\n")
            l = 0
x = 0
if x > w - 3:
fo.write("\"\n")
x = 0
if x != 0:
fo.write("\"\n")
if l != 0:
fo.write("\t);\n")
fo.write('\tVSB_cat(sb, "\\n");\n')
#######################################################################
def polish_tokens(tokens):
# Expand single char tokens
st = tokens[None]
del tokens[None]
for i in st:
tokens["'" + i + "'"] = i
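# For example, {None: "{}", "T_EQ": "=="} becomes
# {"'{'": "{", "'}'": "}", "T_EQ": "=="} after this call.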
#######################################################################
def file_header(fo):
fo.write("""/*
* NB: This file is machine generated, DO NOT EDIT!
*
* Edit and run generate.py instead
*/
""")
#######################################################################
polish_tokens(tokens)
fo = open(join(buildroot, "lib/libvcc/vcc_token_defs.h"), "w")
file_header(fo)
j = 128
l = list(tokens.keys())
l.sort()
for i in l:
if i[0] == "'":
continue
fo.write("#define\t%s %d\n" % (i, j))
j += 1
assert j < 256
fo.close()
#######################################################################
rets = dict()
vcls = list()
vcls_client = list()
vcls_backend = list()
for i in returns:
vcls.append(i[0])
for j in i[1]:
if j == "B":
vcls_backend.append(i[0])
elif j == "C":
vcls_client.append(i[0])
for j in i[2]:
rets[j] = True
#######################################################################
fo = open(join(buildroot, "include/tbl/vcl_returns.h"), "w")
file_header(fo)
fo.write("\n/*lint -save -e525 -e539 */\n")
fo.write("\n#ifdef VCL_RET_MAC\n")
l = list(rets.keys())
l.sort()
ll = list(returns)
ll.sort()
for i in l:
fo.write("VCL_RET_MAC(%s, %s" % (i.lower(), i.upper()))
s=",\n\t"
for j in ll:
if i in j[2]:
fo.write("%sVCL_MET_%s" % (s, j[0].upper()))
s = " |\n\t"
fo.write("\n)\n")
fo.write("#endif\n")
fo.write("\n#ifdef VCL_MET_MAC\n")
for i in ll:
fo.write("VCL_MET_MAC(%s, %s, %s," %
(i[0].lower(), i[0].upper(), i[1]))
p = " (\n\t"
lll = list(i[2])
lll.sort()
for j in lll:
fo.write("%s(1U << VCL_RET_%s)" % (p, j.upper()))
p = " |\n\t"
fo.write("\n))\n")
fo.write("#endif\n")
fo.write("\n/*lint -restore */\n")
fo.close()
#######################################################################
fo = open(join(buildroot, "include/vcl.h"), "w")
file_header(fo)
fo.write("""
struct vrt_ctx;
#define VRT_CTX const struct vrt_ctx *ctx
struct req;
struct busyobj;
struct ws;
struct cli;
struct worker;
enum vcl_event_e {
VCL_EVENT_LOAD,
VCL_EVENT_WARM,
VCL_EVENT_USE,
VCL_EVENT_COLD,
VCL_EVENT_DISCARD,
};
typedef int vcl_event_f(VRT_CTX, enum vcl_event_e);
typedef int vcl_init_f(VRT_CTX);
typedef void vcl_fini_f(VRT_CTX);
typedef int vcl_func_f(VRT_CTX);
""")
def tbl40(a, b):
while len(a.expandtabs()) < 40:
a += "\t"
return a + b
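# e.g. tbl40("#define VCL_MET_MAX", "14\n") pads the first column with tabs
# to (at least) 40 columns before appending the second.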
fo.write("\n/* VCL Methods */\n")
n = 1
for i in returns:
fo.write(
tbl40("#define VCL_MET_%s" % i[0].upper(), "(1U << %d)\n" % n)
)
n += 1
fo.write("\n" + tbl40("#define VCL_MET_MAX", "%d\n" % n))
fo.write("\n" + tbl40("#define VCL_MET_MASK", "0x%x\n" % ((1 << n) - 1)))
fo.write("\n/* VCL Returns */\n")
n = 0
l = list(rets.keys())
l.sort()
for i in l:
fo.write(tbl40("#define VCL_RET_%s" % i.upper(), "%d\n" % n))
n += 1
fo.write("\n" + tbl40("#define VCL_RET_MAX", "%d\n" % n))
fo.write("""
struct VCL_conf {
unsigned magic;
#define VCL_CONF_MAGIC 0x7406c509 /* from /dev/random */
struct director **default_director;
const struct vrt_backend_probe *default_probe;
unsigned nref;
struct vrt_ref *ref;
unsigned nsrc;
const char **srcname;
const char **srcbody;
vcl_event_f *event_vcl;
""")
for i in returns:
fo.write("\tvcl_func_f\t*" + i[0] + "_func;\n")
fo.write("""
};
""")
fo.close()
#######################################################################
def restrict(fo, spec):
d = dict()
for j in spec:
if j == 'all':
for i in vcls:
d[i] = True
elif j == 'backend':
for i in vcls_backend:
d[i] = True
elif j == 'client':
for i in vcls_client:
d[i] = True
elif j == 'both':
for i in vcls_client:
d[i] = True
for i in vcls_backend:
d[i] = True
else:
assert j in vcls
d[j] = True
p = ""
n = 0
l = list(d.keys())
l.sort()
w = 0
fo.write("\t\t")
for j in l:
x = p + "VCL_MET_" + j.upper()
if w + len(x) > 60:
fo.write("\n\t\t")
w = 0
fo.write(x)
w += len(x)
p = " | "
if len(d) == 0:
fo.write("0")
fo.write(",\n")
#######################################################################
fh = open(join(buildroot, "include/vrt_obj.h"), "w")
file_header(fh)
fo = open(join(buildroot, "lib/libvcc/vcc_obj.c"), "w")
file_header(fo)
fo.write("""
#include "config.h"
#include <stdio.h>
#include "vcc_compile.h"
const struct var vcc_vars[] = {
""")
def one_var(nm, spec):
fh.write("\n")
typ = spec[1]
cnam = i[0].replace(".", "_")
ctyp = vcltypes[typ]
fo.write("\t{ \"%s\", %s, %d,\n" % (nm, typ, len(nm)))
if len(spec[2]) == 0:
fo.write('\t NULL,\t/* No reads allowed */\n')
elif typ == "HEADER":
fo.write('\t "HDR_')
fo.write(nm.split(".")[0].upper())
fo.write('",\n')
else:
fo.write('\t "VRT_r_%s(ctx)",\n' % cnam)
if nm == i[0]:
fh.write("VCL_" + typ +
" VRT_r_%s(VRT_CTX);\n" % cnam )
restrict(fo, spec[2])
if len(spec[3]) == 0:
fo.write('\t NULL,\t/* No writes allowed */\n')
elif typ == "HEADER":
fo.write('\t "HDR_')
fo.write(nm.split(".")[0].upper())
fo.write('",\n')
else:
fo.write('\t "VRT_l_%s(ctx, ",\n' % cnam)
if nm == i[0]:
fh.write(
"void VRT_l_%s(VRT_CTX, " % cnam)
if typ != "STRING":
fh.write("VCL_" + typ + ");\n")
else:
fh.write(ctyp + ", ...);\n")
restrict(fo, spec[3])
fo.write("\t},\n")
sp_variables.sort()
aliases.sort()
for i in sp_variables:
one_var(i[0], i)
for j in aliases:
if j[1] == i[0]:
one_var(j[0], i)
fo.write("\t{ NULL }\n};\n")
fh.write("\n")
for i in stv_variables:
fh.write(vcltypes[i[1]] + " VRT_Stv_" + i[0] + "(const char *);\n")
fo.close()
fh.close()
#######################################################################
fo = open(join(buildroot, "lib/libvcc/vcc_fixed_token.c"), "w")
file_header(fo)
fo.write("""
#include "config.h"
#include <ctype.h>
#include <stdio.h>
#include "vcc_compile.h"
""")
emit_vcl_fixed_token(fo, tokens)
emit_vcl_tnames(fo, tokens)
fo.write("""
void
vcl_output_lang_h(struct vsb *sb)
{
""")
emit_file(fo, srcroot, "include/vdef.h")
emit_file(fo, buildroot, "include/vcl.h")
emit_file(fo, srcroot, "include/vrt.h")
emit_file(fo, buildroot, "include/vrt_obj.h")
fo.write("""
}
""")
fo.close()
#######################################################################
ft = open(join(buildroot, "include/tbl/vcc_types.h"), "w")
file_header(ft)
ft.write("/*lint -save -e525 -e539 */\n")
i = list(vcltypes.keys())
i.sort()
for j in i:
ft.write("VCC_TYPE(" + j + ")\n")
ft.write("/*lint -restore */\n")
ft.close()
#######################################################################
fo = open(join(buildroot, "include/tbl/vrt_stv_var.h"), "w")
file_header(fo)
fo.write("""
#ifndef VRTSTVTYPE
#define VRTSTVTYPE(ct)
#define VRTSTVTYPEX
#endif
#ifndef VRTSTVVAR
#define VRTSTVVAR(nm, vtype, ctype, dval)
#define VRTSTVVARX
#endif
""")
x=dict()
for i in stv_variables:
ct = vcltypes[i[1]]
if not ct in x:
fo.write("VRTSTVTYPE(" + ct + ")\n")
x[ct] = 1
fo.write("VRTSTVVAR(" + i[0] + ",\t" + i[1] + ",\t")
fo.write(ct + ",\t" + i[2] + ")")
fo.write("\n")
fo.write("""
#ifdef VRTSTVTYPEX
#undef VRTSTVTYPEX
#undef VRTSTVTYPE
#endif
#ifdef VRTSTVVARX
#undef VRTSTVVARX
#undef VRTSTVVAR
#endif
""")
fo.close()
#######################################################################
fp_vclvar = open(join(buildroot, "doc/sphinx/include/vcl_var.rst"), "w")
l = list()
for i in sp_variables:
l.append(i)
l.sort()
def rst_where(fo, h, l):
ll = list()
if len(l) == 0:
return
fo.write("\t" + h)
s = ""
for j in l:
if j == "both":
ll.append("client")
ll.append("backend")
elif j == "client":
ll.append(j)
elif j == "backend":
ll.append(j)
elif j == "all":
ll.append(j)
else:
ll.append("vcl_" + j)
for j in ll:
fo.write(s + j)
s = ", "
fo.write("\n\n")
hdr=""
for i in l:
j = i[0].split(".")
if j[0] != hdr:
fp_vclvar.write("\n" + j[0] + "\n")
fp_vclvar.write("~" * len(j[0]) + "\n")
hdr = j[0]
fp_vclvar.write("\n" + i[0] + "\n\n")
fp_vclvar.write("\tType: " + i[1] + "\n\n")
rst_where(fp_vclvar, "Readable from: ", i[2])
rst_where(fp_vclvar, "Writable from: ", i[3])
for j in i[4].split("\n"):
fp_vclvar.write("\t%s\n" % j.strip())
hdr="storage"
fp_vclvar.write("\n" + hdr + "\n")
fp_vclvar.write("~" * len(hdr) + "\n")
for i in stv_variables:
fp_vclvar.write("\n" + i[3] + "\n\n")
fp_vclvar.write("\tType: " + i[1] + "\n\n")
fp_vclvar.write("\tReadable from: client, backend\n\n")
for j in i[4].split("\n"):
fp_vclvar.write("\t%s\n" % j.strip())
fp_vclvar.close()
|
varnish/Varnish-Cache
|
lib/libvcc/generate.py
|
Python
|
bsd-2-clause
| 28,728
|
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
copy_user test.
"""
import os
import mutlib
from mysql.utilities.common.user import User
from mysql.utilities.exception import MUTLibError, UtilDBError, UtilError
class test(mutlib.System_test):
"""copy user
This test copies a user from one server to another copying all grants.
"""
server1 = None
server2 = None
need_server = False
def check_prerequisites(self):
# Need at least one server.
self.server1 = None
self.server2 = None
self.need_server = False
if not self.check_num_servers(2):
self.need_server = True
return self.check_num_servers(1)
def setup(self):
self.server1 = self.servers.get_server(0)
if self.need_server:
try:
self.servers.spawn_new_servers(2)
except MUTLibError:
raise MUTLibError("Cannot spawn needed servers.")
self.server2 = self.servers.get_server(1)
self.drop_all()
data_file = "./std_data/basic_users.sql"
try:
self.server1.read_and_exec_SQL(data_file, self.debug)
except MUTLibError as err:
raise MUTLibError(
"Failed to read commands from file {0}: "
"{1}".format(data_file, err.errmsg))
return True
    def show_user_grants(self, server, user):
        """Show user grants.
server[in] Server instance.
user[in] User.
"""
query = "SHOW GRANTS FOR {0}".format(user)
try:
res = server.exec_query(query)
if res is not None:
for row in res:
self.results.append(row[0] + "\n")
except UtilError:
raise MUTLibError("Cannot get grants for {0}.".format(user))
def run(self):
self.res_fname = "result.txt"
from_conn = "--source={0}".format(
self.build_connection_string(self.server1))
to_conn = "--destination={0}".format(
self.build_connection_string(self.server2))
cmd_str = "mysqluserclone.py {0} {1} ".format(from_conn, to_conn)
# Test case 1 - copy a user to a single user
test_num = 1
comment = ("Test case {0} - copy a single user joe_pass@user to a "
"single user: jill@user".format(test_num))
res = self.run_test_case(0, cmd_str + " joe_pass@user jill:duh@user",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
self.show_user_grants(self.server2, "'jill'@'user'")
# Test case 2 - copy a user to a multiple users
test_num += 1
comment = ("Test case {0} - copy a single user amy_nopass@user to "
"multiple users: jack@user and john@user".format(test_num))
res = self.run_test_case(0,
cmd_str + " amy_nopass@user " +
"jack:duh@user john@user",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
self.show_user_grants(self.server2, "jack@user")
self.show_user_grants(self.server2, "john@user")
# Test case 3 - attempt to copy a non-existent user
test_num += 1
comment = ("Test case {0} - attempt to copy a non-existent "
"user".format(test_num))
res = self.run_test_case(1, cmd_str + " nosuch@user jack@user",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
# Test case 4 - attempt to copy a user to a user that already exists
test_num += 1
comment = ("Test case {0} - attempt to copy a user to a user that "
"already exists".format(test_num))
res = self.run_test_case(1, cmd_str + " joe_pass@user jill:duh@user",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
# Test case 5 - attempt to copy a user to a user that already exists
# with overwrite
test_num += 1
self.show_user_grants(self.server2, "jill@user")
comment = ("Test case {0} - attempt to copy a user to a user that "
"already exists with --force".format(test_num))
res = self.run_test_case(0,
cmd_str + " joe_pass@user " +
"jill:duh@user --force",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
        # Now show the overwritten grants
self.show_user_grants(self.server2, "jill@user")
# Now show how --include-global-privileges works.
try:
self.server1.exec_query("CREATE USER joe_pass@'%'")
self.server1.exec_query("GRANT ALL ON util_test.* TO "
"joe_pass@'%'")
except UtilDBError as err:
raise MUTLibError("Cannot create user with global grants: "
"{0}".format(err.errmsg))
test_num += 1
comment = ("Test case {0} - show clone without "
"--include-global-privileges".format(test_num))
res = self.run_test_case(0,
cmd_str + " -v joe_pass@user " +
"joe_nopass@user --force ",
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
test_num += 1
comment = ("Test case {0} - show clone with "
"--include-global-privileges".format(test_num))
res = self.run_test_case(0,
cmd_str + (" -v joe_pass@user "
"joe_nopass@user --force "
"--include-global-privileges"),
comment)
if not res:
raise MUTLibError("{0}: failed".format(comment))
self.replace_substring("on [::1]", "on localhost")
# Mask password field on grant statements since it stopped appearing on
# versions >= 5.7.6
self.replace_substring_portion(" IDENTIFIED BY PASSWORD '", "'", "")
self.replace_result("GRANT USAGE ON *.* TO 'joe_nopass'@'user'",
"GRANT USAGE ON *.* TO 'joe_nopass'@'user'\n")
return True
def get_result(self):
return self.compare(__name__, self.results)
def record(self):
return self.save_result_file(__name__, self.results)
@staticmethod
def drop_user(user_name, server):
"""Drops user.
user_name[in] User.
server[in] Server instance.
"""
user = User(server, user_name)
if user.exists():
res = user.drop()
if not res:
print("cleanup: failed to drop user {0}".format(user_name))
return True
def drop_all(self):
"""Drops all database and users created.
"""
user_drop_lst = ["joe_pass@'%'", "joe_pass@user",
"'joe_nopass'@'user'", "'amy_nopass'@'user'",
"'jill'@'user'", "'jack'@'user'", "'john'@'user'",
"'remote'@'%'"]
for user in user_drop_lst:
self.drop_user(user, self.server1)
self.drop_user(user, self.server2)
query = "DROP DATABASE util_test"
try:
self.server1.exec_query(query)
except UtilError:
pass
try:
self.server2.exec_query(query)
except UtilError:
pass
def cleanup(self):
try:
os.unlink(self.res_fname)
except OSError:
pass
self.drop_all()
return True
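# The command under test is built from connection strings, roughly (a
# sketch; host and credential values are illustrative placeholders):
#
#   mysqluserclone.py --source=root@host1:3306 --destination=root@host2:3306 \
#       joe_pass@user jill:duh@user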
|
mysql/mysql-utilities
|
mysql-test/t/copy_user.py
|
Python
|
gpl-2.0
| 8,697
|
"""Example of filter functions
String values from templated files and objects are passed through this function. Values can be manipulated and passed
back to be used as jinja2 templated values.
"""
class FilterFunctions(object):
def test_filtering(self, contents): # pylint: disable=no-self-use
"""Function to test filtering for unit testing"""
if contents == "test_filter":
return "text_has_been_filtered"
return contents
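# Minimal usage sketch: appetite passes templated string values through each
# filter method; here the test filter is exercised directly.
if __name__ == "__main__":
    _filters = FilterFunctions()
    print(_filters.test_filtering("test_filter"))    # -> text_has_been_filtered
    print(_filters.test_filtering("anything else"))  # passed through unchanged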
|
Bridgewater/appetite
|
tests/filters/example.py
|
Python
|
apache-2.0
| 468
|
import urllib
import volatility.addrspace as addrspace
from PyFDP import FDP
class PyFDPAddressSpace(addrspace.BaseAddressSpace):
"""
FDP AddressSpace for volatility
"""
order = 1
def __init__(self, base, config, layered=False, **kwargs):
addrspace.BaseAddressSpace.__init__(self, base, config, **kwargs)
        self.as_assert(base is None or layered, "Must be first Address Space")
self.as_assert(
config.LOCATION.startswith("fdp://"),
"Location doesn't start with fdp://")
self.config = dict(inittype="partial")
self.name = urllib.url2pathname(config.LOCATION[6:])
self.config['name'] = self.name
self.fdp = FDP.FDP(self.config['name'])
        self.as_assert(self.fdp is not None, "VM not found")
        # the CR3 register constant is exposed by the PyFDP.FDP module
        self.dtb = self.fdp.ReadRegister(0, FDP.FDP_CR3_REGISTER)
self.PhysicalMemorySize = self.fdp.GetPhysicalMemorySize()
def read(self, PhysicalAddress, ReadSize):
return self.fdp.ReadPhysicalMemory(PhysicalAddress, ReadSize)
def zread(self, PhysicalAddress, ReadSize):
Buffer = self.read(PhysicalAddress, ReadSize)
if Buffer is None:
Buffer = "\x00" * ReadSize
elif len(Buffer) != ReadSize:
Buffer += "\x00" * (ReadSize - len(Buffer))
return Buffer
def is_valid_address(self, PhysicalAddress):
        if PhysicalAddress is None:
return False
return 0 <= PhysicalAddress < self.PhysicalMemorySize
def write(self, PhysicalAddress, Buffer):
return False
def get_cr3(self):
return self.dtb
def get_available_addresses(self):
yield (0, self.PhysicalMemorySize)
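# Minimal usage sketch (the VM name and the config object are illustrative;
# volatility normally constructs both from the command line):
#
#   config.LOCATION = "fdp://vm0"
#   space = PyFDPAddressSpace(None, config)
#   page = space.zread(0x1000, 0x1000)
#
# zread() behaves like read() but pads short or failed reads with "\x00",
# so callers always receive exactly ReadSize bytes.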
|
Winbagility/Winbagility
|
bindings/python/PyFDP/volFDP.py
|
Python
|
mit
| 1,709
|
#!/usr/bin/env python
import numpy as np
import tensorflow as tf
# Model parameters
W = tf.Variable([.3])
b = tf.Variable([-.3])
# Model input and output
x = tf.placeholder(tf.float32)
linear_model = W * x + b
y = tf.placeholder(tf.float32)
# loss
loss = tf.reduce_sum(tf.square(linear_model - y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
# training data
x_train = [1, 2, 3, 4]
y_train = [0, -1, -2, -3]
# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init) # reset values
for i in range(10000):
sess.run(train, {x:x_train, y:y_train})
# evaluate training accuracy
curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:x_train, y:y_train})
print("W: %s, b: %s, loss: %s"%(curr_W, curr_b, curr_loss))
|
DeercoderResearch/PhD
|
TensorFlow/ex2/complete_code.py
|
Python
|
mit
| 804
|
#! /usr/bin/env python
## @package pygraylog.server
# This package is used to manage Graylog servers using its remote API thanks to requests.
#
import requests
import pygraylog
#from pygraylog.users import User
## The class used to connect to a Graylog instance
class Server:
## This is the constructor.
def __init__(self, hostname, port=12900, ssl=False, ssl_verify=True):
self.error_msg = ""
self._auth_configured = False
self._data = None
if ssl == True:
proto = 'https'
else:
proto = 'http'
self.url = "%s://%s:%s/api" % ( proto, hostname, port )
self.session = requests.Session()
if ssl == True and ssl_verify == True:
self.session.verify = True
else:
self.session.verify = False
def get_users(self):
_url = "%s/users" % (self.url)
r = self.session.get(_url)
self._handle_request_status_code(r)
if r.status_code == 200:
_result = []
for json_user in r.json()['users']:
user = pygraylog.users.User(self)
try:
user.load_from_json(json_user)
except:
print user.error_msg
return None
_result.append(user)
return _result
self.error_msg = "Bad status code: %s" % (r.status_code)
raise IOError
# def auth_by_token(self, token):
# if self._auth_configured == False:
# self.session.headers.update({ 'Accept' : 'application/json', 'Content-Type' : 'application/json', 'Authorization' : "Bearer %s" % token })
# self._auth_configured = True
# else:
# self.error_msg = "Authentication already configured."
# raise ValueError
def auth_by_auth_basic(self, user, password):
if self._auth_configured == False:
self.session.headers.update({ 'Accept' : 'application/json', 'Content-Type' : 'application/json'})
self.session.auth = (user, password)
self._auth_configured = True
else:
self.error_msg = "Authentication already configured."
raise ValueError
def _handle_request_status_code(self, r):
if r.status_code >= 500:
self.error_msg = r.text
raise IOError
if r.status_code >= 400:
self.error_msg = r.text
raise ValueError
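# Minimal usage sketch (hostname and credentials are placeholders; the
# attributes available on each returned user depend on pygraylog.users.User):
#
#   server = Server("graylog.example.com", port=12900, ssl=False)
#   server.auth_by_auth_basic("admin", "secret")
#   for user in server.get_users():
#       print user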
|
MisterG/pygraylog
|
pygraylog/server.py
|
Python
|
gpl-3.0
| 2,067
|
from django.apps import AppConfig
class OaConfig(AppConfig):
name = 'oa'
|
htwenhe/DJOA
|
oa/apps.py
|
Python
|
mit
| 79
|
from contrib.rfc3736 import builder
from contrib.rfc3736.constants import *
from scapy.all import *
from veripy.assertions import *
from veripy.models import ComplianceTestCase
class DHCPv6Helper(ComplianceTestCase):
def build_dhcpv6_advertisement(self, s, server, client, options=True, ias=True, T1=300, T2=300):
a = DHCP6_Advertise(trid=s.trid)/ \
DHCP6OptServerId(duid=builder.duid(server.iface(0).ll_addr))/ \
DHCP6OptClientId(duid=s[DHCP6OptClientId].duid)
if options:
for option in builder.options(s[DHCP6OptOptReq].reqopts):
a = a/option
if ias:
for ia in builder.ias(s[DHCP6OptIA_NA], client, T1, T2):
a = a/ia
a = a/DHCP6OptPref()
return a
def build_dhcpv6_confirm(self, server, client, ip, iaid=0x87654322, trid=0x1235, T1=300, T2=300):
p = DHCP6_Confirm(trid=trid)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptIA_NA(iaid=iaid, T1=T1, T2=T2, ianaopts=DHCP6OptIAAddress(addr=ip))
return p
def build_dhcpv6_decline(self, q, server, client, T1=5400, T2=3600):
p = DHCP6_Decline(trid=q.trid)/ \
DHCP6OptServerId(duid=q[DHCP6OptServerId].duid)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptIA_NA(iaid=q.iaid, T1=T1, T2=T2, ianaopts=DHCP6OptIAAddress(addr=q[DHCP6OptIAAddress].addr))
return p
def build_dhcpv6_reply(self, q, server, client, ias=True, T1=300, T2=300, trid=None, dns_servers=[], dns_domains=[], pref=True, server_id=True):
        p = DHCP6_Reply(trid=(q.trid if trid is None else trid))/ \
DHCP6OptClientId(duid=q[DHCP6OptClientId].duid)
if server_id:
p = p/DHCP6OptServerId(duid=builder.duid(server.iface(0).ll_addr))
if ias:
for ia in builder.ias(q[DHCP6OptIA_NA], client, T1, T2):
p = p/ia
if len(dns_servers) > 0:
p = p/DHCP6OptDNSServers(dnsservers=dns_servers)
if len(dns_domains) > 0:
p = p/DHCP6OptDNSDomains(dnsdomains=dns_domains)
if pref:
p = p/DHCP6OptPref()
return p
def build_dhcpv6_rebind(self, p, server, client):
p = DHCP6_Rebind(trid=p.trid+1)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptServerId(duid=p[DHCP6OptServerId].duid)/ \
DHCP6OptIA_NA(iaid=p[DHCP6OptIA_NA].iaid + 1, ianaopts=DHCP6OptIAAddress(addr=self.ip_from(p)))
return p
def build_dhcpv6_release(self, p, server, client):
p = DHCP6_Release(trid=p.trid+1)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptServerId(duid=p[DHCP6OptServerId].duid)/ \
DHCP6OptIA_NA(iaid=p[DHCP6OptIA_NA].iaid + 1, ianaopts=DHCP6OptIAAddress(addr=self.ip_from(p)))
return p
def build_dhcpv6_request(self, a, server, client):
p = DHCP6_Request(trid=a.trid)/ \
DHCP6OptClientId(duid=builder.duid(self.node(1).iface(0).ll_addr))/ \
DHCP6OptServerId(duid=a[DHCP6OptServerId].duid)/ \
DHCP6OptIA_NA(ianaopts=DHCP6OptIAAddress(addr=a[DHCP6OptIAAddress].addr))
return p
def build_dhcpv6_renew(self, p, server, client):
p = DHCP6_Renew(trid=p.trid+1)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptServerId(duid=p[DHCP6OptServerId].duid)/ \
DHCP6OptIA_NA(iaid=p[DHCP6OptIA_NA].iaid + 1, ianaopts=DHCP6OptIAAddress(addr=self.ip_from(p)))
return p
def build_dhcpv6_solicit(self, client, iaid=0x4321, trid=0x1234, T1=300, T2=300):
p = DHCP6_Solicit(trid=trid)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptOptReq()/ \
DHCP6OptIA_NA(iaid=iaid, T1=T1, T2=T2)
return p
def build_dhcpv6_information_request(self, client, trid=0x1234, reqopts=[23,24]):
p = DHCP6_InfoRequest(trid=trid)/ \
DHCP6OptClientId(duid=builder.duid(client.iface(0).ll_addr))/ \
DHCP6OptElapsedTime(elapsedtime=2000)/ \
DHCP6OptOptReq(reqopts=reqopts)
return p
def build_dhcpv6_relay_forward(self, packet, client, relay, with_ifaceid=True):
p = DHCP6_RelayForward(linkaddr=str(relay.iface(0).global_ip()), peeraddr=str(client.link_local_ip()), hopcount=0)
if with_ifaceid:
p = p/DHCP6OptIfaceId(ifaceid="eth0")
p = p/DHCP6OptRelayMsg()/packet
return p
def build_dhcpv6_relay_reply(self, packet, client, relay):
p = DHCP6_RelayReply(linkaddr=str(relay.iface(0).global_ip()), peeraddr=str(client.link_local_ip()), hopcount=0)/ \
DHCP6OptIfaceId(ifaceid="eth0")/ \
DHCP6OptRelayMsg()/packet
return p
def do_dhcpv6_handshake_as_client(self, server, client, iaid=0x00004321, trid=0x1234, T1=300, T2=300):
self.logger.info("Building a DHCPv6 Solicit message")
s = self.build_dhcpv6_solicit(client, iaid=iaid, trid=trid)
self.logger.info("Sending the DHCPv6 Solicit message, to request addressing parameters...")
client.send(IPv6(src=str(client.link_local_ip()), dst=AllDHCPv6RelayAgentsAndServers)/UDP(sport=DHCPv6SourcePort, dport=DHCPv6DestPort)/s)
self.logger.info("Checking for a DHCPv6 Advertise message...")
r1 = client.received(src=str(server.link_local_ip()), type=DHCP6_Advertise)
assertEqual(1, len(r1), "expected to receive a DHCPv6 Advertise")
a = r1[0]
assertHasLayer(DHCP6OptIA_NA, a, "expected the DHCPv6 Advertise to contain an IA")
assertHasLayer(DHCP6OptIAAddress, a, "expected the IA to contain an Address")
self.logger.info("Building a DHCPv6 Request message...")
q = self.build_dhcpv6_request(a, server, client)
self.logger.info("Sending the DHCPv6 Request message...")
client.send(IPv6(src=str(client.link_local_ip()), dst=AllDHCPv6RelayAgentsAndServers)/UDP(sport=DHCPv6SourcePort, dport=DHCPv6DestPort)/q)
self.logger.info("Checking for a DHCPv6 Reply message...")
r2 = client.received(src=str(server.link_local_ip()), type=DHCP6_Reply)
assertEqual(1, len(r2), "expected to receive a DHCPv6 Reply")
p = r2[0]
assertHasLayer(DHCP6OptIA_NA, p, "expected the DHCPv6 Reply to contain an IA")
assertHasLayer(DHCP6OptIAAddress, p, "expected the IA to contain an Address")
assertEqual(self.ip_from(q), self.ip_from(p), "expected the IA to contain the requested address")
return (self.ip_from(p), p)
def do_dhcpv6_handshake_as_server(self, server, client, wait=True, T1=300, T2=300):
self.ui.tell("Please restart the UUT's network interface.")
assertTrue(self.ui.ask("Has the interface restarted?"))
self.logger.info("Checking for a DHCPv6 Solicit message...")
r1 = server.received(src=str(client.link_local_ip()), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Solicit)
assertGreaterThanOrEqualTo(1, len(r1), "expected to receive a DHCPv6 Solicit message")
s = r1[0][UDP]
self.logger.info("Building a DHCPv6 Advertisement for the client")
a = self.build_dhcpv6_advertisement(s, server, client, T1=T1, T2=T2)
self.logger.info("Sending the DHCPv6 Advertise message, to offer the client addressing parameters...")
server.send(IPv6(src=str(server.link_local_ip()), dst=str(client.link_local_ip()))/UDP(sport=s.dport, dport=s.sport)/a)
self.logger.info("Waiting for the UUT to respond to the DHCPv6 Advertisement...")
r2 = server.received(src=str(client.link_local_ip()), dst=AllDHCPv6RelayAgentsAndServers, type=DHCP6_Request)
assertGreaterThanOrEqualTo(1, len(r2), "expected to receive a DHCPv6 Request")
q = r2[0][UDP]
assertHasLayer(DHCP6OptIA_NA, q, "expected the DHCPv6 Request to contain an IA")
assertHasLayer(DHCP6OptIAAddress, q, "expected the IA to contain an Address")
assertEqual(client.global_ip(), q[DHCP6OptIAAddress].addr, "expected the DHCPv6 Client to request the IP address offered")
self.logger.info("Building a DHCPv6 Reply message, to confirm the client's addressing parameters...")
p = self.build_dhcpv6_reply(q, server, client, T1=T1, T2=T2)
self.logger.info("Sending the DHCPv6 Reply message...")
server.send(IPv6(src=str(server.link_local_ip()), dst=str(client.link_local_ip()))/UDP(sport=s.dport, dport=s.sport)/p)
if wait:
self.logger.info("Waiting for the UUT to configure its network interface...")
self.ui.wait(5)
def ip_from(self, something_with_ia):
return something_with_ia[DHCP6OptIAAddress].addr
def confirm_dns(self, dns_server, client):
self.ui.tell("Please send an ICMPv6 Echo Request from the UUT to \"DHCPv6.TEST.EXAMPLE.COM\".")
assertTrue(self.ui.ask("Press 'y', then enter when you have done this."))
r1 = dns_server.received(src=client.global_ip(), dst=dns_server.global_ip(), type=DNS)
assertGreaterThanOrEqualTo(1, len(r1), "expected to receive at least 1 DNS Standard Query")
return r1[0]
def confirm_no_dns(self, dns_server, client):
self.ui.tell("Please send an ICMPv6 Echo Request from the UUT to \"DHCPv6.TEST.EXAMPLE.COM\".")
assertTrue(self.ui.ask("Press 'y', then enter when you have done this."))
r1 = dns_server.received(src=client.global_ip(), dst=dns_server.global_ip(), type=DNS)
assertEqual(0, len(r1), "did not expect to receive a DNS Standard Query")
def restart_and_wait_for_information_request(self, server, client):
self.ui.tell("Restart DHCPv6 on the NUT.")
assertTrue(self.ui.ask("Press 'y', then enter when the service is restarting."))
return self.wait_for_information_request(server, client)
def wait_for_information_request(self, server, client, timeout=5):
self.logger.info("Checking for a DHCPv6 Information Request message...")
r1 = server.received(dst=AllDHCPv6RelayAgentsAndServers, src=client.link_local_ip(), type=DHCP6_InfoRequest, timeout=timeout)
assertGreaterThanOrEqualTo(1, len(r1), "expected to receive at least 1 DHCPv6 Information Request")
return r1[0]
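# The helpers above script the standard four-message DHCPv6 exchange
# (Solicit -> Advertise -> Request -> Reply). A sketch of a concrete test
# case built on them (the node indices are illustrative):
#
#   class ExampleAddressAssignment(DHCPv6Helper):
#       def run(self):
#           ip, reply = self.do_dhcpv6_handshake_as_client(self.node(2),
#                                                          self.node(1))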
|
mwrlabs/veripy
|
contrib/rfc3736/dhcpv6.py
|
Python
|
gpl-3.0
| 10,704
|
from popit.views.base import BasePopitView
from popit.serializers import LinkSerializer
from popit.models import Link
from popit.models import Person
from popit.models import Organization
from popit.models import Post
from popit.models import Membership
from popit.models import OtherName
from popit.models import Identifier
from popit.models import ContactDetail
from rest_framework.response import Response
from django.http import Http404
from rest_framework import status
from popit.serializers.exceptions import ParentNotSetException
from popit.serializers.exceptions import ChildNotSetException
from popit.serializers.exceptions import ContentObjectNotAvailable
# A citation is a Link attached to a specific field of an entity, so lookups filter by field.
class BaseCitationListCreateView(BasePopitView):
serializer = LinkSerializer
def get(self, request, language, pk, field):
instance = self.entity.objects.language(language).get(id=pk)
citations = instance.links.untranslated().filter(field=field)
page = self.paginator.paginate_queryset(citations, request, view=self)
serializer = self.serializer(page, language=language, many=True)
return self.paginator.get_paginated_response(serializer.data)
def post(self, request, language, pk, field):
instance = self.entity.objects.language(language).get(id=pk)
data = request.data
serializer = self.serializer(data=data, language=language)
if serializer.is_valid():
serializer.save(content_object=instance, field=field)
output = {
"result": serializer.data
}
return Response(output, status=status.HTTP_201_CREATED)
errors = {"errors": serializer.errors}
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
class BaseCitationDetailView(BasePopitView):
serializer = LinkSerializer
def get_object(self, parent, field, pk, language="en"):
try:
instance = self.entity.objects.language(language).get(id=parent)
try:
citations = instance.links.get(id=pk, field=field)
except Link.DoesNotExist:
raise Http404
return citations
except self.entity.DoesNotExist:
raise Http404
def get(self, request, language, parent, field, pk):
citations = self.get_object(parent, field, pk, language)
serializer = self.serializer(instance=citations, language=language)
data = {"result": serializer.data}
return Response(data)
def put(self, request, language, parent, field, pk):
citations = self.get_object(parent, field, pk, language)
serializer = self.serializer(citations, data=request.data, language=language)
if serializer.is_valid():
serializer.save()
output = {"result":serializer.data}
return Response(output, status=status.HTTP_200_OK)
errors = {"errors":serializer.errors}
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, language, parent, field, pk):
citations = self.get_object(parent, field, pk, language)
citations.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class BaseSubItemCitationListView(BasePopitView):
serializer = LinkSerializer
parent = None
def get_parent(self, parent_pk, language):
try:
parent = self.parent.objects.language(language).get(id=parent_pk)
return parent
except self.parent.DoesNotExist:
raise Http404
def get_citations(self, parent_pk, child_pk, field, language):
try:
parent = self.get_parent(parent_pk ,language)
child = self.get_child(parent, child_pk, language)
links = child.links.language(language).filter(field=field)
return links
except Link.DoesNotExist:
raise Http404
def get_child(self, parent, child_pk, language):
        raise NotImplementedError
def get(self, request, language, parent_pk, child_pk, field):
citations = self.get_citations(parent_pk, child_pk, field, language)
page = self.paginator.paginate_queryset(citations, request, view=self)
serializer = self.serializer(page, language=language, many=True)
return self.paginator.get_paginated_response(serializer.data)
def post(self, request, language, parent_pk, child_pk, field):
parent = self.get_parent(parent_pk, language)
child = self.get_child(parent, child_pk, language)
serializer = self.serializer(data=request.data, language=language)
if serializer.is_valid():
serializer.save(content_object=child, field=field)
data = { "result": serializer.data }
return Response(data, status=status.HTTP_201_CREATED)
errors = { "errors": serializer.errors }
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
class GenericOthernameCitationListView(BaseSubItemCitationListView):
entity = OtherName
def get_child(self, parent, child_pk, language):
try:
child = parent.other_names.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericIdentifierCitationListView(BaseSubItemCitationListView):
entity = Identifier
def get_child(self, parent, child_pk, language):
try:
child = parent.identifiers.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericContactDetailCitationListView(BaseSubItemCitationListView):
entity = ContactDetail
def get_child(self, parent, child_pk, language):
try:
child = parent.contact_details.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class BaseSubItemCitationDetailView(BasePopitView):
serializer = LinkSerializer
parent = None
def get_parent(self, parent_pk, language):
try:
parent = self.parent.objects.language(language).get(id=parent_pk)
return parent
except self.parent.DoesNotExist:
raise Http404
def get_citations(self, parent_pk, child_pk, field, link_id, language):
try:
parent = self.get_parent(parent_pk ,language)
child = self.get_child(parent, child_pk, language)
links = child.links.language(language).get(id=link_id, field=field)
return links
except Link.DoesNotExist:
raise Http404
def get_child(self, parent, child_pk, language):
        raise NotImplementedError
def get(self, requests, language, parent_pk, child_pk, field, link_id):
citations = self.get_citations(parent_pk, child_pk, field, link_id, language)
serializer = self.serializer(citations, language=language)
result = { "result": serializer.data }
return Response(result)
def put(self, requests, language, parent_pk, child_pk, field, link_id):
data = requests.data
citations = self.get_citations(parent_pk, child_pk, field, link_id, language)
serializer = self.serializer(citations, data=data, language=language)
if serializer.is_valid():
serializer.save()
result = { "result": serializer.data }
return Response(result, status=status.HTTP_200_OK)
errors = { "errors": serializer.errors }
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, requests, language, parent_pk, child_pk, field, link_id):
citations = self.get_citations(parent_pk, child_pk, field, link_id, language)
citations.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
class GenericOthernameCitationDetailView(BaseSubItemCitationDetailView):
entity = OtherName
def get_child(self, parent, child_pk, language):
try:
child = parent.other_names.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericIdentifierCitationDetailView(BaseSubItemCitationDetailView):
entity = Identifier
def get_child(self, parent, child_pk, language):
try:
child = parent.identifiers.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericContactDetailCitationDetailView(BaseSubItemCitationDetailView):
entity = ContactDetail
def get_child(self, parent, child_pk, language):
try:
child = parent.contact_details.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
# This is a read-only view showing which fields have citations. No create/update/delete.
class BaseFieldCitationView(BasePopitView):
serializer = LinkSerializer
def get_citations(self, pk, language):
data = {}
instance = self.entity.objects.language(language).get(id=pk)
for field in instance._meta.fields:
            # skip the id field; it is intentionally hardcoded
if field.attname == "id":
continue
temp = instance.links.filter(field=field.attname)
citations = LinkSerializer(temp, language=language, many=True)
data[field.attname] = citations.data
        # In hvad, translated fields live in a separate table, so new languages can be added without altering the schema.
for field in instance._translated_field_names:
            # skip the master_id and id fields; they are intentionally hardcoded
if field == "master_id" or field == "id":
continue
temp = instance.links.filter(field=field)
citations = LinkSerializer(temp, language=language, many=True)
data[field] = citations.data
return data
def get(self, request, language, pk):
citations = self.get_citations(pk, language)
data = {"result": citations}
return Response(data)
class BaseSubItemFieldCitationView(BasePopitView):
parent = None
serializer = LinkSerializer
def get_parent(self, parent_pk, language):
try:
parent = self.parent.objects.language(language).get(id=parent_pk)
return parent
except self.parent.DoesNotExist:
raise Http404
def get_citations(self, parent_pk, child_pk, language):
parent = self.get_parent(parent_pk, language)
child = self.get_child(parent, child_pk, language)
data = {}
for field in child._meta.fields:
if field.attname == "content_object":
continue
temp = child.links.filter(field=field.attname)
citation = LinkSerializer(temp, language=language, many=True)
data[field.attname] = citation.data
return data
def get_child(self, parent, child_pk, language):
        raise NotImplementedError
def get(self, request, language, parent_pk, child_pk):
citations = self.get_citations(parent_pk, child_pk, language)
result = { "result": citations }
return Response(result)
class GenericOtherNameFieldCitationView(BaseSubItemFieldCitationView):
entity = OtherName
def get_child(self, parent, child_pk, language):
try:
child = parent.other_names.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericIdentifierFieldCitationView(BaseSubItemFieldCitationView):
entity = Identifier
def get_child(self, parent, child_pk, language):
try:
child = parent.identifiers.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
class GenericContactDetailFieldCitationView(BaseSubItemFieldCitationView):
entity = ContactDetail
def get_child(self, parent, child_pk, language):
try:
child = parent.contact_details.language(language).get(id=child_pk)
return child
except self.entity.DoesNotExist:
raise Http404
|
Sinar/popit_ng
|
popit/views/citation.py
|
Python
|
agpl-3.0
| 12,404
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_classification import (
AutoMlImageClassificationMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetection,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetectionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_object_detection import (
AutoMlImageObjectDetectionMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentation,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_image_segmentation import (
AutoMlImageSegmentationMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTables,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTablesInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_tables import (
AutoMlTablesMetadata,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
AutoMlTextClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_classification import (
AutoMlTextClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
AutoMlTextExtraction,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_extraction import (
AutoMlTextExtractionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
AutoMlTextSentiment,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_text_sentiment import (
AutoMlTextSentimentInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
AutoMlVideoActionRecognition,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_action_recognition import (
AutoMlVideoActionRecognitionInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
AutoMlVideoClassification,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_classification import (
AutoMlVideoClassificationInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
AutoMlVideoObjectTracking,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.automl_video_object_tracking import (
AutoMlVideoObjectTrackingInputs,
)
from google.cloud.aiplatform.v1.schema.trainingjob.definition_v1.types.export_evaluated_data_items_config import (
ExportEvaluatedDataItemsConfig,
)
__all__ = (
"AutoMlImageClassification",
"AutoMlImageClassificationInputs",
"AutoMlImageClassificationMetadata",
"AutoMlImageObjectDetection",
"AutoMlImageObjectDetectionInputs",
"AutoMlImageObjectDetectionMetadata",
"AutoMlImageSegmentation",
"AutoMlImageSegmentationInputs",
"AutoMlImageSegmentationMetadata",
"AutoMlTables",
"AutoMlTablesInputs",
"AutoMlTablesMetadata",
"AutoMlTextClassification",
"AutoMlTextClassificationInputs",
"AutoMlTextExtraction",
"AutoMlTextExtractionInputs",
"AutoMlTextSentiment",
"AutoMlTextSentimentInputs",
"AutoMlVideoActionRecognition",
"AutoMlVideoActionRecognitionInputs",
"AutoMlVideoClassification",
"AutoMlVideoClassificationInputs",
"AutoMlVideoObjectTracking",
"AutoMlVideoObjectTrackingInputs",
"ExportEvaluatedDataItemsConfig",
)
|
sasha-gitg/python-aiplatform
|
google/cloud/aiplatform/v1/schema/trainingjob/definition/__init__.py
|
Python
|
apache-2.0
| 4,992
|
import unittest
import pysal
from pysal.spatial_dynamics import ergodic
import numpy as np
class SteadyState_Tester(unittest.TestCase):
def setUp(self):
self.p = np.matrix([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
def test_steady_state(self):
obs = ergodic.steady_state(self.p).tolist()
exp = np.matrix([[0.4], [0.2], [0.4]]).tolist()
k = self.p.shape[0]
for i in range(k):
self.assertAlmostEqual(exp[i][0], obs[i][0])
class Fmpt_Tester(unittest.TestCase):
def setUp(self):
self.p = np.matrix([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
def test_fmpt(self):
k = self.p.shape[0]
obs = ergodic.fmpt(self.p).flatten().tolist()[0]
exp = np.matrix([[2.5, 4., 3.33333333], [2.66666667, 5.,
2.66666667], [3.33333333, 4., 2.5]])
exp = exp.flatten().tolist()[0]
for i in range(k):
self.assertAlmostEqual(exp[i], obs[i])
class VarFmpt_Tester(unittest.TestCase):
def setUp(self):
self.p = np.matrix([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
def test_var_fmpt(self):
k = self.p.shape[0]
obs = ergodic.var_fmpt(self.p).flatten().tolist()[0]
exp = np.matrix([[5.58333333, 12., 6.88888889], [6.22222222,
12., 6.22222222], [6.88888889, 12., 5.58333333]])
exp = exp.flatten().tolist()[0]
for i in range(k):
self.assertAlmostEqual(exp[i], obs[i])
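# Cross-check (a sketch, using only numpy): the steady state asserted above
# is the left eigenvector of p for eigenvalue 1, normalised to sum to one.
def _steady_state_by_eig(p):
    vals, vecs = np.linalg.eig(np.asarray(p).T)
    v = np.real(vecs[:, np.argmin(np.abs(vals - 1.0))])
    return v / v.sum()
# _steady_state_by_eig([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
# -> approximately [0.4, 0.2, 0.4]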
suite = unittest.TestSuite()
test_classes = [SteadyState_Tester, Fmpt_Tester, VarFmpt_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
|
badjr/pysal
|
pysal/spatial_dynamics/tests/test_ergodic.py
|
Python
|
bsd-3-clause
| 1,837
|
# Copyright 2009-2014 Justin Riley
#
# This file is part of StarCluster.
#
# StarCluster is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# StarCluster is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with StarCluster. If not, see <http://www.gnu.org/licenses/>.
import posixpath
from starcluster import clustersetup
from starcluster.templates import slurm
from starcluster.logger import log
class SlurmPlugin(clustersetup.DefaultClusterSetup):
SLURM_CONFDIR = '/etc/slurm-llnl'
MUNGE_CONFDIR = '/etc/munge'
MUNGE_DEFAULTS = '/etc/default/munge'
SLURM_SERVICE = 'slurm-llnl'
MUNGE_SERVICE = 'munge'
def __init__(self, master_can_run_jobs=True, **kwargs):
self._master_can_run_jobs = master_can_run_jobs
super(SlurmPlugin, self).__init__(**kwargs)
def _slurm_path(self, path):
return posixpath.join(self.SLURM_CONFDIR, path)
def get_worker_nodes(self):
return [t for t in self._nodes if
self._master_can_run_jobs or not t.is_master()]
def _node_defs(self):
lines = []
for node in self.get_worker_nodes():
lines.append(
'NodeName=%s CPUs=%d State=UNKNOWN' % (
node.alias, node.num_processors))
return '\n'.join(lines)
def _partition_def(self):
node_list = ','.join(n.alias for n in self.get_worker_nodes())
return 'PartitionName=debug Nodes=%s ' \
'Default=YES MaxTime=INFINITE State=UP' % (node_list)
def _update_config(self):
master = self._master
slurm_conf = master.ssh.remote_file(
self._slurm_path('slurm.conf'), "w")
conf = slurm.conf_template % {
'node_defs': self._node_defs(),
'partition_def': self._partition_def()
}
slurm_conf.write(conf)
slurm_conf.close()
def _restart_slurm(self, node):
node.ssh.execute('service %s restart' % (self.SLURM_SERVICE))
def _start_munge(self, node):
outf = node.ssh.remote_file(
self.MUNGE_DEFAULTS, "w")
outf.write(slurm.munge_defaults)
outf.close()
node.ssh.execute('service %s restart' % (self.MUNGE_SERVICE))
def _start_services(self, node):
self._start_munge(node)
self._restart_slurm(node)
def _setup_slurm(self):
"""
Set up Slurm on StarCluster
"""
master = self._master
log.info("Creating MUNGE key")
master.ssh.execute('create-munge-key')
self._setup_nfs(
self.nodes,
export_paths=[self.SLURM_CONFDIR, self.MUNGE_CONFDIR],
start_server=False)
log.info("Configuring and starting Slurm")
self._update_config()
for node in [self._master] + self.nodes:
self.pool.simple_job(
self._start_services, (node,), jobid=node.alias)
self.pool.wait(numtasks=len(self.nodes))
def run(self, nodes, master, user, user_shell, volumes):
log.info("Configuring Slurm...")
self._nodes = nodes
self._master = master
self._user = user
self._user_shell = user_shell
self._volumes = volumes
self._setup_slurm()
def on_add_node(self, node, nodes, master, user, user_shell, volumes):
self._nodes = nodes
self._master = master
self._user = user
self._user_shell = user_shell
self._volumes = volumes
log.info("Adding %s to Slum" % node.alias)
self._setup_nfs(
nodes=[node],
export_paths=[self.SLURM_CONFDIR, self.MUNGE_CONFDIR],
start_server=False)
self._start_munge(node)
self._update_config()
self._restart_slurm(master)
self._restart_slurm(node)
def on_remove_node(self, node, nodes, master, user, user_shell, volumes):
self._nodes = [t for t in nodes if t != node]
self._master = master
self._user = user
self._user_shell = user_shell
self._volumes = volumes
log.info("Removing %s from Slurm" % node.alias)
self._remove_nfs_exports(node)
self._update_config()
self._restart_slurm(master)
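# Illustrative output (a sketch): with worker nodes node001 and node002 of
# two CPUs each, _node_defs() and _partition_def() yield the slurm.conf lines:
#
#   NodeName=node001 CPUs=2 State=UNKNOWN
#   NodeName=node002 CPUs=2 State=UNKNOWN
#   PartitionName=debug Nodes=node001,node002 Default=YES MaxTime=INFINITE State=UP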
|
muccg/StarCluster-plugins
|
plugins/slurm.py
|
Python
|
gpl-3.0
| 4,667
|
#!/home/mark/myResearch/django-farmersale/farmersale-env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
MarkTseng/django-farmersale
|
farmersale-env/bin/django-admin.py
|
Python
|
mit
| 174
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('djunin', '0023_auto_20160522_0657'),
]
operations = [
migrations.CreateModel(
name='DjuninNodePermission',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('node', models.ForeignKey(related_name='permissions', to='djunin.Node')),
('object_ct', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
),
migrations.AlterUniqueTogether(
name='djuninnodepermission',
unique_together=set([('node', 'object_ct', 'object_id')]),
),
]
|
ercpe/djunin
|
djunin/migrations/0024_auto_20160708_0439.py
|
Python
|
gpl-3.0
| 1,011
|
# sqlalchemy/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .sql import (
alias,
all_,
and_,
any_,
asc,
between,
bindparam,
case,
cast,
collate,
column,
delete,
desc,
distinct,
except_,
except_all,
exists,
extract,
false,
func,
funcfilter,
insert,
intersect,
intersect_all,
join,
lateral,
literal,
literal_column,
modifier,
not_,
null,
or_,
outerjoin,
outparam,
over,
select,
subquery,
table,
tablesample,
text,
true,
tuple_,
type_coerce,
union,
union_all,
update,
within_group,
)
from .types import (
ARRAY,
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
Binary,
Boolean,
CHAR,
CLOB,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
JSON,
LargeBinary,
NCHAR,
NVARCHAR,
NUMERIC,
Numeric,
PickleType,
REAL,
SMALLINT,
SmallInteger,
String,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
TypeDecorator,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
)
from .schema import (
CheckConstraint,
Column,
ColumnDefault,
Constraint,
DefaultClause,
FetchedValue,
ForeignKey,
ForeignKeyConstraint,
Index,
MetaData,
PassiveDefault,
PrimaryKeyConstraint,
Sequence,
Table,
ThreadLocalMetaData,
UniqueConstraint,
DDL,
BLANK_SCHEMA
)
from .inspection import inspect
from .engine import create_engine, engine_from_config
__version__ = '1.1.5'
def __go(lcls):
global __all__
from . import events
from . import util as _sa_util
import inspect as _inspect
__all__ = sorted(name for name, obj in lcls.items()
if not (name.startswith('_') or _inspect.ismodule(obj)))
_sa_util.dependencies.resolve_all("sqlalchemy")
__go(locals())
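# After __go(locals()) runs, __all__ holds every public top-level name
# defined above, so star-imports and introspection expose the full API:
#
#   import sqlalchemy
#   "select" in sqlalchemy.__all__   # True
#   "Column" in sqlalchemy.__all__   # True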
|
weisongchen/flaskapp
|
venv/lib/python2.7/site-packages/sqlalchemy/__init__.py
|
Python
|
mit
| 2,217
|
#!/usr/bin/python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
from urllib import urlopen
import requests
import getpass
from string import Template
import sys
import os
import subprocess
class RunError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def run(command, **kwargs):
fail_hard = kwargs.pop("fail_hard", True)
# output to /dev/null by default:
kwargs.setdefault("stdout", open('/dev/null', 'w'))
kwargs.setdefault("stderr", open('/dev/null', 'w'))
command = Template(command).substitute(os.environ)
if "TRACE" in os.environ:
if 'cwd' in kwargs:
print("[cwd=%s] %s"%(kwargs['cwd'], command))
else: print(command)
try:
process = subprocess.Popen(command.split(' '), **kwargs)
process.wait()
except KeyboardInterrupt:
process.terminate()
raise
if process.returncode != 0 and fail_hard:
raise RunError("Failed: "+command)
return process.returncode
def checkout_pull(clone_url, commit, out):
# Init
build_dir=os.environ["BUILD_DIR"]
run("umount ${CHROOT_COPY}/proc", fail_hard=False)
run("rsync --delete -apv ${CHROOT_MASTER}/ ${CHROOT_COPY}")
run("rm -rf ${CHROOT_COPY}${SCRIPTS_DIR}")
run("cp -a ${SCRIPTS_DIR} ${CHROOT_COPY}${SCRIPTS_DIR}")
# Merge onto upstream/master
run("rm -rf ${BUILD_DIR}")
run("mkdir -p ${BUILD_DIR}")
run("git clone ${CLONE_URL} ${BUILD_DIR}")
run("git remote add pull "+clone_url, cwd=build_dir, stdout=out, stderr=out)
run("git fetch pull", cwd=build_dir, stdout=out, stderr=out)
if run("git merge "+ commit, fail_hard=False, cwd=build_dir, stdout=out, stderr=out) != 0:
return False
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${BUILD_DIR}", stdout=out, stderr=out)
run("mount --bind /proc ${CHROOT_COPY}/proc")
return True
def commentOn(commentUrl, success, inMerge, needTests, linkUrl):
common_message = """
This test script verifies pulls every time they are updated. It, however, dies sometimes and fails to test properly. If you are waiting on a test, please check timestamps to verify that the test.log is moving at http://jenkins.bluematt.me/pull-tester/current/
Contact BlueMatt on freenode if something looks broken."""
# Remove old BitcoinPullTester comments (I'm being lazy and not paginating here)
recentcomments = requests.get(commentUrl+"?sort=created&direction=desc",
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
for comment in recentcomments:
if comment["user"]["login"] == os.environ["GITHUB_USER"] and common_message in comment["body"]:
requests.delete(comment["url"],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
if success == True:
if needTests:
message = "Automatic sanity-testing: PLEASE ADD TEST-CASES, though technically passed. See " + linkUrl + " for binaries and test log."
else:
message = "Automatic sanity-testing: PASSED, see " + linkUrl + " for binaries and test log."
post_data = { "body" : message + common_message}
elif inMerge:
post_data = { "body" : "Automatic sanity-testing: FAILED MERGE, see " + linkUrl + " for test log." + """
This pull does not merge cleanly onto current master""" + common_message}
else:
post_data = { "body" : "Automatic sanity-testing: FAILED BUILD/TEST, see " + linkUrl + " for binaries and test log." + """
This could happen for one of several reasons:
1. It changes build scripts in a way that makes them incompatible with the automated testing scripts (please tweak those patches in qa/pull-tester)
2. It adds/modifies tests which test network rules (thanks for doing that), which conflicts with a patch applied at test time
3. It does not build on either Linux i386 or Win32 (via MinGW cross compile)
4. The test suite fails on either Linux i386 or Win32
5. The block test-cases failed (lookup the first bNN identifier which failed in https://github.com/TheBlueMatt/test-scripts/blob/master/FullBlockTestGenerator.java)
If you believe this to be in error, please ping BlueMatt on freenode or TheBlueMatt here.
""" + common_message}
resp = requests.post(commentUrl, json.dumps(post_data), auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"]))
def testpull(number, comment_url, clone_url, commit):
print("Testing pull %d: %s : %s"%(number, clone_url,commit))
dir = os.environ["RESULTS_DIR"] + "/" + commit + "/"
print(" ouput to %s"%dir)
if os.path.exists(dir):
os.system("rm -r " + dir)
os.makedirs(dir)
currentdir = os.environ["RESULTS_DIR"] + "/current"
os.system("rm -r "+currentdir)
os.system("ln -s " + dir + " " + currentdir)
out = open(dir + "test.log", 'w+')
resultsurl = os.environ["RESULTS_URL"] + commit
checkedout = checkout_pull(clone_url, commit, out)
if checkedout != True:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, True, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
return
run("rm -rf ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("mkdir -p ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False);
run("chown -R ${BUILD_USER}:${BUILD_GROUP} ${CHROOT_COPY}/${OUT_DIR}", fail_hard=False)
script = os.environ["BUILD_PATH"]+"/qa/pull-tester/pull-tester.sh"
script += " ${BUILD_PATH} ${MINGW_DEPS_DIR} ${SCRIPTS_DIR}/CreditsdComparisonTool_jar/CreditsdComparisonTool.jar 0 6 ${OUT_DIR}"
returncode = run("chroot ${CHROOT_COPY} sudo -u ${BUILD_USER} -H timeout ${TEST_TIMEOUT} "+script,
fail_hard=False, stdout=out, stderr=out)
run("mv ${CHROOT_COPY}/${OUT_DIR} " + dir)
run("mv ${BUILD_DIR} " + dir)
if returncode == 42:
print("Successfully tested pull (needs tests) - sending comment to: " + comment_url)
commentOn(comment_url, True, False, True, resultsurl)
elif returncode != 0:
print("Failed to test pull - sending comment to: " + comment_url)
commentOn(comment_url, False, False, False, resultsurl)
else:
print("Successfully tested pull - sending comment to: " + comment_url)
commentOn(comment_url, True, False, False, resultsurl)
open(os.environ["TESTED_DB"], "a").write(commit + "\n")
def environ_default(setting, value):
if not setting in os.environ:
os.environ[setting] = value
if getpass.getuser() != "root":
print("Run me as root!")
sys.exit(1)
if "GITHUB_USER" not in os.environ or "GITHUB_AUTH_TOKEN" not in os.environ:
print("GITHUB_USER and/or GITHUB_AUTH_TOKEN environment variables not set")
sys.exit(1)
environ_default("CLONE_URL", "https://github.com/bitcoin/bitcoin.git")
environ_default("MINGW_DEPS_DIR", "/mnt/w32deps")
environ_default("SCRIPTS_DIR", "/mnt/test-scripts")
environ_default("CHROOT_COPY", "/mnt/chroot-tmp")
environ_default("CHROOT_MASTER", "/mnt/chroot")
environ_default("OUT_DIR", "/mnt/out")
environ_default("BUILD_PATH", "/mnt/bitcoin")
os.environ["BUILD_DIR"] = os.environ["CHROOT_COPY"] + os.environ["BUILD_PATH"]
environ_default("RESULTS_DIR", "/mnt/www/pull-tester")
environ_default("RESULTS_URL", "http://jenkins.bluematt.me/pull-tester/")
environ_default("GITHUB_REPO", "bitcoin/bitcoin")
environ_default("TESTED_DB", "/mnt/commits-tested.txt")
environ_default("BUILD_USER", "matt")
environ_default("BUILD_GROUP", "matt")
environ_default("TEST_TIMEOUT", str(60*60*2))
print("Optional usage: pull-tester.py 2112")
f = open(os.environ["TESTED_DB"])
tested = set( line.rstrip() for line in f.readlines() )
f.close()
if len(sys.argv) > 1:
pull = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls/"+sys.argv[1],
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
else:
for page in range(1,100):
result = requests.get("https://api.github.com/repos/"+os.environ["GITHUB_REPO"]+"/pulls?state=open&page=%d"%(page,),
auth=(os.environ['GITHUB_USER'], os.environ["GITHUB_AUTH_TOKEN"])).json
        if len(result) == 0: break
for pull in result:
if pull["head"]["sha"] in tested:
print("Pull %d already tested"%(pull["number"],))
continue
testpull(pull["number"], pull["_links"]["comments"]["href"],
pull["head"]["repo"]["clone_url"], pull["head"]["sha"])
|
credits-currency/credits
|
qa/pull-tester/pull-tester.py
|
Python
|
mit
| 8,944
|
import unittest, sys
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_browse as h2b
from h2o_test import find_file, dump_json, verboseprint
expectedZeros = [0, 4914, 656, 24603, 38665, 124, 13, 5, 1338, 51, 320216, 551128, 327648, 544044, 577981,
573487, 576189, 568616, 579415, 574437, 580907, 580833, 579865, 548378, 568602, 551041,
563581, 580413, 581009, 578167, 577590, 579113, 576991, 571753, 580174, 547639, 523260,
559734, 580538, 578423, 579926, 580066, 465765, 550842, 555346, 528493, 535858, 579401,
579121, 580893, 580714, 565439, 567206, 572262, 0]
def assertEqualMsg(a, b): assert a == b, "%s %s" % (a, b)
def parseKeyIndexedCheck(frames_result, multiplyExpected):
# get the name of the frame?
print ""
frame = frames_result['frames'][0]
rows = frame['rows']
columns = frame['columns']
for i,c in enumerate(columns):
label = c['label']
stype = c['type']
missing = c['missing']
zeros = c['zeros']
domain = c['domain']
print "column: %s label: %s type: %s missing: %s zeros: %s domain: %s" %\
(i,label,stype,missing,zeros,domain)
        # the test files are concatenated copies of covtype.data, so scale the expected counts
assertEqualMsg(zeros, expectedZeros[i] * multiplyExpected)
assertEqualMsg(label,"C%s" % (i+1))
assertEqualMsg(stype,"int")
assertEqualMsg(missing, 0)
assertEqualMsg(domain, None)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_parse_covtype_2(self):
tryList = [
('covtype.data', 1, 30),
('covtype20x.data', 20, 120),
]
for (csvFilename, multiplyExpected, timeoutSecs) in tryList:
# import_result = a_node.import_files(path=find_file("smalldata/logreg/prostate.csv"))
importFolderPath = "standard"
hex_key = 'covtype.hex'
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
timeoutSecs=timeoutSecs, hex_key=hex_key,
chunk_size=4194304*2, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
print iA.missingList, iA.labelList, iA.numRows, iA.numCols
for i in range(1):
print "Summary on column", i
co = h2o_cmd.runSummary(key=hex_key, column=i)
k = parseResult['frames'][0]['key']['name']
# print "parseResult:", dump_json(parseResult)
a_node = h2o.nodes[0]
frames_result = a_node.frames(key=k, row_count=5)
# print "frames_result from the first parseResult key", dump_json(frames_result)
parseKeyIndexedCheck(frames_result, multiplyExpected)
if __name__ == '__main__':
h2o.unit_main()
|
bikash/h2o-dev
|
py2/testdir_single_jvm/test_parse_covtype_2.py
|
Python
|
apache-2.0
| 3,121
|
# -*- coding: utf-8 -*-
"""
eve.flaskapp
~~~~~~~~~~~~
This module implements the central WSGI application object as a Flask
subclass.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import eve
import sys
import os
import copy
from flask import Flask
from werkzeug.routing import BaseConverter
from werkzeug.serving import WSGIRequestHandler
from eve.io.mongo import Mongo, Validator, GridFSMediaStorage, create_index
from eve.exceptions import ConfigException, SchemaException
from eve.endpoints import collections_endpoint, item_endpoint, home_endpoint, \
error_endpoint, media_endpoint
from eve.defaults import build_defaults
from eve.utils import api_prefix, extract_key_values
from events import Events
class EveWSGIRequestHandler(WSGIRequestHandler):
""" Extend werkzeug request handler to include current Eve version in all
responses, which is super-handy for debugging.
"""
@property
def server_version(self):
return 'Eve/%s ' % eve.__version__ + super(EveWSGIRequestHandler,
self).server_version
class RegexConverter(BaseConverter):
""" Extend werkzeug routing by supporting regex for urls/API endpoints """
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
class Eve(Flask, Events):
""" The main Eve object. On initialization it will load Eve settings, then
configure and enable the API endpoints. The API is launched by executing
the code below:::
app = Eve()
app.run()
:param import_name: the name of the application package
:param settings: the name of the settings file. Defaults to `settings.py`.
:param validator: custom validation class. Must be a
:class:`~cerberus.Validator` subclass. Defaults to
:class:`eve.io.mongo.Validator`.
:param data: the data layer class. Must be a :class:`~eve.io.DataLayer`
subclass. Defaults to :class:`~eve.io.Mongo`.
:param auth: the authentication class used to authenticate incoming
requests. Must be a :class: `eve.auth.BasicAuth` subclass.
:param redis: the redis (pyredis) instance used by the Rate-Limiting
feature, if enabled.
:param url_converters: dictionary of Flask url_converters to add to
supported ones (int, float, path, regex).
:param json_encoder: custom json encoder class. Must be a
JSONEncoder subclass. You probably want it to be
as eve.io.base.BaseJSONEncoder subclass.
:param media: the media storage class. Must be a
:class:`~eve.io.media.MediaStorage` subclass.
:param kwargs: optional, standard, Flask parameters.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body. Closes #365.
'auth' argument can be either an instance or a callable. Closes #248.
Made resource setup more DRY by calling register_resource.
.. versionchanged:: 0.3
Support for optional media storage system. Defaults to
GridFSMediaStorage.
.. versionchanged:: 0.2
Support for additional Flask url converters.
Support for optional, custom json encoder class.
    Support for endpoint-level authentication classes.
New method Eve.register_resource() for registering new resource after
initialization of Eve object. This is needed for simpler initialization
API of all ORM/ODM extensions.
.. versionchanged:: 0.1.0
Now supporting both "trailing slashes" and "no-trailing slashes" URLs.
.. versionchanged:: 0.0.7
'redis' argument added to handle an accessory Redis server (currently
used by the Rate-Limiting feature).
.. versionchanged:: 0.0.6
'Events' added to the list of super classes, allowing for the arbitrary
raising of events within the application.
.. versionchanged:: 0.0.4
'auth' argument added to handle authentication classes
"""
#: Allowed methods for resource endpoints
supported_resource_methods = ['GET', 'POST', 'DELETE']
#: Allowed methods for item endpoints
supported_item_methods = ['GET', 'PATCH', 'DELETE', 'PUT']
def __init__(self, import_name=__package__, settings='settings.py',
validator=Validator, data=Mongo, auth=None, redis=None,
url_converters=None, json_encoder=None,
media=GridFSMediaStorage, **kwargs):
""" Eve main WSGI app is implemented as a Flask subclass. Since we want
to be able to launch our API by simply invoking Flask's run() method,
we need to enhance our super-class a little bit.
"""
super(Eve, self).__init__(import_name, **kwargs)
self.validator = validator
self.settings = settings
self.load_config()
self.validate_domain_struct()
# enable regex routing
self.url_map.converters['regex'] = RegexConverter
# optional url_converters and json encoder
if url_converters:
self.url_map.converters.update(url_converters)
self.data = data(self)
if json_encoder:
self.data.json_encoder_class = json_encoder
self.media = media(self) if media else None
self.redis = redis
if auth:
self.auth = auth() if callable(auth) else auth
else:
self.auth = None
self._init_url_rules()
self._init_media_endpoint()
self._init_oplog()
# validate and set defaults for each resource
# Use a snapshot of the DOMAIN setup for iteration so
# further insertion of versioned resources do not
# cause a RuntimeError due to the change of size of
# the dict
domain_copy = copy.deepcopy(self.config['DOMAIN'])
for resource, settings in domain_copy.items():
self.register_resource(resource, settings)
# it seems like both domain_copy and config['DOMAIN']
# suffered changes at this point, so merge them
# self.config['DOMAIN'].update(domain_copy)
self.register_error_handlers()
def run(self, host=None, port=None, debug=None, **options):
"""
Pass our own subclass of :class:`werkzeug.serving.WSGIRequestHandler
to Flask.
:param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
have the server available externally as well. Defaults to
``'127.0.0.1'``.
:param port: the port of the webserver. Defaults to ``5000``.
:param debug: if given, enable or disable debug mode.
See :attr:`debug`.
:param options: the options to be forwarded to the underlying
Werkzeug server. See
:func:`werkzeug.serving.run_simple` for more
information. """
options.setdefault('request_handler', EveWSGIRequestHandler)
super(Eve, self).run(host, port, debug, **options)
def load_config(self):
""" API settings are loaded from standard python modules. First from
`settings.py`(or alternative name/path passed as an argument) and
then, when defined, from the file specified in the
`EVE_SETTINGS` environment variable.
Since we are a Flask subclass, any configuration value supported by
Flask itself is available (besides Eve's proper settings).
.. versionchanged:: 0.5
Allow EVE_SETTINGS envvar to be used exclusively. Closes #461.
.. versionchanged:: 0.2
Allow use of a dict object as settings.
"""
# load defaults
self.config.from_object('eve.default_settings')
# overwrite the defaults with custom user settings
if isinstance(self.settings, dict):
self.config.update(self.settings)
else:
if os.path.isabs(self.settings):
pyfile = self.settings
else:
abspath = os.path.abspath(os.path.dirname(sys.argv[0]))
pyfile = os.path.join(abspath, self.settings)
try:
self.config.from_pyfile(pyfile)
except:
# assume an envvar is going to be used exclusively
pass
# overwrite settings with custom environment variable
envvar = 'EVE_SETTINGS'
if os.environ.get(envvar):
self.config.from_envvar(envvar)
def validate_domain_struct(self):
""" Validates that Eve configuration settings conform to the
requirements.
"""
try:
domain = self.config['DOMAIN']
except:
raise ConfigException('DOMAIN dictionary missing or wrong.')
if not isinstance(domain, dict):
raise ConfigException('DOMAIN must be a dict.')
def validate_config(self):
""" Makes sure that REST methods expressed in the configuration
settings are supported.
.. versionchanged:: 0.2.0
Default supported methods are now class-level attributes.
Resource validation delegated to _validate_resource_settings().
.. versionchanged:: 0.1.0
Support for PUT method.
.. versionchanged:: 0.0.4
Support for 'allowed_roles' and 'allowed_item_roles'
.. versionchanged:: 0.0.2
Support for DELETE resource method.
"""
# make sure that global resource methods are supported.
self.validate_methods(self.supported_resource_methods,
self.config.get('RESOURCE_METHODS'),
'resource')
# make sure that global item methods are supported.
self.validate_methods(self.supported_item_methods,
self.config.get('ITEM_METHODS'),
'item')
# make sure that individual resource/item methods are supported.
for resource, settings in self.config['DOMAIN'].items():
self._validate_resource_settings(resource, settings)
def _validate_resource_settings(self, resource, settings):
""" Validates one resource in configuration settings.
:param resource: name of the resource which settings refer to.
:param settings: settings of resource to be validated.
.. versionchanged:: 0.4
validate that auth_field is not set to ID_FIELD. See #266.
.. versionadded:: 0.2
"""
self.validate_methods(self.supported_resource_methods,
settings['resource_methods'],
'[%s] resource ' % resource)
self.validate_methods(self.supported_item_methods,
settings['item_methods'],
'[%s] item ' % resource)
# while a resource schema is optional for read-only access,
# it is mandatory for write-access to resource/items.
if 'POST' in settings['resource_methods'] or \
'PATCH' in settings['item_methods']:
if len(settings['schema']) == 0:
raise ConfigException('A resource schema must be provided '
'when POST or PATCH methods are allowed '
'for a resource [%s].' % resource)
self.validate_roles('allowed_roles', settings, resource)
self.validate_roles('allowed_read_roles', settings, resource)
self.validate_roles('allowed_write_roles', settings, resource)
self.validate_roles('allowed_item_roles', settings, resource)
self.validate_roles('allowed_item_read_roles', settings, resource)
self.validate_roles('allowed_item_write_roles', settings, resource)
if settings['auth_field'] == self.config['ID_FIELD']:
raise ConfigException('"%s": auth_field cannot be set to ID_FIELD '
'(%s)' % (resource, self.config['ID_FIELD']))
self.validate_schema(resource, settings['schema'])
def validate_roles(self, directive, candidate, resource):
""" Validates that user role directives are syntactically and formally
        adequate.
:param directive: either 'allowed_[read_|write_]roles' or
'allow_item_[read_|write_]roles'.
:param candidate: the candidate setting to be validated.
:param resource: name of the resource to which the candidate settings
refer to.
.. versionadded:: 0.0.4
"""
roles = candidate[directive]
if not isinstance(roles, list):
raise ConfigException("'%s' must be list"
"[%s]." % (directive, resource))
def validate_methods(self, allowed, proposed, item):
""" Compares allowed and proposed methods, raising a `ConfigException`
when they don't match.
:param allowed: a list of supported (allowed) methods.
:param proposed: a list of proposed methods.
:param item: name of the item to which the methods would be applied.
Used when raising the exception.
"""
diff = set(proposed) - set(allowed)
if diff:
raise ConfigException('Unallowed %s method(s): %s. '
'Supported: %s' %
(item, ', '.join(diff),
', '.join(allowed)))
def validate_schema(self, resource, schema):
""" Validates a resource schema.
:param resource: resource name.
:param schema: schema definition for the resource.
.. versionchanged:: 0.5
Add ETAG to automatic fields check.
.. versionchanged:: 0.4
Checks against offending document versioning fields.
Supports embedded data_relation with version.
.. versionchanged:: 0.2
Allow ID_FIELD in resource schema if not of 'objectid' type.
.. versionchanged:: 0.1.1
'collection' setting renamed to 'resource' (data_relation).
Fix order of string arguments in exception message.
.. versionchanged:: 0.1.0
Validation for 'embeddable' fields.
.. versionchanged:: 0.0.5
Validation of the 'data_relation' field rule.
Now collecting offending items in a list and inserting results into
the exception message.
"""
resource_settings = self.config['DOMAIN'][resource]
# ensure automatically handled fields aren't defined
fields = [eve.DATE_CREATED, eve.LAST_UPDATED, eve.ETAG]
if resource_settings['versioning'] is True:
fields += [
self.config['VERSION'],
self.config['LATEST_VERSION'],
self.config['ID_FIELD'] + self.config['VERSION_ID_SUFFIX']]
if resource_settings['soft_delete'] is True:
fields += [self.config['DELETED']]
offenders = []
for field in fields:
if field in schema:
offenders.append(field)
if eve.ID_FIELD in schema and \
schema[eve.ID_FIELD]['type'] == 'objectid':
offenders.append(eve.ID_FIELD)
if offenders:
raise SchemaException('field(s) "%s" not allowed in "%s" schema '
'(they will be handled automatically).'
% (', '.join(offenders), resource))
# check data_relation rules
for field, ruleset in schema.items():
if 'data_relation' in ruleset:
if 'resource' not in ruleset['data_relation']:
raise SchemaException("'resource' key is mandatory for "
"the 'data_relation' rule in "
"'%s: %s'" % (resource, field))
if ruleset['data_relation'].get('embeddable', False):
# special care for data_relations with a version
value_field = ruleset['data_relation']['field']
if ruleset['data_relation'].get('version', False):
if 'schema' not in ruleset or \
value_field not in ruleset['schema'] or \
'type' not in ruleset['schema'][value_field]:
raise SchemaException(
"Must defined type for '%s' in schema when "
"declaring an embedded data_relation with"
" version." % value_field
)
# TODO are there other mandatory settings? Validate them here
def set_defaults(self):
""" When not provided, fills individual resource settings with default
or global configuration settings.
.. versionchanged:: 0.4
`versioning`
`VERSION` added to automatic projection (when applicable)
.. versionchanged:: 0.2
Setting of actual resource defaults is delegated to
_set_resource_defaults().
.. versionchanged:: 0.1.1
'default' values that could be assimilated to None (0, None, "")
would be ignored.
'dates' helper removed as datetime conversion is now handled by
the eve.methods.common.data_parse function.
.. versionchanged:: 0.1.0
'embedding'.
Support for optional HATEOAS.
.. versionchanged:: 0.0.9
'auth_username_field' renamed to 'auth_field'.
        Always include automatic fields regardless of datasource projections.
.. versionchanged:: 0.0.8
'mongo_write_concern'
.. versionchanged:: 0.0.7
'extra_response_fields'
.. versionchanged:: 0.0.6
'datasource[projection]'
'projection',
'allow_unknown'
.. versionchanged:: 0.0.5
'auth_username_field'
'filters',
'sorting',
'pagination'.
.. versionchanged:: 0.0.4
'defaults',
'datasource',
'public_methods',
'public_item_methods',
'allowed_roles',
'allowed_item_roles'.
.. versionchanged:: 0.0.3
`item_title` default value.
"""
for resource, settings in self.config['DOMAIN'].items():
self._set_resource_defaults(resource, settings)
def _set_resource_defaults(self, resource, settings):
""" Low-level method which sets default values for one resource.
.. versionchanged:: 0.6
Support for 'mongo_indexes'.
.. versionchanged:: 0.5
Don't set default projection if 'allow_unknown' is active (#497).
'internal_resource'
.. versionchanged:: 0.3
Set projection to None when schema is not provided for the resource.
Support for '_media' helper.
.. versionchanged:: 0.2
'resource_title',
'default_sort',
'embedded_fields'.
        Support for endpoint-level authentication classes.
"""
settings.setdefault('url', resource)
settings.setdefault('resource_methods',
self.config['RESOURCE_METHODS'])
settings.setdefault('public_methods',
self.config['PUBLIC_METHODS'])
settings.setdefault('allowed_roles', self.config['ALLOWED_ROLES'])
settings.setdefault('allowed_read_roles',
self.config['ALLOWED_READ_ROLES'])
settings.setdefault('allowed_write_roles',
self.config['ALLOWED_WRITE_ROLES'])
settings.setdefault('cache_control', self.config['CACHE_CONTROL'])
settings.setdefault('cache_expires', self.config['CACHE_EXPIRES'])
settings.setdefault('item_lookup_field',
self.config['ITEM_LOOKUP_FIELD'])
settings.setdefault('item_url', self.config['ITEM_URL'])
settings.setdefault('resource_title', settings['url'])
settings.setdefault('item_title',
resource.rstrip('s').capitalize())
settings.setdefault('item_lookup', self.config['ITEM_LOOKUP'])
settings.setdefault('public_item_methods',
self.config['PUBLIC_ITEM_METHODS'])
settings.setdefault('allowed_item_roles',
self.config['ALLOWED_ITEM_ROLES'])
settings.setdefault('allowed_item_read_roles',
self.config['ALLOWED_ITEM_READ_ROLES'])
settings.setdefault('allowed_item_write_roles',
self.config['ALLOWED_ITEM_WRITE_ROLES'])
settings.setdefault('allowed_filters',
self.config['ALLOWED_FILTERS'])
settings.setdefault('sorting', self.config['SORTING'])
settings.setdefault('embedding', self.config['EMBEDDING'])
settings.setdefault('embedded_fields', [])
settings.setdefault('pagination', self.config['PAGINATION'])
settings.setdefault('projection', self.config['PROJECTION'])
settings.setdefault('versioning', self.config['VERSIONING'])
settings.setdefault('soft_delete', self.config['SOFT_DELETE'])
settings.setdefault('internal_resource',
self.config['INTERNAL_RESOURCE'])
settings.setdefault('etag_ignore_fields', None)
        # TODO make sure that we really need the test below
if settings['item_lookup']:
item_methods = self.config['ITEM_METHODS']
else:
item_methods = eve.ITEM_METHODS
settings.setdefault('item_methods', item_methods)
settings.setdefault('auth_field',
self.config['AUTH_FIELD'])
settings.setdefault('allow_unknown', self.config['ALLOW_UNKNOWN'])
settings.setdefault('extra_response_fields',
self.config['EXTRA_RESPONSE_FIELDS'])
settings.setdefault('mongo_write_concern',
self.config['MONGO_WRITE_CONCERN'])
settings.setdefault('mongo_indexes', {})
settings.setdefault('hateoas',
self.config['HATEOAS'])
settings.setdefault('authentication', self.auth if self.auth else None)
# empty schemas are allowed for read-only access to resources
schema = settings.setdefault('schema', {})
self.set_schema_defaults(schema)
datasource = {}
settings.setdefault('datasource', datasource)
settings['datasource'].setdefault('source', resource)
settings['datasource'].setdefault('filter', None)
settings['datasource'].setdefault('default_sort', None)
projection = settings['datasource'].get('projection')
projection = projection or {}
exclusion = any((v for k, v in projection.items() if v == 0))
if not exclusion and len(schema) and \
settings['allow_unknown'] is False:
# enable retrieval of actual schema fields only. Eventual db
# fields not included in the schema won't be returned.
# despite projection, automatic fields are always included.
projection[self.config['ID_FIELD']] = 1
projection[self.config['LAST_UPDATED']] = 1
projection[self.config['DATE_CREATED']] = 1
projection[self.config['ETAG']] = 1
if settings['versioning'] is True:
projection[self.config['VERSION']] = 1
projection[
self.config['ID_FIELD'] +
self.config['VERSION_ID_SUFFIX']] = 1
projection.update(dict((field, 1) for (field) in schema))
if settings['soft_delete'] is True:
projection[self.config['DELETED']] = 1
else:
# all fields are returned.
projection = None
settings['datasource'].setdefault('projection', projection)
# 'defaults' helper set contains the names of fields with default
# values in their schema definition.
# TODO support default values for embedded documents.
settings['defaults'] = build_defaults(schema)
# list of all media fields for the resource
settings['_media'] = [field for field, definition in schema.items() if
definition.get('type') == 'media']
if settings['_media'] and not self.media:
            raise ConfigException('A media storage class of type '
                                  'eve.io.media.MediaStorage must be defined '
                                  'for "media" fields to be properly stored.')
def set_schema_defaults(self, schema):
""" When not provided, fills individual schema settings with default
or global configuration settings.
:param schema: the resource schema to be initialized with default
values
.. versionchanged: 0.0.7
Setting the default 'field' value would not happen if the
'data_relation' was nested deeper than the first schema level (#60).
.. versionadded: 0.0.5
"""
# TODO fill schema{} defaults, like field type, etc.
# set default 'field' value for all 'data_relation' rulesets, however
# nested
for data_relation in list(extract_key_values('data_relation', schema)):
data_relation.setdefault('field', self.config['ID_FIELD'])
# TODO: find a way to autofill "self.app.config['VERSION']: \
# {'type': 'integer'}" for data_relations
@property
def api_prefix(self):
""" Prefix to API endpoints.
.. versionadded:: 0.2
"""
return api_prefix(self.config['URL_PREFIX'],
self.config['API_VERSION'])
def _add_resource_url_rules(self, resource, settings):
""" Builds the API url map for one resource. Methods are enabled for
each mapped endpoint, as configured in the settings.
.. versionchanged:: 0.5
Don't add resource to url rules if it's flagged as internal.
Strip regexes out of config.URLS helper. Closes #466.
.. versionadded:: 0.2
"""
self.config['SOURCES'][resource] = settings['datasource']
if settings['internal_resource']:
return
url = '%s/%s' % (self.api_prefix, settings['url'])
pretty_url = settings['url']
if '<' in pretty_url:
pretty_url = pretty_url[:pretty_url.index('<') + 1] + \
pretty_url[pretty_url.rindex(':') + 1:]
self.config['URLS'][resource] = pretty_url
# resource endpoint
endpoint = resource + "|resource"
self.add_url_rule(url, endpoint, view_func=collections_endpoint,
methods=settings['resource_methods'] + ['OPTIONS'])
# item endpoint
if settings['item_lookup']:
item_url = '%s/<%s:%s>' % (url, settings['item_url'],
settings['item_lookup_field'])
endpoint = resource + "|item_lookup"
self.add_url_rule(item_url, endpoint,
view_func=item_endpoint,
methods=settings['item_methods'] + ['OPTIONS'])
if 'PATCH' in settings['item_methods']:
# support for POST with X-HTTM-Method-Override header for
# clients not supporting PATCH. Also see item_endpoint() in
# endpoints.py
endpoint = resource + "|item_post_override"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['POST'])
# also enable an alternative lookup/endpoint if allowed
lookup = settings.get('additional_lookup')
if lookup:
l_type = settings['schema'][lookup['field']]['type']
if l_type == 'integer':
item_url = '%s/<int:%s>' % (url, lookup['field'])
else:
item_url = '%s/<%s:%s>' % (url, lookup['url'],
lookup['field'])
endpoint = resource + "|item_additional_lookup"
self.add_url_rule(item_url, endpoint, view_func=item_endpoint,
methods=['GET', 'OPTIONS'])
def _init_url_rules(self):
""" Builds the API url map. Methods are enabled for each mapped
endpoint, as configured in the settings.
.. versionchanged:: 0.4
Renamed from '_add_url_rules' to '_init_url_rules' to make code more
DRY. Individual resource rules get built from register_resource now.
.. versionchanged:: 0.2
Delegate adding of resource rules to _add_resource_rules().
.. versionchanged:: 0.1.1
Simplified URL rules. Not using regexes anymore to return the
endpoint URL to the endpoint function. This allows for nested
endpoints to function properly.
.. versionchanged:: 0.0.9
Handle the case of 'additional_lookup' field being an integer.
.. versionchanged:: 0.0.5
Support for Cross-Origin Resource Sharing. 'OPTIONS' method is
explicitly routed to standard endpoints to allow for proper CORS
processing.
.. versionchanged:: 0.0.4
config.SOURCES. Maps resources to their datasources.
.. versionchanged:: 0.0.3
Support for API_VERSION as an endpoint prefix.
"""
# helpers
self.config['URLS'] = {} # maps resources to urls
self.config['SOURCES'] = {} # maps resources to their datasources
# we choose not to care about trailing slashes at all.
# Both '/resource/' and '/resource' will work, same with
# '/resource/<id>/' and '/resource/<id>'
self.url_map.strict_slashes = False
# home page (API entry point)
self.add_url_rule('%s/' % self.api_prefix, 'home',
view_func=home_endpoint, methods=['GET', 'OPTIONS'])
def register_resource(self, resource, settings):
""" Registers new resource to the domain.
Under the hood this validates given settings, updates default values
and adds necessary URL routes (builds api url map).
If there exists some resource with given name, it is overwritten.
:param resource: resource name.
:param settings: settings for given resource.
.. versionchanged:: 0.6
Support for 'mongo_indexes'.
.. versionchanged:: 0.4
Support for document versioning.
.. versionadded:: 0.2
"""
# this line only makes sense when we call this function outside of the
# standard Eve setup routine, but it doesn't hurt to still call it
self.config['DOMAIN'][resource] = settings
# set up resource
self._set_resource_defaults(resource, settings)
self._validate_resource_settings(resource, settings)
self._add_resource_url_rules(resource, settings)
# add rules for version control collections if appropriate
if settings['versioning'] is True:
versioned_resource = resource + self.config['VERSIONS']
self.config['DOMAIN'][versioned_resource] = \
copy.deepcopy(self.config['DOMAIN'][resource])
self.config['DOMAIN'][versioned_resource]['datasource']['source'] \
+= self.config['VERSIONS']
self.config['SOURCES'][versioned_resource] = \
copy.deepcopy(self.config['SOURCES'][resource])
self.config['SOURCES'][versioned_resource]['source'] += \
self.config['VERSIONS']
# the new versioned resource also needs URL rules
self._add_resource_url_rules(
versioned_resource,
self.config['DOMAIN'][versioned_resource]
)
# create the mongo db indexes
mongo_indexes = self.config['DOMAIN'][resource]['mongo_indexes']
if mongo_indexes:
for name, value in mongo_indexes.items():
if isinstance(value, tuple):
list_of_keys, index_options = value
else:
list_of_keys = value
index_options = {}
create_index(self, resource, name, list_of_keys, index_options)
def register_error_handlers(self):
""" Register custom error handlers so we make sure that all errors
return a parseable body.
.. versionadded:: 0.4
"""
for code in [400, 401, 403, 404, 405, 406, 409, 410, 422]:
self.error_handler_spec[None][code] = error_endpoint
def _init_oplog(self):
""" If enabled, configures the OPLOG endpoint.
.. versionadded:: 0.5
"""
name, endpoint, audit = (
self.config['OPLOG_NAME'],
self.config['OPLOG_ENDPOINT'],
self.config['OPLOG_AUDIT']
)
if endpoint:
settings = self.config['DOMAIN'].setdefault(name, {})
settings.setdefault('url', endpoint)
settings.setdefault('datasource', {'source': name})
# this endpoint is always read-only
settings['resource_methods'] = ['GET']
settings['item_methods'] = ['GET']
# schema is also fixed. it is needed because otherwise we
# would end up exposing the AUTH_FIELD when User-Restricted-
# Resource-Access is enabled.
settings['schema'] = {
'r': {},
'o': {},
'i': {},
}
if audit:
settings['schema'].update(
{
'ip': {},
'c': {}
}
)
def _init_media_endpoint(self):
endpoint = self.config['MEDIA_ENDPOINT']
if endpoint:
media_url = '%s/%s/<%s:_id>' % (self.api_prefix,
endpoint,
self.config['MEDIA_URL'])
self.add_url_rule(media_url, 'media',
view_func=media_endpoint, methods=['GET'])
|
amagdas/eve
|
eve/flaskapp.py
|
Python
|
bsd-3-clause
| 34,822
|
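# --- Editor's illustration (not part of the dataset files) -------------------
# A minimal usage sketch for the Eve class above. Per load_config() (0.2
# changelog), settings may be passed as a dict; the 'people'/'books' resources
# and their one-field schemas are assumptions for illustration only, and a
# reachable MongoDB with default settings is assumed.
from eve import Eve

my_settings = {
    'DOMAIN': {
        'people': {
            'schema': {'name': {'type': 'string'}},
        },
    },
}

app = Eve(settings=my_settings)
# register_resource() can also add endpoints after initialization:
app.register_resource('books', {'schema': {'title': {'type': 'string'}}})

if __name__ == '__main__':
    app.run()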
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BinaryField': 'bytea',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
output = []
if f.db_index or f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(tablespace)
if tablespace_sql:
tablespace_sql = ' ' + tablespace_sql
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                    style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
if not f.unique:
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
return output
|
edisonlz/fruit
|
web_project/base/site-packages/django/db/backends/postgresql_psycopg2/creation.py
|
Python
|
apache-2.0
| 3,814
|
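# --- Editor's illustration (not part of the dataset files) -------------------
# For a CharField(db_index=True) column "title" on table "books", the
# sql_indexes_for_field() method above emits two statements shaped roughly
# like the following (tablespace omitted; the second index carries the
# operator class needed for LIKE queries outside the C locale):
#
#   CREATE INDEX "books_title" ON "books" ("title");
#   CREATE INDEX "books_title_like" ON "books" ("title" varchar_pattern_ops);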
from json import dumps
def test_ffflash_access_for_with_empty_paths(tmpdir, fffake):
f = fffake(
tmpdir.join('api_file.json'),
nodelist=tmpdir.join('nodelist.json'),
rankfile=tmpdir.join('rankfile.json'),
sidecars=[tmpdir.join('side.yaml'), tmpdir.join('cars.yaml')]
)
assert f.access_for('api') is False
assert f.access_for('nodelist') is False
assert f.access_for('sidecars') is False
assert f.access_for('rankfile') is False
assert f.access_for('whatever') is False
assert tmpdir.remove() is None
def test_ffflash_access_for_with_correct_paths(tmpdir, fffake):
apifile = tmpdir.join('api_file.json')
apifile.write_text(dumps({'a': 'b'}), 'utf-8')
nodelist = tmpdir.join('nodelist.json')
nodelist.write_text(dumps({'a': 'b'}), 'utf-8')
assert tmpdir.listdir() == [apifile, nodelist]
f = fffake(
apifile, nodelist=nodelist, rankfile=tmpdir.join('rankfile.json'),
sidecars=[tmpdir.join('side.yaml'), tmpdir.join('cars.yaml')]
)
assert f.access_for('api') is True
assert f.access_for('nodelist') is True
assert f.access_for('sidecars') is True
assert f.access_for('rankfile') is True
assert f.access_for('whatever') is False
assert tmpdir.remove() is None
|
spookey/ffflash
|
tests/main/test_ffflash_access_for.py
|
Python
|
bsd-3-clause
| 1,297
|
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
# Returns a list of the puzzle urls from the given log file,
# extracting the hostname from the filename itself.
# Screens out duplicate urls and returns the urls sorted into
# increasing order.
def read_urls(filename):
print "filename: " + filename.split("_")[1]
http_path = "http://" + filename.split("_")[1]
print "filename: " + http_path
f = open(filename, 'r')
# Feed the file text into findall(); it returns a list of all the found strings
# years = re.findall(r'(Popularity in)\s([\d]{4})(</h)', f.read())
lines_with_puzzle = 0
strings = []
    for line in f:  # iterate over the lines of the file
        if line.find('puzzle') == -1: continue
        lines_with_puzzle += 1
        # note: '~' added to the character class so paths like the docstring's
        # /~foo/puzzle-bar-aaab.jpg example actually match
        match = re.search(r'(GET\s)([a-z~/-]*\.jpg)(\sHTTP)', line)
        if match is None: continue
        image_slice_url = http_path + match.group(2)
        if image_slice_url in strings: continue
        strings.append(image_slice_url)
    print "lines mentioning 'puzzle':", lines_with_puzzle
    print "unique puzzle urls:", len(strings)
return sorted(strings)
# Given the urls already in the correct order, downloads
# each image into the given directory.
# Gives the images local filenames img0, img1, and so on.
# Creates an index.html in the directory
# with an img tag to show each local image file.
# Creates the directory if necessary.
def download_images(img_urls, dest_dir):
    if not os.path.exists(dest_dir):
        os.mkdir(dest_dir)
    f = open(dest_dir + '/index.html', 'w')
    index_html_content = "<html><body>"
    i = 0
    for img_url in img_urls:
        img_name = "img" + str(i) + ".jpg"
        print '\n' + "Retrieving " + img_url + ", saving as " + img_name + " ..."
        urllib.urlretrieve(img_url, dest_dir + "/" + img_name)
        index_html_content += "<img src=\"" + img_name + "\">"
        i += 1
    index_html_content += "</body></html>"
    f.write(index_html_content)
    f.close()
def main():
args = sys.argv[1:]
if not args:
print 'usage: [--todir dir] logfile '
sys.exit(1)
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
img_urls = read_urls(args[0])
if todir:
download_images(img_urls, todir)
else:
print '\n'.join(img_urls)
if __name__ == '__main__':
main()
|
vancouverwill/google-python-exercises
|
logpuzzle/logpuzzle.py
|
Python
|
apache-2.0
| 2,823
|
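# --- Editor's illustration (not part of the dataset files) -------------------
# Example invocation of the logpuzzle script above; the log filename is a
# placeholder. read_urls() derives the hostname from the part of the filename
# after the first underscore, so "animal_code.google.com" would yield
# "http://code.google.com" as the url prefix:
#
#   python logpuzzle.py --todir puzzledir animal_code.google.com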
# coding=utf-8
# TestBenchmarkSwiftDictionary.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbbench import *
import lldbsuite.test.decorators as decorators
import lldbsuite.test.lldbutil as lldbutil
class TestBenchmarkSwiftDictionary(BenchBase):
mydir = TestBase.compute_mydir(__file__)
@decorators.benchmarks_test
def test_run_command(self):
"""Benchmark the Swift dictionary data formatter"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the Swift dictionary data formatter"""
self.runCmd("file " + getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect('frame variable -A dict', substrs=['[300]', '300'])
sw.stop()
print("time to print: %s" % (sw))
|
apple/swift-lldb
|
packages/Python/lldbsuite/test/benchmarks/swiftdictionary/TestBenchmarkSwiftDictionary.py
|
Python
|
apache-2.0
| 2,461
|
from lnst.Common.DeviceError import DeviceError
from lnst.Devices.Device import Device
from lnst.Devices.BridgeDevice import BridgeDevice
from lnst.Devices.OvsBridgeDevice import OvsBridgeDevice
from lnst.Devices.BondDevice import BondDevice
from lnst.Devices.TeamDevice import TeamDevice
from lnst.Devices.MacvlanDevice import MacvlanDevice
from lnst.Devices.VlanDevice import VlanDevice
from lnst.Devices.VxlanDevice import VxlanDevice
from lnst.Devices.VtiDevice import VtiDevice, Vti6Device
from lnst.Devices.VethDevice import VethDevice, PairedVethDevice
from lnst.Devices.VethPair import VethPair
from lnst.Devices.MacsecDevice import MacsecDevice
from lnst.Devices.RemoteDevice import RemoteDevice, remotedev_decorator
device_classes = [
("Device", Device),
("BridgeDevice", BridgeDevice),
("OvsBridgeDevice", OvsBridgeDevice),
("MacvlanDevice", MacvlanDevice),
("VlanDevice", VlanDevice),
("VxlanDevice", VxlanDevice),
("VethDevice", VethDevice),
("PairedVethDevice", PairedVethDevice),
("VtiDevice", VtiDevice),
("Vti6Device", Vti6Device),
("BondDevice", BondDevice),
("TeamDevice", TeamDevice),
("MacsecDevice", MacsecDevice)]
for name, cls in device_classes:
globals()[name] = remotedev_decorator(cls)
# Remove PairedVethDevice from globals... it doesn't make sense to use it on
# its own, not even for isinstance checks... VethDevice works fine for that
del globals()["PairedVethDevice"]
|
jpirko/lnst
|
lnst/Devices/__init__.py
|
Python
|
gpl-2.0
| 1,504
|
# Django settings for redeer project.
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'redeer.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Parse database configuration from $DATABASE_URL
import dj_database_url
heroku_config = dj_database_url.config()
if heroku_config:
DATABASES['default'] = heroku_config
STATIC_ROOT = '/app/static_root'
SECRET_KEY = os.environ['SECRET_KEY']
else: # local dev
DEBUG = True
STATIC_ROOT = 'static_root'
SECRET_KEY = 'for testing only'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
try:
DOMAIN_NAME = os.environ['DOMAIN_NAME']
ALLOWED_HOSTS = [DOMAIN_NAME]
except KeyError:
    ALLOWED_HOSTS = []
TIME_ZONE = 'Europe/London'
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
# CWD = os.path.dirname(os.path.realpath(__file__))
# STATIC_ROOT = os.path.join(CWD, 'static_root')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'redeer.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'redeer.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'south',
'redeer.api',
'redeer.feeds',
'redeer.gui',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
davidszotten/redeer
|
redeer/settings.py
|
Python
|
mit
| 4,840
|
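# --- Editor's illustration (not part of the dataset files) -------------------
# The dj_database_url.config() call in the settings above parses the
# $DATABASE_URL environment variable. Roughly, on Heroku a value such as
#   DATABASE_URL=postgres://user:secret@host:5432/appdb
# becomes a Django DATABASES entry along these lines:
#   {'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'appdb',
#    'USER': 'user', 'PASSWORD': 'secret', 'HOST': 'host', 'PORT': 5432}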
# The content of this file was generated using the Python profile of libCellML 0.2.0.
from enum import Enum
from math import *
__version__ = "0.3.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 3
VARIABLE_COUNT = 19
class VariableType(Enum):
VARIABLE_OF_INTEGRATION = 1
STATE = 2
CONSTANT = 3
COMPUTED_CONSTANT = 4
ALGEBRAIC = 5
EXTERNAL = 6
VOI_INFO = {"name": "time", "units": "millisecond", "component": "environment", "type": VariableType.VARIABLE_OF_INTEGRATION}
STATE_INFO = [
{"name": "m", "units": "dimensionless", "component": "sodium_channel_m_gate", "type": VariableType.STATE},
{"name": "h", "units": "dimensionless", "component": "sodium_channel_h_gate", "type": VariableType.STATE},
{"name": "n", "units": "dimensionless", "component": "potassium_channel_n_gate", "type": VariableType.STATE}
]
VARIABLE_INFO = [
{"name": "V", "units": "millivolt", "component": "membrane", "type": VariableType.EXTERNAL},
{"name": "g_L", "units": "milliS_per_cm2", "component": "leakage_current", "type": VariableType.CONSTANT},
{"name": "Cm", "units": "microF_per_cm2", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "E_R", "units": "millivolt", "component": "membrane", "type": VariableType.CONSTANT},
{"name": "g_K", "units": "milliS_per_cm2", "component": "potassium_channel", "type": VariableType.CONSTANT},
{"name": "g_Na", "units": "milliS_per_cm2", "component": "sodium_channel", "type": VariableType.CONSTANT},
{"name": "i_Stim", "units": "microA_per_cm2", "component": "membrane", "type": VariableType.ALGEBRAIC},
{"name": "E_L", "units": "millivolt", "component": "leakage_current", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_L", "units": "microA_per_cm2", "component": "leakage_current", "type": VariableType.ALGEBRAIC},
{"name": "E_Na", "units": "millivolt", "component": "sodium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_Na", "units": "microA_per_cm2", "component": "sodium_channel", "type": VariableType.EXTERNAL},
{"name": "alpha_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_m", "units": "per_millisecond", "component": "sodium_channel_m_gate", "type": VariableType.ALGEBRAIC},
{"name": "alpha_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "beta_h", "units": "per_millisecond", "component": "sodium_channel_h_gate", "type": VariableType.ALGEBRAIC},
{"name": "E_K", "units": "millivolt", "component": "potassium_channel", "type": VariableType.COMPUTED_CONSTANT},
{"name": "i_K", "units": "microA_per_cm2", "component": "potassium_channel", "type": VariableType.ALGEBRAIC},
{"name": "alpha_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.EXTERNAL},
{"name": "beta_n", "units": "per_millisecond", "component": "potassium_channel_n_gate", "type": VariableType.ALGEBRAIC}
]
def leq_func(x, y):
return 1.0 if x <= y else 0.0
def geq_func(x, y):
return 1.0 if x >= y else 0.0
def and_func(x, y):
return 1.0 if bool(x) & bool(y) else 0.0
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialise_states_and_constants(states, variables):
variables[1] = 0.3
variables[2] = 1.0
variables[3] = 0.0
variables[4] = 36.0
variables[5] = 120.0
states[0] = 0.05
states[1] = 0.6
states[2] = 0.325
def compute_computed_constants(variables):
variables[7] = variables[3]-10.613
variables[9] = variables[3]-115.0
variables[15] = variables[3]+12.0
def compute_rates(voi, states, rates, variables, external_variable):
variables[0] = external_variable(voi, states, rates, variables, 0)
variables[11] = 0.1*(variables[0]+25.0)/(exp((variables[0]+25.0)/10.0)-1.0)
variables[12] = 4.0*exp(variables[0]/18.0)
rates[0] = variables[11]*(1.0-states[0])-variables[12]*states[0]
variables[13] = 0.07*exp(variables[0]/20.0)
variables[14] = 1.0/(exp((variables[0]+30.0)/10.0)+1.0)
rates[1] = variables[13]*(1.0-states[1])-variables[14]*states[1]
variables[17] = external_variable(voi, states, rates, variables, 17)
variables[18] = 0.125*exp(variables[0]/80.0)
rates[2] = variables[17]*(1.0-states[2])-variables[18]*states[2]
def compute_variables(voi, states, rates, variables, external_variable):
variables[6] = -20.0 if and_func(geq_func(voi, 10.0), leq_func(voi, 10.5)) else 0.0
variables[8] = variables[1]*(variables[0]-variables[7])
variables[10] = external_variable(voi, states, rates, variables, 10)
variables[16] = variables[4]*pow(states[2], 4.0)*(variables[0]-variables[15])
|
nickerso/libcellml
|
tests/resources/generator/hodgkin_huxley_squid_axon_model_1952/model.external.py
|
Python
|
apache-2.0
| 4,820
|
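# --- Editor's illustration (not part of the dataset files) -------------------
# A crude forward-Euler driver for the generated module above. The
# external_variable callback must supply values for the EXTERNAL entries
# (indices 0, 10 and 17); the constant returned here is an arbitrary stand-in,
# not physiology, and the module is assumed importable as "model".
from model import (STATE_COUNT, create_states_array, create_variables_array,
                   initialise_states_and_constants, compute_computed_constants,
                   compute_rates, compute_variables)

def external_variable(voi, states, rates, variables, index):
    return 0.0  # placeholder for V (0), i_Na (10) and alpha_n (17)

states = create_states_array()
variables = create_variables_array()
initialise_states_and_constants(states, variables)
compute_computed_constants(variables)

voi, dt = 0.0, 0.01
rates = [0.0] * STATE_COUNT
for _ in range(10):
    compute_rates(voi, states, rates, variables, external_variable)
    states = [s + dt * r for s, r in zip(states, rates)]  # Euler step
    voi += dt
compute_variables(voi, states, rates, variables, external_variable)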
class InvalidMetadataError(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class NoFilesToBackUpError(Exception):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
harsh8398/pybackup
|
pybackup/errors.py
|
Python
|
mit
| 244
|
from flask.ext.wtf import Form
from wtforms import StringField, TextField
from wtforms.validators import DataRequired
class addForm(Form):
subject = StringField('subject', validators=[DataRequired()])
code = StringField('code', validators=[DataRequired()])
homework = TextField('homework', validators=[DataRequired()])
|
Cuiyn/018works
|
app/forms.py
|
Python
|
gpl-2.0
| 331
|
import sys
from os.path import join, isfile
import os
import urllib.request
from zipfile import ZipFile
from win32com.shell import shell, shellcon
# download chrome driver
if not isfile('chromedriver.exe'):
chrome_driver_zipfilename = 'chromedriver_win32.zip'
chrome_driver_url='http://chromedriver.storage.googleapis.com/2.11/' + chrome_driver_zipfilename
urllib.request.urlretrieve(chrome_driver_url, chrome_driver_zipfilename)
# extract chrome driver
zip_ref = ZipFile(chrome_driver_zipfilename, 'r')
zip_ref.extractall(os.path.dirname(os.path.realpath(__file__)))
zip_ref.close()
# cleanup chrome driver zip
os.remove(chrome_driver_zipfilename)
# get url and refresh interval (the original left these undefined; assume
# they follow the -install flag on the command line)
url = sys.argv[2] if len(sys.argv) > 2 else 'http://example.com'
interval = int(sys.argv[3]) if len(sys.argv) > 3 else 60
# delete existing shortcuts
# install new shortcut
script_path = os.path.realpath(__file__)
working_dir = os.path.dirname(script_path)
pyw_executable = join(working_dir, "python3", "pythonw.exe")
shortcut_filename = "webkiosk - %s.lnk" % url
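# --- Editor's sketch: the original calls create_shortcut() without defining
# it. This hedged helper uses the standard pywin32 IShellLink recipe; the
# argument order matches the call sites below.
import pythoncom

def create_shortcut(target, description, link_path, arguments, workdir):
    link = pythoncom.CoCreateInstance(
        shell.CLSID_ShellLink, None, pythoncom.CLSCTX_INPROC_SERVER,
        shell.IID_IShellLink)
    link.SetPath(target)
    link.SetDescription(description)
    link.SetArguments(arguments)
    link.SetWorkingDirectory(workdir)
    # persist the .lnk file to disk
    link.QueryInterface(pythoncom.IID_IPersistFile).Save(link_path, 0)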
if sys.argv[1] == '-install':
# Get paths to the desktop and start menu
desktop_path = shell.SHGetFolderPath(0, shellcon.CSIDL_DESKTOPDIRECTORY, None, 0)
startmenu_path = shell.SHGetFolderPath(0, shellcon.CSIDL_STARTMENU, None, 0)
startup_path = shell.SHGetFolderPath(0, shellcon.CSIDL_STARTUP, None, 0)
# Create shortcuts.
for path in [desktop_path, startmenu_path, startup_path]:
create_shortcut(pyw_executable,
"Web Kiosk for %s, refreshed every %d seconds" % (url, interval),
join(path, shortcut_filename),
script_path,
working_dir)
|
bigjonroberts/webkiosk
|
install.py
|
Python
|
mit
| 1,636
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# Copyright (C) Gabriel Potter <gabriel@potter.fr>
# This program is published under a GPLv2 license
"""
Python 2 and 3 link classes.
"""
from __future__ import absolute_import
import base64
import binascii
import gzip
import struct
import sys
import scapy.modules.six as six
###########
# Python3 #
###########
def lambda_tuple_converter(func):
"""
Converts a Python 2 function as
lambda (x,y): x + y
In the Python 3 format:
lambda x,y : x + y
"""
if func is not None and func.__code__.co_argcount == 1:
return lambda *args: func(args[0] if len(args) == 1 else args)
else:
return func
if six.PY2:
bytes_encode = plain_str = str
chb = lambda x: x if isinstance(x, str) else chr(x)
orb = ord
def raw(x):
"""Builds a packet and returns its bytes representation.
        This function is, and always will be, cross-version compatible"""
if hasattr(x, "__bytes__"):
return x.__bytes__()
return bytes(x)
else:
def raw(x):
"""Builds a packet and returns its bytes representation.
        This function is, and always will be, cross-version compatible"""
return bytes(x)
def bytes_encode(x):
"""Ensure that the given object is bytes.
If the parameter is a packet, raw() should be preferred.
"""
if isinstance(x, str):
return x.encode()
return bytes(x)
if sys.version_info[0:2] <= (3, 4):
def plain_str(x):
"""Convert basic byte objects to str"""
if isinstance(x, bytes):
return x.decode(errors="ignore")
return str(x)
else:
# Python 3.5+
def plain_str(x):
"""Convert basic byte objects to str"""
if isinstance(x, bytes):
return x.decode(errors="backslashreplace")
return str(x)
def chb(x):
"""Same than chr() but encode as bytes."""
return struct.pack("!B", x)
def orb(x):
"""Return ord(x) when not already an int."""
if isinstance(x, int):
return x
return ord(x)
def bytes_hex(x):
"""Hexify a str or a bytes object"""
return binascii.b2a_hex(bytes_encode(x))
def hex_bytes(x):
"""De-hexify a str or a byte object"""
return binascii.a2b_hex(bytes_encode(x))
def base64_bytes(x):
"""Turn base64 into bytes"""
if six.PY2:
return base64.decodestring(x)
return base64.decodebytes(bytes_encode(x))
def bytes_base64(x):
"""Turn bytes into base64"""
if six.PY2:
return base64.encodestring(x).replace('\n', '')
return base64.encodebytes(bytes_encode(x)).replace(b'\n', b'')
if six.PY2:
from StringIO import StringIO
def gzip_decompress(x):
"""Decompress using gzip"""
with gzip.GzipFile(fileobj=StringIO(x), mode='rb') as fdesc:
return fdesc.read()
def gzip_compress(x):
"""Compress using gzip"""
buf = StringIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as fdesc:
fdesc.write(x)
return buf.getvalue()
else:
gzip_decompress = gzip.decompress
gzip_compress = gzip.compress
# Typing compatibility
try:
# Only required if using mypy-lang for static typing
from typing import Optional, List, Union, Callable, Any, AnyStr, Tuple, \
Sized, Dict, Pattern, cast
except ImportError:
# Let's make some fake ones.
def cast(_type, obj):
return obj
class _FakeType(object):
        # make the objects subscriptable indefinitely
def __getitem__(self, item):
return _FakeType()
Optional = _FakeType()
Union = _FakeType()
Callable = _FakeType()
List = _FakeType()
Dict = _FakeType()
Any = _FakeType()
AnyStr = _FakeType()
Tuple = _FakeType()
Pattern = _FakeType()
class Sized(object):
pass
|
6WIND/scapy
|
scapy/compat.py
|
Python
|
gpl-2.0
| 4,048
|
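# --- Editor's illustration (not part of the dataset files) -------------------
# Quick round-trips through the Python 3 helpers defined above (assumes the
# module is importable as scapy.compat and Python 3 is in use):
from scapy.compat import chb, orb, bytes_hex, hex_bytes

assert chb(65) == b'A'             # int packed into a single byte
assert orb(b'A'[0]) == 65          # already an int under Python 3
assert bytes_hex('AB') == b'4142'  # str is encoded, then hexified
assert hex_bytes('4142') == b'AB'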
'''
Created on Sep 11, 2014
@author: Yintao Song
'''
from __future__ import print_function, division, absolute_import
from .globals import *
import numpy as np
from math import cos, sin, radians
def lp2B(a, b, c, beta):
"""lattice parameters to base, for monoclinic"""
B = np.array([[a,0,0],[0,b,0],[c*np.cos(radians(beta)), 0, c*np.sin(radians(beta))]]).T
return B
@jit('boolean(f8[:], f8[:, :], i4, f8)')
def in_list(ary, arys, start, tol):
assert start < len(arys)
for i in range(start, len(arys)):
b = arys[i]
if max(np.abs(ary - b)) < tol:
return True
return False
def rm_dup(arys, tol=1e-7):
"""remove duplicated arrays"""
dup = []
for i in range(len(arys) - 1):
a = arys[i]
if in_list(a, arys, i+1, tol):
dup.append(i)
    new_arys = [arys[i] for i in range(len(arys)) if i not in dup]
return new_arys
@jit("f8[:](f8[:])")
def mv2cell(p):
for i in range(len(p)):
if p[i] < 0: p[i] += 1
if p[i] >= 1: p[i] -= 1
return p
|
yintaosong/spacegroup
|
bonds/utils.py
|
Python
|
mit
| 1,050
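# --- Editor's illustration (not part of the dataset files) -------------------
# lp2B() above packs monoclinic lattice parameters (a, b, c, beta) into a 3x3
# basis whose columns are the lattice vectors; beta = 90 degrees degenerates
# to an orthogonal cell. The import path below is an assumption.
from bonds.utils import lp2B

B = lp2B(1.0, 2.0, 3.0, 90.0)
# columns: a = (1,0,0), b = (0,2,0), c = (0,0,3) up to rounding in cos(90 deg)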
|
"""
This file is meant to make it easy to load the main features of
MoviePy by simply typing:
>>> from moviepy.editor import *
In particular it will load many effects from the video.fx and audio.fx
folders and turn them into VideoClip methods, so that instead of
>>> clip.fx( vfx.resize, 2 ) or equivalently vfx.resize(clip, 2)
we can write
>>> clip.resize(2)
It also starts a PyGame session (if PyGame is installed) and enables
clip.preview().
"""
__all__ = [
"afx",
"AudioClip",
"AudioFileClip",
"BitmapClip",
"clips_array",
"ColorClip",
"CompositeAudioClip",
"CompositeVideoClip",
"concatenate_audioclips",
"concatenate_videoclips",
"convert_to_seconds",
"download_webfile",
"ffmpeg_tools",
"ImageClip",
"ImageSequenceClip",
"ipython_display",
"TextClip",
"transfx",
"vfx",
"VideoClip",
"VideoFileClip",
"videotools",
]
# Note that these imports could have been performed in the __init__.py
# file, but this would make the loading of moviepy slower.
import os
import inspect
# Hide the welcome message from pygame: https://github.com/pygame/pygame/issues/542
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"
# Clips
from .video.io.VideoFileClip import VideoFileClip
from .video.io.ImageSequenceClip import ImageSequenceClip
from .video.io.downloader import download_webfile
from .video.VideoClip import VideoClip, ImageClip, ColorClip, TextClip, BitmapClip
from .video.compositing.CompositeVideoClip import CompositeVideoClip, clips_array
from .video.compositing.concatenate import concatenate_videoclips
from .audio.AudioClip import AudioClip, CompositeAudioClip, concatenate_audioclips
from .audio.io.AudioFileClip import AudioFileClip
# FX
import moviepy.video.fx as vfx
import moviepy.audio.fx as afx
import moviepy.video.compositing.transitions as transfx
# Tools
import moviepy.video.tools as videotools
import moviepy.video.io.ffmpeg_tools as ffmpeg_tools
from .video.io.html_tools import ipython_display
from .tools import convert_to_seconds
try:
from .video.io.sliders import sliders
__all__.append("sliders")
except ImportError:
pass
# Transforms the effects into Clip methods so that
# they can be called with clip.resize(width=500) instead of
# clip.fx(vfx.resize, width=500)
audio_fxs = inspect.getmembers(afx, inspect.isfunction)
video_fxs = (
inspect.getmembers(vfx, inspect.isfunction)
+ inspect.getmembers(transfx, inspect.isfunction)
+ audio_fxs
)
for name, function in video_fxs:
setattr(VideoClip, name, function)
for name, function in audio_fxs:
setattr(AudioClip, name, function)
# adds easy ipython integration
VideoClip.ipython_display = ipython_display
AudioClip.ipython_display = ipython_display
# -----------------------------------------------------------------
# Previews: try to import pygame, else make methods which raise
# exceptions saying to install PyGame
# Add methods preview and show (only if pygame installed)
try:
from moviepy.video.io.preview import show, preview
except ImportError:
def preview(self, *args, **kwargs):
"""NOT AVAILABLE : clip.preview requires Pygame installed."""
raise ImportError("clip.preview requires Pygame installed")
def show(self, *args, **kwargs):
"""NOT AVAILABLE : clip.show requires Pygame installed."""
raise ImportError("clip.show requires Pygame installed")
VideoClip.preview = preview
VideoClip.show = show
try:
from moviepy.audio.io.preview import preview
except ImportError:
def preview(self, *args, **kwargs):
""" NOT AVAILABLE : clip.preview requires Pygame installed."""
raise ImportError("clip.preview requires Pygame installed")
AudioClip.preview = preview
|
kerstin/moviepy
|
moviepy/editor.py
|
Python
|
mit
| 3,760
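# --- Editor's illustration (not part of the dataset files) -------------------
# The editor module above patches fx functions onto the clip classes, so
# vfx.resize(clip, 2) can be written as clip.resize(2). A minimal sketch
# (file names are placeholders):
from moviepy.editor import VideoFileClip

clip = VideoFileClip("example.mp4").resize(2)
clip.write_videofile("example_2x.mp4")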
|
# import libraries
import numpy, pylab
from pylab import *
# plot DOF convergence graph
axis('equal')
pylab.title("Error convergence")
pylab.xlabel("CPU time")
pylab.ylabel("Error [%]")
data = numpy.loadtxt("conv_cpu_h1_aniso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="h1-FEM (aniso)")
data = numpy.loadtxt("conv_cpu_h1_iso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="h1-FEM (iso)")
data = numpy.loadtxt("conv_cpu_h2_aniso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="h2-FEM (aniso)")
data = numpy.loadtxt("conv_cpu_h2_iso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="h2-FEM (iso)")
data = numpy.loadtxt("conv_cpu_hp_aniso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="hp-FEM (aniso)")
data = numpy.loadtxt("conv_cpu_hp_iso.dat")
x = data[:, 0]
y = data[:, 1]
loglog(x, y, "-s", label="hp-FEM (iso)")
legend()
# finalize
show()
|
hanak/hermes2d
|
doc/src/img/smooth-aniso-x/plot_graph_cpu.py
|
Python
|
gpl-2.0
| 917
|
#!/usr/bin/env python
"""server-net.py
Network server to handle network connections and pass commands
to projector emulators
"""
__version__ = '0.0.2'
_v = __version__.split(".")
__version_hex__ = int(_v[0]) << 24 | \
int(_v[1]) << 16 | \
int(_v[2]) << 8
import thread
import threading
import time
import logging
import argparse
import os
import sys
from time import sleep
from socket import *
if os.geteuid() < 1000:
print "Best to run this as a normal user"
sys.exit()
# Setup logging to console
FORMAT = '%(asctime)-15s [%(name)s]: %(message)s'
logging.basicConfig(format=FORMAT, datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger()
log.setLevel(logging.WARN)
log.info("Initializing net server")
# Get arguments
use_text = """server-net.py [options]
Network server to emulate a projector for testing programs
that control projectors.
Currently only PJLink is supported.
"""
parser = argparse.ArgumentParser(prog="server-net.py",
usage=use_text)
parser.add_argument('-d', action='count',
help="Increase debugging for each -d - max 2")
parser.add_argument('--debug', action='store',
help='Set debugging level: DEBUG INFO WARN ERROR CRITICAL')
parser.add_argument('-f', '--fail', action='store', default=None,
help='Set random fail check in minutes')
parser.add_argument('--host', action='store',
help='Set interface to listen to - default all')
parser.add_argument('--lamps', action="store", default=1,
help='Specify number of lamps installed')
parser.add_argument('-p', '--password', action='store',
help='Set password for projector authorization. Set to "TESTME" to use test algorithm')
parser.add_argument('--port', action='store',
help='Set a different port to use other than default')
parser.add_argument('-u', '--user', action='store',
help="Set username for projector authorization")
parser.add_argument('-t', '--type',
default='PJLINK',
help='type of projector to emulate - default="PJLINK"')
parser.add_argument('-v', '--version',
action='version',
version='%(prog)s ' + str( __version__))
args = parser.parse_args()
# Time to process
if args.debug is not None:
    if args.debug.upper() not in "DEBUG INFO WARN ERROR CRITICAL".split():
print "--debug: invalid level: %s" % args.debug.upper()
print "Valid levels are DEBUG INFO WARN ERROR CRITICAL"
else:
log.setLevel(logging._levelNames[args.debug.upper()])
elif args.d is not None:
d = log.getEffectiveLevel() - (args.d * 10)
if d < logging.DEBUG: d = logging.DEBUG
log.setLevel(logging._levelNames[d])
print "Log level set to %s" % logging.getLevelName(log.getEffectiveLevel())
log.debug('Parsing arguments')
log.debug("args: %s" % args)
log.debug("Setting projector type")
# Only pass the arguments that your type can use. For example, if your
# projector does not need a username, then don't use the 'user' option.
if args.type.upper() == 'PJLINK':
log.info("Loading PJLink projector settings")
from servers.PJLink import *
projector = Projector(user=args.user,
password=args.password,
failcheck=args.fail,
lamps=args.lamps)
if args.host is None:
HOST = '' # Listen on all interfaces
log.info("Listening on all interfaces")
else:
try:
HOST = gethostbyname(args.host)
log.info("Listening on interface %s" % HOST)
except:
HOST = '127.0.0.1'
log.warn("Unkown host: %s" % args.host)
log.warn("Setting to localhost (127.0.0.1)")
if args.port is not None:
PORT = int(args.port)
if PORT < 1024 and os.geteuid() >= 1000:
print "Cannot use ports below 1024 as non-root user"
sys.exit(1)
log.info("Setting constants")
CR = chr(0x0D) # \r
LF = chr(0x0A) # \n
# SUFFIX needs to be defined in emulator - normally CR but can be CR+LF
log.info("Setting main variables")
ADDR = (HOST, PORT)
serversock = None # Holds port binding
main_running = False # Keep track of main service running
log.info("Creating server functions")
def send(s, d):
    """send(s, d): Sends data d to socket s"""
    log.debug("%s : Sending '%s'" % (repr(s.getpeername()), d))
    s.send("%s%s" % (d, SUFFIX))
def handler(clientsock, addr):
"""handler(clientsock, addr)
Handler for new connection. Sends socket/address to Projector() class
for connection setup.
"""
log.info("New socket created for %s" % repr(addr))
log.debug("handler: Setting %s timeout to %s" % (repr(addr), TIMEOUT_MAX))
clientsock.settimeout(TIMEOUT_MAX)
log.info("handler: Initializing projector connection")
v, data = projector.client_connect(clientsock, addr)
log.debug("handler(): Connection validated: %s - Initial data: '%s'" % \
(v, data))
if not v:
log.error("Authentication failure - closing connection to %s" % \
repr(addr))
projector.client_disconnect(addr)
clientsock.close()
return
if data is None:
log.debug("handler(): Authenticated request with no command")
else:
_d = projector.handle_request(data)
log.debug("handler(): Athenticated connection - sending %s" % _d)
clientsock.send("%s%s" % (_d, SUFFIX))
handler_running = True
while handler_running:
try:
data = clientsock.recv(BUFFER).strip()
if not data:
break
elif '\xff\xf4\xff\xfd\x06' in data:
# ^C pressed from telnet
log.info("%s: Received ^C - closing connection" % repr(addr))
break
else:
log.debug("handler(): Calling projector.handle_request(%s)" % data)
_d = projector.handle_request(data)
log.debug("handler(): Sending '%s'" % _d)
clientsock.send("%s%s" % (_d, SUFFIX))
except timeout, e:
log.info("Timeout - closing connection to %s" % repr(addr))
break
except error, e:
log.info("Socket error - closing connection to %s" % repr(addr))
break
projector.client_disconnect(addr)
log.debug("handler() closing connection to %s" % repr(addr))
clientsock.close()
handler_running = False
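# --- Hedged example (not part of the original script) ---
# Minimal sketch of a test client, assuming the server is reachable on
# localhost at the standard PJLink port 4352; '%1POWR ?' + CR is the
# PJLink power-status query. BUFFER and SUFFIX come from the emulator
# settings imported above.
def example_client(host='127.0.0.1', port=4352, command='%1POWR ?' + CR):
    """Connect, send one command and print what the server answers."""
    sock = socket(AF_INET, SOCK_STREAM)
    sock.connect((host, port))
    print sock.recv(BUFFER)  # initial banner, e.g. 'PJLINK 0'
    sock.send(command)
    print sock.recv(BUFFER)  # reply, e.g. '%1POWR=0' followed by SUFFIX
    sock.close()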
if __name__ == "__main__":
log.info("Starting main connection service address: %s" % repr(ADDR))
serversock = socket(AF_INET, SOCK_STREAM)
serversock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
serversock.bind(ADDR)
log.debug("Socket bound to address %s" % repr(serversock.getsockname()))
serversock.listen(5)
main_running = True
projector.start()
while main_running:
try:
log.info("MAIN: Waiting for connection - listening on port %s" % \
PORT)
clientsock, addr = serversock.accept()
thread.start_new_thread(handler, (clientsock, addr))
except KeyboardInterrupt:
log.warn("Keyboard interrupt - stopping")
projector.stop()
handler_running = False
main_running = False
|
alisonken1/pyProjector
|
projector/test/server-net.py
|
Python
|
gpl-2.0
| 7,205
|
# Copyright (C) 2019 Compassion CH
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import html
from odoo import models, _
class MailMessage(models.Model):
"""
Extend the mail composer so that it can send message to archived partner,
and put back the selected partner in the claim in case it was not linked.
"""
_inherit = "mail.message"
def get_message_quote(self):
"""
Helper to quote a message from another. This will return the message
formatted for quoting.
        :return: HTML of the quoted message
"""
self.ensure_one()
lib_subject = _("Subject")
lib_from = _("From")
lib_message = _("Original Message")
header1 = '<div style="font-size:10pt;color:#1f497d">' "<br></div>"
header2 = '<div style="font-size:10pt;color:#500050;">'
header3 = "----" + lib_message + "----"
br = "<br />"
email_from = "no-email"
if self.email_from:
email_from = self.email_from
        email_from = (
            "<b>"
            + lib_from
            + "</b>:"
            + html.escape(email_from).encode("ascii", "xmlcharrefreplace").decode("ascii")
        )
mail_date = "<b>Date</b>:" + str(self.date)
body = ""
if self.body and self.body != u"":
body = self.body.replace("#1f497d", "#500050") + "</div>"
subject = "<b>" + lib_subject + "</b>:"
if self.subject and self.subject != u"":
subject = (
"<b>"
+ lib_subject
+ "</b>:"
+ str(
html.escape(self.subject).encode("ascii", "xmlcharrefreplace")
or self.record_name
or ""
)
)
return (
header1
+ header2
+ header3
+ br
+ str(email_from)
+ br
+ subject
+ br
+ mail_date
+ 2 * br
+ body
)
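    # --- Hedged usage sketch (illustrative names, not part of the module) ---
    #   message = self.env["mail.message"].browse(message_id)
    #   reply_body = "<p>Thank you for your message.</p>" + message.get_message_quote()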
|
CompassionCH/compassion-modules
|
crm_request/models/mail_message.py
|
Python
|
agpl-3.0
| 2,051
|
"""
@file static_tree_like_tables.py
"""
from ..DatabaseSetup.base import Base
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
class AA(Base):
__tablename__ = "table_name_a_a"
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
@property
def serialize(self):
"""Return object data in easily serializable format"""
return {
"name": self.name,
"id": self.id}
|
ernestyalumni/Propulsion
|
T1000/T1000/Model/static_tree_like_tables.py
|
Python
|
gpl-2.0
| 504
|
#__author__ = 'FuWei'
from __future__ import print_function, division
import sys
import pdb
from os import listdir
from os.path import isfile, join
# from table import *
class o(object):
    def __init__(i, **fields):
        i.__dict__.update(fields)  # __init__ must return None, so no `return i`
class Row(object):
def __init__(i,data):
i.x = data[:-1]
i.y = data[-1]
i.w = 0
def __len__(i): return len(i.x)+1
def weights(i,l,m):
if i.y == 1:
i.w = 1/(2*l) # Y, crater
else:
i.w = 1/(2*m) # N, non-crater
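        # e.g. with l=10 craters and m=90 non-craters, each crater row gets
        # w = 1/20 = 0.05 and each non-crater row w = 1/180, so the total
        # weight of each class sums to 0.5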
def read(files):
Y, N = 0,0
rows = []
path = "../data/features"
# datafiles = [ join(path,f) for f in listdir(path) if isfile(join(path,f))]
for data in [join(path,f) for f in files]:
print(data)
with open(data,"r") as f:
            line = f.readline()  # header row, skip it
for line in f.readlines():
line = map(float, line.split(","))
if line[-1] == 1:
Y +=1
else:
N +=1
rows.append(Row(line))
map(lambda x:x.weights(Y,N),rows)
return rows
def changeHeader(f = "./data/treat"):
all_files = [ (join(f,path)) for path in listdir(f) if isfile(join(f,path))]
for one in all_files:
train = ""
tune = ""
f = open(one,"r")
count = 0
while True:
line = f.readline()
if not line:
break
else:
# pdb.set_trace()
if count == 0:
header = line.split(",")
newheader = header[:]
if "$" not in header[0]:
for item in header[:-1]:
newheader +=["$"+item]
newheader +=["$<"+header[-1]]
else:
train += ",".join(newheader)
tune +=",".join(newheader)
if count %2 == 0:
train +=",".join(line.split(","))
else:
tune +=",".join(line.split(","))
# pdb.set_trace()
count +=1
f = open(one+"train.csv","w")
f.write(train)
f.close()
f = open(one+"tune.csv","w")
f.write(tune)
f.close()
# print (content)
#
#
# content = f.readlines()
# header = content[0].split(",")
# newheader = []
# for item in header[:-1]:
# if "$" in item:
# newheader +=[item]
# else:
# newheader +=["$"+item]
# newheader += ["$<"+header[-1]]
# content[0] = ",".join(newheader)
# # pdb.set_trace()
#
# for row in content:
# with open(one+".csv","a") as f:
# f.write(row)
# pdb.set_trace()
# def addn(f = "./data/treat"):
# all_files = [ (join(f,path)) for path in listdir(f) if isfile(join(f,path))]
# for one in all_files:
# content = []
# with open(one,"r") as f:
# # pdb.set_trace()
# content = f.readlines()
# for j in content:
# content[0]+=[j+"\n"]
#
#
# for row in content:
# with open(one+".csv","a") as f:
# f.write(row)
if __name__ == "__main__":
changeHeader()
# addn()
|
ST-Data-Mining/crater
|
wei/data.py
|
Python
|
mit
| 2,913
|
#!/usr/bin/env python
import argparse
import csv
import os.path
import sys
try:
__import__('yomeka')
except ImportError:
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
__import__('yomeka')
from yomeka.classic.omeka_classic_rest_api_client import OmekaClassicRestApiClient
class YomekaCli(object):
def __init__(self, args, client, output_file):
self.__args = args
self.__client = client
self.__output_file = output_file
@staticmethod
def __csv_encode(str_):
return str_.encode('utf-8')
def _item_command(self):
self.__print_items((self.__client.get_item(self.__args.item_id),))
def _items_command(self):
self.__print_items(self.__client.get_all_items())
@classmethod
def main(cls):
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('--api-key', required=True)
argument_parser.add_argument('--endpoint-url', required=True)
argument_parser.add_argument('--format', default='csv')
argument_parser.add_argument('-o', '--output-file')
subparsers = argument_parser.add_subparsers()
items_parser = subparsers.add_parser('item', help='print one item')
items_parser.add_argument('item_id', type=int)
items_parser.set_defaults(command='item')
items_parser = subparsers.add_parser('items', help='print all items')
items_parser.set_defaults(command='items')
args = argument_parser.parse_args()
client = OmekaClassicRestApiClient(api_key=args.api_key, endpoint_url=args.endpoint_url)
if args.output_file is not None:
output_file = open(args.output_file, 'w+b')
else:
output_file = sys.stdout
inst = cls(args=args, client=client, output_file=output_file)
getattr(inst, '_' + args.command + '_command')()
def __print_items(self, items):
if self.__args.format != 'csv':
raise NotImplementedError(self.__args.format)
csv_header = (
'item_id',
'item_added',
'item_featured',
'item_modified',
'item_public',
'item_tags',
'item_type',
'element_set_name',
'element_name',
'element_text'
)
csv_writer = csv.writer(self.__output_file)
csv_writer.writerow(csv_header)
for item in items:
for element_text in item.element_texts:
csv_row = []
csv_row.append(item.id)
csv_row.append(item.added)
csv_row.append(item.featured)
csv_row.append(item.modified)
csv_row.append(item.public)
csv_row.append('|'.join(self.__csv_encode(tag.name) for tag in item.tags))
csv_row.append(item.item_type.name if item.item_type is not None else '')
csv_row.append(self.__csv_encode(element_text.element_set.name))
csv_row.append(self.__csv_encode(element_text.element.name))
csv_row.append(self.__csv_encode(element_text.text))
assert len(csv_row) == len(csv_header)
csv_writer.writerow(csv_row)
self.__output_file.flush()
assert __name__ == '__main__'
YomekaCli.main()
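# --- Hedged usage examples (API key and endpoint URL are placeholders) ---
#   python yomeka_cli.py --api-key KEY --endpoint-url http://omeka.example.org/api -o items.csv items
#   python yomeka_cli.py --api-key KEY --endpoint-url http://omeka.example.org/api item 42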
|
minorg/yomeka
|
bin/yomeka_cli.py
|
Python
|
bsd-2-clause
| 3,363
|
#! /usr/bin/env python2
#######################################################################
# #
# Copyright 2014 Cristian C Lalescu #
# #
# This file is part of py3Dpdf. #
# #
# py3Dpdf is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# py3Dpdf is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with py3Dpdf. If not, see <http://www.gnu.org/licenses/> #
# #
#######################################################################
import numpy as np
import py3Dpdf
import py3Dpdf.tvtk_tools
from base import get_turbulent_scalar
def main():
fx = get_turbulent_scalar()[3]
fy = get_turbulent_scalar()[3]
x, y, z, fz = get_turbulent_scalar()
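    # Note: fx, fy and fz all hold the same scalar component (assuming
    # get_turbulent_scalar() is deterministic), so the plotted vector field
    # is (f, f, f); calling it once and reusing the result would also avoid
    # recomputing the field three times.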
grid1D = np.linspace(
-np.pi, np.pi,
fx.shape[0],
endpoint = False)
data = py3Dpdf.tvtk_tools.get_isosurface_data(
field = fx,
x1Dgrid = grid1D,
y1Dgrid = grid1D,
z1Dgrid = grid1D,
values = [0.0])
if py3Dpdf.found_mathgl:
# first, vector field on grid
gr = py3Dpdf.npGraph()
gr.set_limits(
points = {'x': grid1D,
'y': grid1D,
'z': grid1D})
gr.Axis('xyz')
gr.Box()
gr.Label('x', 'x', 0)
gr.Label('y', 'y', 0)
gr.Label('z', 'z', 0)
gr.vector_field(
points = np.transpose(
np.array([x, y, z]),
axes = [1, 2, 3, 0]),
vectors = np.transpose(
np.array([fx, fy, fz]),
axes = [1, 2, 3, 0]),
style = '<',
options = {'meshnum': 8})
gr.Legend()
gr.WritePNG('mgl_grid_vec_test.png')
gr.WritePRC('mgl_grid_vec_test.prc')
# second, isosurface with normals
gr.Clf()
gr.triangulated_surface(
points = data[0]['points'],
triangles = data[0]['triangles'],
style = 'r')
gr.vector_field(
points = data[0]['centers'],
vectors = data[0]['gradients'],
style = 'g<',
options = {'value' : 0.1})
gr.WritePNG('mgl_vec_test.png')
gr.WritePRC('mgl_vec_test.prc')
return None
if __name__ == '__main__':
main()
|
chichilalescu/py3Dpdf
|
examples/vector_field.py
|
Python
|
gpl-3.0
| 3,252
|
import json
import webapp2
import logging
from google.appengine.ext import ndb
import ndbTools
import modelInventory
import modelUser
import modelStore
import modelSupplier
import modelPaymentMethod
import modelPurchaser
import modelSaleTransaction
import modelTest
class RestHandler(webapp2.RequestHandler):
def dispatch(self):
super(RestHandler, self).dispatch()
def SendJson(self, r):
self.response.headers['content-type'] = 'text/plain'
self.response.write(json.dumps(r))
def HandleException(self, exception):
logging.exception(exception)
self.response.write('ERROR OCCURED')
if isinstance(exception, webapp2.HTTPException):
self.response.set_status(exception.code)
else:
self.response.set_status(500)
class UserHandler(RestHandler):
def post(self):
data = json.loads(self.request.body)
user = modelUser.AddUser(data)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
def get(self):
queryParams = json.loads(self.request.get("queryParams"))
print queryParams
users = modelUser.QueryUser(queryParams)
users = [ndbTools.AsDict(user.key) for user in users]
self.SendJson(users)
class UserKeyHandler(RestHandler):
def post(self, key):
r = json.loads(self.request.body)
user = ndbTools.SaveData(key, r)
#user = modelUser.SaveUser(key, r)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
def get(self, key):
user = ndbTools.GetData(key)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
class StoreHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
store = modelStore.AddStore(r)
r = ndbTools.AsDict(store.key)
self.SendJson(r)
def get(self):
store = modelStore.GetStore(self.request.get('key'))
        r = ndbTools.AsDict(store.key)
self.SendJson(r)
class StoreKeyHandler(RestHandler):
def post(self, key):
r = json.loads(self.request.body)
store = ndbTools.SaveData(key, r)
r = ndbTools.AsDict(store.key)
        self.SendJson(r)
def get(self, key):
user = ndbTools.GetData(key)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
class SupplierHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
supplier = modelSupplier.AddSupplier(r)
r = ndbTools.AsDict(supplier.key)
self.SendJson(r)
def get(self):
store = modelStore.GetStore(self.request.get('key'))
        r = ndbTools.AsDict(store.key)
self.SendJson(r)
class SupplierKeyHandler(RestHandler):
def post(self, key):
r = json.loads(self.request.body)
store = ndbTools.SaveData(key, r)
r = ndbTools.AsDict(store.key)
        self.SendJson(r)
def get(self, key):
user = ndbTools.GetData(key)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
class PaymentMethodHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
paymentMethod = modelPaymentMethod.AddPaymentMethod(r)
r = ndbTools.AsDict(paymentMethod.key)
self.SendJson(r)
def get(self):
#print self.request.get('key')
keys = ['agpkZXZ-ZWNvdW50chILEgVTdG9yZRiAgICAgIDFCgw', 'agpkZXZ-ZWNvdW50cigLEgVTdG9yZRiAgICAgIDFCgwLEglQdXJjaGFzZXIYgICAgICAxQkM']
for key in keys:
paymentMethod = modelPaymentMethod.GetPaymentMethod(key)
print '---'
print key
print paymentMethod
for p in paymentMethod:
print '~~~'
print p.purchaser.get().firstName
print '~~~'
#print ndbTools.AsDict(paymentMethod.key)
print '---'
#r = ndb.Tools.AsDict(store.key)
#self.SendJson(r)
class PurchaserHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
purchaser = modelPurchaser.AddPurchaser(r)
r = ndbTools.AsDict(purchaser.key)
self.SendJson(r)
def get(self):
store = modelStore.GetStore(self.request.get('key'))
        r = ndbTools.AsDict(store.key)
self.SendJson(r)
class InventoryHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
modelInventory.AddInventory(r['inventory'], r['items'])
#paymentMethod = modelPaymentMethod.AddPaymentMethod(r)
#r = ndbTools.AsDict(paymentMethod.key)
#self.SendJson(r)
def get(self):
inventory = modelInventory.QueryInventory(self.request.get('storeKey'))
r = [ndbTools.AsDict(i.key) for i in inventory]
self.SendJson(r)
class InventoryKeyHandler(RestHandler):
    def post(self, key):
r = json.loads(self.request.body)
modelInventory.AddInventory(r['inventory'], r['items'])
#paymentMethod = modelPaymentMethod.AddPaymentMethod(r)
#r = ndbTools.AsDict(paymentMethod.key)
#self.SendJson(r)
def get(self, key):
user = ndbTools.GetData(key)
r = ndbTools.AsDict(user.key)
self.SendJson(r)
def delete(self, itemKey):
ndbTools.DeleteData(itemKey)
class SaleTransactionHandler(RestHandler):
def post(self):
r = json.loads(self.request.body)
saleTransaction = modelSaleTransaction.AddSaleTransaction(r)
print '---'
print saleTransaction
print '---'
r = ndbTools.AsDict(saleTransaction.key)
print '###'
print r
self.SendJson(r)
def get(self):
store = modelStore.GetStore(self.request.get('key'))
        r = ndbTools.AsDict(store.key)
self.SendJson(r)
class TestHandler(RestHandler):
def post(self):
data = json.loads(self.request.body)
print '~~~request body~~~'
print data
print '~~~~~~~~~~~~~~~~~~'
test = modelTest.AddTest(data)
print '###json###'
print test
class LookupKeyHandler(RestHandler):
def get(self, key):
print key
print 'hi'
key = ndb.Key(urlsafe=key)
entity = key.get()
if entity:
r = ndbTools.AsDict(key)
self.SendJson(r)
else:
self.HandleException(Exception())
APP = webapp2.WSGIApplication([
('/rest/lookup/key/(.*)', LookupKeyHandler),
('/rest/user', UserHandler),
('/rest/user/(.*)', UserKeyHandler),
('/rest/store', StoreHandler),
('/rest/store/(.*)', StoreKeyHandler),
('/rest/supplier', SupplierHandler),
('/rest/supplier/(.*)', SupplierKeyHandler),
('/rest/paymentMethod', PaymentMethodHandler),
('/rest/purchaser', PurchaserHandler),
('/rest/inventory', InventoryHandler),
('/rest/inventory/(.*)', InventoryKeyHandler),
('/rest/saleTransaction', SaleTransactionHandler),
('/rest/test', TestHandler)
], debug=True)
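# --- Hedged usage sketch (derived from the routes above) ---
#   POST /rest/user with a JSON body           -> creates a user, echoes its dict
#   GET  /rest/user?queryParams=<json>         -> lists users matching the query
#   GET  /rest/lookup/key/<urlsafe-ndb-key>    -> fetches any entity by its key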
|
jmtoung/26p
|
main.py
|
Python
|
apache-2.0
| 6,943
|
# coding=utf-8
"""Daily searcher module."""
from __future__ import unicode_literals
import logging
import threading
from builtins import object
from datetime import date, datetime, timedelta
from medusa import app, common
from medusa.db import DBConnection
from medusa.helper.common import try_int
from medusa.helper.exceptions import MultipleShowObjectsException
from medusa.logger.adapters.style import BraceAdapter
from medusa.network_timezones import (
app_timezone,
network_dict,
parse_date_time,
update_network_dict,
)
from medusa.search.queue import DailySearchQueueItem
from medusa.show.show import Show
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
class DailySearcher(object): # pylint:disable=too-few-public-methods
"""Daily search class."""
def __init__(self):
"""Initialize the class."""
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False): # pylint:disable=too-many-branches
"""
Run the daily searcher, queuing selected episodes for search.
:param force: Force search
"""
if self.amActive:
log.debug('Daily search is still running, not starting it again')
return
elif app.forced_search_queue_scheduler.action.is_forced_search_in_progress() and not force:
log.debug('Manual search is running. Unable to start Daily search')
return
elif app.proper_finder_scheduler.action.amActive and not force:
log.debug('Find propers is running. Unable to start Daily search')
return
self.amActive = True
if not network_dict:
update_network_dict()
cur_time = datetime.now(app_timezone)
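        # Presumably: look one day ahead when network timezone data is
        # available and two days otherwise, so episodes are not picked up
        # before they could have finished airing anywhere.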
cur_date = (
date.today() + timedelta(days=1 if network_dict else 2)
).toordinal()
main_db_con = DBConnection()
episodes_from_db = main_db_con.select(
b'SELECT indexer, showid, airdate, season, episode '
b'FROM tv_episodes '
b'WHERE status = ? AND (airdate <= ? and airdate > 1)',
[common.UNAIRED, cur_date]
)
new_releases = []
series_obj = None
for db_episode in episodes_from_db:
indexer_id = db_episode[b'indexer']
series_id = db_episode[b'showid']
try:
if not series_obj or series_id != series_obj.indexerid:
series_obj = Show.find_by_id(app.showList, indexer_id, series_id)
# for when there is orphaned series in the database but not loaded into our show list
if not series_obj or series_obj.paused:
continue
except MultipleShowObjectsException:
log.info('ERROR: expected to find a single show matching {id}',
{'id': series_id})
continue
if series_obj.airs and series_obj.network:
# This is how you assure it is always converted to local time
show_air_time = parse_date_time(db_episode[b'airdate'], series_obj.airs, series_obj.network)
end_time = show_air_time.astimezone(app_timezone) + timedelta(minutes=try_int(series_obj.runtime, 60))
# filter out any episodes that haven't finished airing yet,
if end_time > cur_time:
continue
cur_ep = series_obj.get_episode(db_episode[b'season'], db_episode[b'episode'])
with cur_ep.lock:
                cur_ep.status = series_obj.default_ep_status if cur_ep.season else common.SKIPPED
log.info(
'Setting status ({status}) for show airing today: {name} {special}', {
'name': cur_ep.pretty_name(),
                        'status': common.statusStrings[cur_ep.status],
'special': '(specials are not supported)' if not cur_ep.season else '',
}
)
new_releases.append(cur_ep.get_sql())
if new_releases:
main_db_con = DBConnection()
main_db_con.mass_action(new_releases)
# queue episode for daily search
app.search_queue_scheduler.action.add_item(
DailySearchQueueItem(force=force)
)
self.amActive = False
|
fernandog/Medusa
|
medusa/search/daily.py
|
Python
|
gpl-3.0
| 4,443
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/AssetDigestEntry.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/AssetDigestEntry.proto',
package='POGOProtos.Data',
syntax='proto3',
serialized_pb=_b('\n&POGOProtos/Data/AssetDigestEntry.proto\x12\x0fPOGOProtos.Data\"w\n\x10\x41ssetDigestEntry\x12\x10\n\x08\x61sset_id\x18\x01 \x01(\t\x12\x13\n\x0b\x62undle_name\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\x03 \x01(\x03\x12\x10\n\x08\x63hecksum\x18\x04 \x01(\r\x12\x0c\n\x04size\x18\x05 \x01(\x05\x12\x0b\n\x03key\x18\x06 \x01(\x0c\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ASSETDIGESTENTRY = _descriptor.Descriptor(
name='AssetDigestEntry',
full_name='POGOProtos.Data.AssetDigestEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset_id', full_name='POGOProtos.Data.AssetDigestEntry.asset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bundle_name', full_name='POGOProtos.Data.AssetDigestEntry.bundle_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='version', full_name='POGOProtos.Data.AssetDigestEntry.version', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='checksum', full_name='POGOProtos.Data.AssetDigestEntry.checksum', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='size', full_name='POGOProtos.Data.AssetDigestEntry.size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='POGOProtos.Data.AssetDigestEntry.key', index=5,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=59,
serialized_end=178,
)
DESCRIPTOR.message_types_by_name['AssetDigestEntry'] = _ASSETDIGESTENTRY
AssetDigestEntry = _reflection.GeneratedProtocolMessageType('AssetDigestEntry', (_message.Message,), dict(
DESCRIPTOR = _ASSETDIGESTENTRY,
__module__ = 'POGOProtos.Data.AssetDigestEntry_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.AssetDigestEntry)
))
_sym_db.RegisterMessage(AssetDigestEntry)
# @@protoc_insertion_point(module_scope)
|
polzy/PokeManager
|
pogo/POGOProtos/Data/AssetDigestEntry_pb2.py
|
Python
|
mit
| 4,026
|
#-------------------------------------------------------------------------------
# Name: process_DNAI
# Purpose:
#
# Author: Ramakrishna
#
# Created: 17/04/2014
# Copyright: (c) Ramakrishna 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
from bs4 import BeautifulSoup
from datetime import date
import re
import my_caching
def process(source_cd, base_url, data):
try:
record= []
page_content = data.find('div', {'class' : 'leftcolumn white'})
if page_content != None:
review_anchors = page_content.find_all('a', {'class' : 'more'})
for movie in review_anchors:
if movie != None:
row = {}
if movie.has_attr('href'):
url = movie['href']
if url != None:
response = my_caching.get_content(source_cd, url)
if response != None:
soup = BeautifulSoup(response)
article = soup.find('div', {'class' : 'leftcolumn white'})
if article != None:
name = article.find('h2')
if name != None:
row['name'] = name.text.strip()
row['source_cd'] = source_cd
row['rvw_link'] = url
row['year'] = date.today().year
article = article.find('div', {'style' : 'text-align:justify;'})
pColl = article.find_all('p')
if pColl != None:
for p in pColl:
p_text = p.text.strip()
                                            # Write the last non-empty paragraph as the review summary
if p_text != '':
row['rvw_smy'] = p_text
record.append(row)
return record
except Exception as e:
print(e.__doc__)
print(e.args)
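# Hedged note: each element of the returned `record` list is a dict of the form
#   {'name': ..., 'source_cd': ..., 'rvw_link': ..., 'year': ..., 'rvw_smy': ...}
# where 'rvw_smy' ends up holding the last non-empty paragraph of the article.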
|
brkrishna/freelance
|
bolly_reviews/process_ETC.py
|
Python
|
gpl-2.0
| 2,339
|
# Third-party
import astropy.units as u
import numpy as np
import pytest
# Custom
from ....potential import (NullPotential, NFWPotential,
HernquistPotential, KuzminPotential,
ConstantRotatingFrame, StaticFrame)
from ....dynamics import PhaseSpacePosition, combine
from ....units import UnitSystem, galactic
# Project
from ..core import DirectNBody
class TestDirectNBody:
def setup(self):
self.usys = UnitSystem(u.pc, u.Unit(1e-5*u.Myr),
u.Unit(1e6*u.Msun), u.radian)
pot_particle2 = HernquistPotential(m=1e6*u.Msun, c=0.1*u.pc,
units=self.usys)
vcirc = pot_particle2.circular_velocity([1, 0, 0.]*u.pc).to(u.km/u.s)
self.particle_potentials = [NullPotential(self.usys), pot_particle2]
w0_2 = PhaseSpacePosition(pos=[10, 0, 0] * u.kpc,
vel=[0, 83, 0] * u.km/u.s)
w0_1 = PhaseSpacePosition(pos=w0_2.xyz + [1, 0, 0] * u.pc,
vel=w0_2.v_xyz + [0, 1., 0] * vcirc)
self.w0 = combine((w0_1, w0_2))
self.ext_pot = NFWPotential(m=1e11, r_s=10, units=galactic)
def test_directnbody_init(self):
# another unit system for testing
usys2 = UnitSystem(u.pc, u.Unit(1e-3*u.Myr),
u.Unit(1e6*u.Msun), u.radian)
particle_potentials_None = [None] + self.particle_potentials[1:]
# Different VALID ways to initialize
nbody = DirectNBody(self.w0,
particle_potentials=self.particle_potentials)
nbody = DirectNBody(self.w0,
particle_potentials=particle_potentials_None)
nbody = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
external_potential=self.ext_pot)
nbody = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
external_potential=self.ext_pot, units=usys2)
nbody = DirectNBody(self.w0, particle_potentials=[None, None],
units=usys2)
nbody = DirectNBody(self.w0, particle_potentials=[None, None],
external_potential=self.ext_pot)
# Different INVALID ways to initialize
with pytest.raises(TypeError):
DirectNBody("sdf", particle_potentials=self.particle_potentials)
with pytest.raises(ValueError):
DirectNBody(self.w0,
particle_potentials=self.particle_potentials[:1])
# MAX_NBODY1 = 65536+1
# w0_max = combine([self.w0[0]]*MAX_NBODY1)
# with pytest.raises(NotImplementedError):
# DirectNBody(w0_max, particle_potentials=[None]*MAX_NBODY1)
with pytest.raises(ValueError):
DirectNBody(self.w0, particle_potentials=[None, None])
py_ext_pot = KuzminPotential(m=1e10*u.Msun, a=0.5*u.kpc, units=galactic)
with pytest.raises(ValueError):
DirectNBody(self.w0, particle_potentials=self.particle_potentials,
external_potential=py_ext_pot)
def test_directnbody_integrate(self):
# TODO: this is really a unit test, but we should have some functional tests
# that check that the orbit integration is making sense!
# First, compare with/without mass with no external potential:
nbody1 = DirectNBody(self.w0,
particle_potentials=[None, None],
units=self.usys)
nbody2 = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys)
orbits1 = nbody1.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
orbits2 = nbody2.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
dx0 = orbits1[:, 0].xyz - orbits2[:, 0].xyz
dx1 = orbits1[:, 1].xyz - orbits2[:, 1].xyz
assert u.allclose(np.abs(dx1), 0*u.pc, atol=1e-13*u.pc)
assert np.abs(dx0).max() > 50*u.pc
# Now compare with/without mass with external potential:
nbody1 = DirectNBody(self.w0,
particle_potentials=[None, None],
units=self.usys,
external_potential=self.ext_pot)
nbody2 = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys,
external_potential=self.ext_pot)
orbits1 = nbody1.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
orbits2 = nbody2.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
dx0 = orbits1[:, 0].xyz - orbits2[:, 0].xyz
dx1 = orbits1[:, 1].xyz - orbits2[:, 1].xyz
assert u.allclose(np.abs(dx1), 0*u.pc, atol=1e-13*u.pc)
assert np.abs(dx0).max() > 50*u.pc
def test_directnbody_integrate_dontsaveall(self):
# If we set save_all = False, only return the final positions:
nbody1 = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys,
external_potential=self.ext_pot,
save_all=False)
nbody2 = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys,
external_potential=self.ext_pot,
save_all=True)
w1 = nbody1.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
orbits = nbody2.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
w2 = orbits[-1]
assert u.allclose(w1.xyz, w2.xyz)
assert u.allclose(w1.v_xyz, w2.v_xyz)
def test_directnbody_integrate_rotframe(self):
# Now compare with/without mass with external potential:
frame = ConstantRotatingFrame(Omega=[0,0,1]*self.w0[0].v_y/self.w0[0].x,
units=self.usys)
nbody = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys,
external_potential=self.ext_pot,
frame=frame)
nbody2 = DirectNBody(self.w0,
particle_potentials=self.particle_potentials,
units=self.usys,
external_potential=self.ext_pot)
orbits = nbody.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
orbits_static = orbits.to_frame(StaticFrame(self.usys))
orbits2 = nbody2.integrate_orbit(dt=1*self.usys['time'],
t1=0, t2=1*u.Myr)
        assert u.allclose(orbits_static.xyz, orbits2.xyz)
        assert u.allclose(orbits_static.v_xyz, orbits2.v_xyz)
|
adrn/gary
|
gala/dynamics/nbody/tests/test_nbody.py
|
Python
|
mit
| 7,395
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""CMSIS-NN integration tests: Softmax"""
import sys
import itertools
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.relay.op.contrib import cmsisnn
from utils import (
skip_if_no_reference_system,
make_module,
count_num_calls,
get_range_for_dtype_str,
)
from tests.python.relay.aot.aot_test_utils import (
AOTTestModel,
AOT_CORSTONE300_RUNNER,
generate_ref_data,
compile_and_run,
)
def make_model(
shape, in_dtype, out_dtype, in_zero_point, in_scale, out_zero_point=-128, out_scale=1.0 / 256
):
"""Create a Relay Function / network model"""
a = relay.var("in0", shape=shape, dtype=in_dtype)
dequantize = relay.qnn.op.dequantize(
a,
input_scale=relay.const(in_scale, "float32"),
input_zero_point=relay.const(in_zero_point, "int32"),
)
softmax = relay.nn.softmax(dequantize)
model = relay.qnn.op.quantize(
softmax,
output_scale=relay.const(out_scale, "float32"),
output_zero_point=relay.const(out_zero_point, "int32"),
out_dtype=out_dtype,
)
return model
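# Hedged note: make_model builds qnn.dequantize -> nn.softmax -> qnn.quantize,
# the quantized softmax pattern CMSIS-NN offloads; the partitioner accepts it
# only for int8 with out_zero_point == -128 and out_scale == 1/256, which is
# exactly the combination excluded from the invalid-parameter sweep below.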
@skip_if_no_reference_system
@pytest.mark.parametrize(["zero_point", "scale"], [[33, 0.256], [-64, 0.0128]])
@tvm.testing.requires_cmsisnn
def test_op_int8(zero_point, scale):
interface_api = "c"
use_unpacked_api = True
test_runner = AOT_CORSTONE300_RUNNER
dtype = "int8"
shape = [1, 16, 16, 3]
model = make_model(shape, dtype, dtype, zero_point, scale)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
# validate pattern matching
attrs = [
cmsisnn_mod[var.name_hint].attrs
for var in cmsisnn_mod.get_global_vars()
if cmsisnn_mod[var.name_hint].attrs
]
assert any(attrs), "At least one function with external attributes was expected."
compilers = [
key == "Compiler" and value == "cmsis-nn" for attr in attrs for key, value in attr.items()
]
assert any(compilers), "Module does not contain function for cmsisnn target."
assert count_num_calls(orig_mod) == count_num_calls(
cmsisnn_mod
), "Number of calls changed during partitioning"
# validate the output
in_min, in_max = get_range_for_dtype_str(dtype)
np.random.seed(0)
input_data = np.random.randint(in_min, high=in_max, size=shape, dtype=dtype)
inputs = {"in0": input_data}
params = {}
output_list = generate_ref_data(orig_mod["main"], inputs, params)
compile_and_run(
AOTTestModel(module=cmsisnn_mod, inputs=inputs, outputs=output_list, params=params),
test_runner,
interface_api,
use_unpacked_api,
)
def parameterize_for_invalid_model(test):
in_dtype = ["uint8", "int8"]
out_dtype = ["uint8", "int8"]
zero_point = [-128, 64]
scale = [1.0 / 256, 0.2]
out_zero_point = [-128, 33]
out_scale = [1.0 / 256, 0.2]
all_combinations = itertools.product(
in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale
)
all_combinations = filter(
lambda parameters: not (
parameters[0] == "int8"
and parameters[1] == "int8"
and parameters[4] == -128
and parameters[5] == 1.0 / 256
),
all_combinations,
)
return pytest.mark.parametrize(
["in_dtype", "out_dtype", "zero_point", "scale", "out_zero_point", "out_scale"],
all_combinations,
)(test)
@parameterize_for_invalid_model
@tvm.testing.requires_cmsisnn
def test_invalid_parameters(in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale):
model = make_model(
[1, 16, 16, 3], in_dtype, out_dtype, zero_point, scale, out_zero_point, out_scale
)
orig_mod = make_module(model)
cmsisnn_mod = cmsisnn.partition_for_cmsisnn(orig_mod)
attrs = [
cmsisnn_mod[var.name_hint].attrs
for var in cmsisnn_mod.get_global_vars()
if cmsisnn_mod[var.name_hint].attrs
]
assert not any(attrs), "No function should have an external attribute."
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
|
Laurawly/tvm-1
|
tests/python/contrib/test_cmsisnn/test_softmax.py
|
Python
|
apache-2.0
| 4,949
|
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import cStringIO
import hashlib
import os
import time
from oslo.config import cfg
from nova import conductor
from nova import db
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import test
from nova.tests import fake_instance
from nova import utils
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import utils as libvirt_utils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
@contextlib.contextmanager
def intercept_log_messages():
try:
mylog = logging.getLogger('nova')
stream = cStringIO.StringIO()
handler = logging.logging.StreamHandler(stream)
handler.setFormatter(logging.ContextFormatter())
mylog.logger.addHandler(handler)
yield stream
finally:
mylog.logger.removeHandler(handler)
class ImageCacheManagerTestCase(test.NoDBTestCase):
def setUp(self):
super(ImageCacheManagerTestCase, self).setUp()
self.stock_instance_names = set(['instance-00000001',
'instance-00000002',
'instance-00000003',
'banana-42-hamster'])
def test_read_stored_checksum_missing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
csum = imagecache.read_stored_checksum('/tmp/foo', timestamped=False)
self.assertIsNone(csum)
def test_read_stored_checksum(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
csum_input = '{"sha1": "fdghkfhkgjjksfdgjksjkghsdf"}\n'
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
f = open(info_fname, 'w')
f.write(csum_input)
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_input.rstrip(),
'{"sha1": "%s"}' % csum_output)
def test_read_stored_checksum_legacy_essex(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
old_fname = fname + '.sha1'
f = open(old_fname, 'w')
f.write('fdghkfhkgjjksfdgjksjkghsdf')
f.close()
csum_output = imagecache.read_stored_checksum(fname,
timestamped=False)
self.assertEqual(csum_output, 'fdghkfhkgjjksfdgjksjkghsdf')
self.assertFalse(os.path.exists(old_fname))
info_fname = imagecache.get_info_filename(fname)
self.assertTrue(os.path.exists(info_fname))
def test_list_base_images(self):
listing = ['00000001',
'ephemeral_0_20_None',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120.info',
'00000004']
images = ['e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
'e97222e91fc4241f49a7f520d1dcf446751129b3',
'17d1b00b81642842e514494a78e804e9a511637c',
'17d1b00b81642842e514494a78e804e9a511637c_5368709120',
'17d1b00b81642842e514494a78e804e9a511637c_10737418240']
listing.extend(images)
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
self.flags(instances_path='/var/lib/nova/instances')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
sanitized = []
for ent in image_cache_manager.unexplained_images:
sanitized.append(ent.replace(base_dir + '/', ''))
self.assertEqual(sorted(sanitized), sorted(images))
expected = os.path.join(base_dir,
'e97222e91fc4241f49a7f520d1dcf446751129b3')
self.assertIn(expected, image_cache_manager.unexplained_images)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertIn(expected, image_cache_manager.unexplained_images)
unexpected = os.path.join(base_dir, '00000004')
self.assertNotIn(unexpected, image_cache_manager.unexplained_images)
for ent in image_cache_manager.unexplained_images:
self.assertTrue(ent.startswith(base_dir))
self.assertEqual(len(image_cache_manager.originals), 2)
expected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c')
self.assertIn(expected, image_cache_manager.originals)
unexpected = os.path.join(base_dir,
'17d1b00b81642842e514494a78e804e9a511637c_'
'10737418240')
self.assertNotIn(unexpected, image_cache_manager.originals)
def test_list_backing_images_small(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_resized(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'instance-00000001',
'instance-00000002', 'instance-00000003'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('instance-') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: ('e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240'))
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_'
'10737418240')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_instancename(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('banana-42-hamster') != -1)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: 'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
found = os.path.join(CONF.instances_path,
CONF.image_cache_subdirectory_name,
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [found]
image_cache_manager.instance_names = self.stock_instance_names
inuse_images = image_cache_manager._list_backing_images()
self.assertEqual(inuse_images, [found])
self.assertEqual(len(image_cache_manager.unexplained_images), 0)
def test_list_backing_images_disk_notexist(self):
self.stubs.Set(os, 'listdir',
lambda x: ['_base', 'banana-42-hamster'])
self.stubs.Set(os.path, 'exists',
lambda x: x.find('banana-42-hamster') != -1)
def fake_get_disk(disk_path):
raise processutils.ProcessExecutionError()
self.stubs.Set(libvirt_utils, 'get_disk_backing_file', fake_get_disk)
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = []
image_cache_manager.instance_names = self.stock_instance_names
self.assertRaises(processutils.ProcessExecutionError,
image_cache_manager._list_backing_images)
def test_find_base_file_nothing(self):
self.stubs.Set(os.path, 'exists', lambda x: False)
base_dir = '/var/lib/nova/instances/_base'
fingerprint = '549867354867'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
self.assertEqual(0, len(res))
def test_find_base_file_small(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
self.stubs.Set(os.path, 'exists',
lambda x: x.endswith('%s_sm' % fingerprint))
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_sm')
self.assertEqual(res, [(base_file, True, False)])
def test_find_base_file_resized(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'exists',
lambda x: x.endswith('%s_10737418240' % fingerprint))
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file, False, True)])
def test_find_base_file_all(self):
fingerprint = '968dd6cc49e01aaa044ed11c0cce733e0fa44a6a'
listing = ['00000001',
'ephemeral_0_20_None',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_sm',
'968dd6cc49e01aaa044ed11c0cce733e0fa44a6a_10737418240',
'00000004']
self.stubs.Set(os, 'listdir', lambda x: listing)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.stubs.Set(os.path, 'isfile', lambda x: True)
base_dir = '/var/lib/nova/instances/_base'
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._list_base_images(base_dir)
res = list(image_cache_manager._find_base_file(base_dir, fingerprint))
base_file1 = os.path.join(base_dir, fingerprint)
base_file2 = os.path.join(base_dir, fingerprint + '_sm')
base_file3 = os.path.join(base_dir, fingerprint + '_10737418240')
self.assertEqual(res, [(base_file1, False, False),
(base_file2, True, False),
(base_file3, False, True)])
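    # Note: _find_base_file yields (path, is_small, is_resized) tuples, which
    # is why the '_sm' variant reports True in the second slot and the
    # resized '_10737418240' variant True in the third.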
@contextlib.contextmanager
def _make_base_file(self, checksum=True):
"""Make a base file for testing."""
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
base_file = open(fname, 'w')
base_file.write('data')
base_file.close()
base_file = open(fname, 'r')
if checksum:
imagecache.write_stored_checksum(fname)
base_file.close()
yield fname
def test_remove_base_file(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Old files get cleaned up though
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_original(self):
with self._make_base_file() as fname:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.originals = [fname]
image_cache_manager._remove_base_file(fname)
info_fname = imagecache.get_info_filename(fname)
# Files are initially too new to delete
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# This file should stay longer than a resized image
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertTrue(os.path.exists(info_fname))
# Originals don't stay forever though
os.utime(fname, (-1, time.time() - 3600 * 25))
image_cache_manager._remove_base_file(fname)
self.assertFalse(os.path.exists(fname))
self.assertFalse(os.path.exists(info_fname))
def test_remove_base_file_dne(self):
# This test is solely to execute the "does not exist" code path. We
# don't expect the method being tested to do anything in this case.
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
def test_remove_base_file_oserror(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
os.mkdir(fname)
os.utime(fname, (-1, time.time() - 3601))
# This will raise an OSError because of file permissions
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager._remove_base_file(fname)
self.assertTrue(os.path.exists(fname))
self.assertNotEqual(stream.getvalue().find('Failed to remove'),
-1)
def test_handle_base_image_unused(self):
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files,
[fname])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used(self):
self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_used_remotely(self):
self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
os.utime(fname, (-1, time.time() - 3601))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (0, 1, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_absent(self):
img = '123'
with intercept_log_messages() as stream:
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, None)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
self.assertNotEqual(stream.getvalue().find('an absent base file'),
-1)
def test_handle_base_image_used_missing(self):
img = '123'
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname = os.path.join(tmpdir, 'aaa')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files, [])
def test_handle_base_image_checksum_fails(self):
self.flags(checksum_base_images=True, group='libvirt')
self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
img = '123'
with self._make_base_file() as fname:
with open(fname, 'w') as f:
f.write('banana')
d = {'sha1': '21323454'}
with open('%s.info' % fname, 'w') as f:
f.write(jsonutils.dumps(d))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.unexplained_images = [fname]
image_cache_manager.used_images = {'123': (1, 0, ['banana-42'])}
image_cache_manager._handle_base_image(img, fname)
self.assertEqual(image_cache_manager.unexplained_images, [])
self.assertEqual(image_cache_manager.removable_base_files, [])
self.assertEqual(image_cache_manager.corrupt_base_files,
[fname])
def test_verify_base_images(self):
hashed_1 = '356a192b7913b04c54574d18c28d46e6395428ab'
hashed_21 = '472b07b9fcf2c2451e8781e944bf5f77cd8457c8'
hashed_22 = '12c6fc06c99a462375eeb3f43dfd832b08ca9e17'
hashed_42 = '92cfceb39d57d914ed8b14d0e37643de0797ae56'
self.flags(instances_path='/instance_path',
image_cache_subdirectory_name='_base')
base_file_list = ['00000001',
'ephemeral_0_20_None',
'e97222e91fc4241f49a7f520d1dcf446751129b3_sm',
'e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm',
hashed_42,
hashed_1,
hashed_21,
hashed_22,
'%s_5368709120' % hashed_1,
'%s_10737418240' % hashed_1,
'00000004']
def fq_path(path):
return os.path.join('/instance_path/_base/', path)
# Fake base directory existence
orig_exists = os.path.exists
def exists(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_exists(path)
if path in ['/instance_path',
'/instance_path/_base',
'/instance_path/instance-1/disk',
'/instance_path/instance-2/disk',
'/instance_path/instance-3/disk',
'/instance_path/_base/%s.info' % hashed_42]:
return True
for p in base_file_list:
if path == fq_path(p):
return True
if path == fq_path(p) + '.info':
return False
if path in ['/instance_path/_base/%s_sm' % i for i in [hashed_1,
hashed_21,
hashed_22,
hashed_42]]:
return False
self.fail('Unexpected path existence check: %s' % path)
self.stubs.Set(os.path, 'exists', lambda x: exists(x))
self.stubs.Set(libvirt_utils, 'chown', lambda x, y: None)
# We need to stub utime as well
self.stubs.Set(os, 'utime', lambda x, y: None)
# Fake up some instances in the instances directory
orig_listdir = os.listdir
def listdir(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_listdir(path)
if path == '/instance_path':
return ['instance-1', 'instance-2', 'instance-3', '_base']
if path == '/instance_path/_base':
return base_file_list
self.fail('Unexpected directory listed: %s' % path)
self.stubs.Set(os, 'listdir', lambda x: listdir(x))
# Fake isfile for these faked images in _base
orig_isfile = os.path.isfile
def isfile(path):
# The python coverage tool got angry with my overly broad mocks
if not path.startswith('/instance_path'):
return orig_isfile(path)
for p in base_file_list:
if path == fq_path(p):
return True
self.fail('Unexpected isfile call: %s' % path)
self.stubs.Set(os.path, 'isfile', lambda x: isfile(x))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'kernel_id': '21',
'ramdisk_id': '22',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = []
for instance in instances:
all_instances.append(fake_instance.fake_instance_obj(
None, **instance))
image_cache_manager = imagecache.ImageCacheManager()
# Fake the utils call which finds the backing image
def get_disk_backing_file(path):
if path in ['/instance_path/instance-1/disk',
'/instance_path/instance-2/disk']:
return fq_path('%s_5368709120' % hashed_1)
self.fail('Unexpected backing file lookup: %s' % path)
self.stubs.Set(libvirt_utils, 'get_disk_backing_file',
lambda x: get_disk_backing_file(x))
# Fake out verifying checksums, as that is tested elsewhere
self.stubs.Set(image_cache_manager, '_verify_checksum',
lambda x, y: True)
# Fake getmtime as well
orig_getmtime = os.path.getmtime
def getmtime(path):
if not path.startswith('/instance_path'):
return orig_getmtime(path)
return 1000000
self.stubs.Set(os.path, 'getmtime', lambda x: getmtime(x))
# Make sure we don't accidentally remove a real file
orig_remove = os.remove
def remove(path):
if not path.startswith('/instance_path'):
return orig_remove(path)
# Don't try to remove fake files
return
self.stubs.Set(os, 'remove', lambda x: remove(x))
# And finally we can make the call we're actually testing...
# The argument here should be a context, but it is mocked out
image_cache_manager.update(None, all_instances)
# Verify
active = [fq_path(hashed_1), fq_path('%s_5368709120' % hashed_1),
fq_path(hashed_21), fq_path(hashed_22)]
for act in active:
self.assertIn(act, image_cache_manager.active_base_files)
self.assertEqual(len(image_cache_manager.active_base_files),
len(active))
for rem in [fq_path('e97222e91fc4241f49a7f520d1dcf446751129b3_sm'),
fq_path('e09c675c2d1cfac32dae3c2d83689c8c94bc693b_sm'),
fq_path(hashed_42),
fq_path('%s_10737418240' % hashed_1)]:
self.assertIn(rem, image_cache_manager.removable_base_files)
# Ensure there are no "corrupt" images as well
self.assertEqual(len(image_cache_manager.corrupt_base_files), 0)
def test_verify_base_images_no_base(self):
self.flags(instances_path='/tmp/no/such/dir/name/please')
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(None, [])
def test_is_valid_info_file(self):
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
self.flags(instances_path='/tmp/no/such/dir/name/please')
self.flags(image_info_filename_pattern=('$instances_path/_base/'
'%(image)s.info'),
group='libvirt')
base_filename = os.path.join(CONF.instances_path, '_base', hashed)
is_valid_info_file = imagecache.is_valid_info_file
self.assertFalse(is_valid_info_file('banana'))
self.assertFalse(is_valid_info_file(
os.path.join(CONF.instances_path, '_base', '00000001')))
self.assertFalse(is_valid_info_file(base_filename))
self.assertFalse(is_valid_info_file(base_filename + '.sha1'))
self.assertTrue(is_valid_info_file(base_filename + '.info'))
def test_configured_checksum_path(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
# Ensure there is a base directory
os.mkdir(os.path.join(tmpdir, '_base'))
# Fake the database call which lists running instances
instances = [{'image_ref': '1',
'host': CONF.host,
'name': 'instance-1',
'uuid': '123',
'vm_state': '',
'task_state': ''},
{'image_ref': '1',
'host': CONF.host,
'name': 'instance-2',
'uuid': '456',
'vm_state': '',
'task_state': ''}]
all_instances = []
for instance in instances:
all_instances.append(fake_instance.fake_instance_obj(
None, **instance))
def touch(filename):
f = open(filename, 'w')
f.write('Touched')
f.close()
old = time.time() - (25 * 3600)
hashed = 'e97222e91fc4241f49a7f520d1dcf446751129b3'
base_filename = os.path.join(tmpdir, hashed)
touch(base_filename)
touch(base_filename + '.info')
os.utime(base_filename + '.info', (old, old))
image_cache_manager = imagecache.ImageCacheManager()
image_cache_manager.update(None, all_instances)
self.assertTrue(os.path.exists(base_filename))
self.assertTrue(os.path.exists(base_filename + '.info'))
def test_compute_manager(self):
was = {'called': False}
def fake_get_all_by_filters(context, *args, **kwargs):
was['called'] = True
instances = []
for x in xrange(2):
instances.append(fake_instance.fake_db_instance(
image_ref='1',
uuid=x,
name=x,
vm_state='',
task_state=''))
return instances
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.stubs.Set(db, 'instance_get_all_by_filters',
fake_get_all_by_filters)
compute = importutils.import_object(CONF.compute_manager)
self.flags(use_local=True, group='conductor')
compute.conductor_api = conductor.API()
compute._run_image_cache_manager_pass(None)
self.assertTrue(was['called'])
class VerifyChecksumTestCase(test.NoDBTestCase):
def setUp(self):
super(VerifyChecksumTestCase, self).setUp()
self.img = {'container_format': 'ami', 'id': '42'}
self.flags(checksum_base_images=True, group='libvirt')
def _make_checksum(self, tmpdir):
testdata = ('OpenStack Software delivers a massively scalable cloud '
'operating system.')
fname = os.path.join(tmpdir, 'aaa')
info_fname = imagecache.get_info_filename(fname)
with open(fname, 'w') as f:
f.write(testdata)
return fname, info_fname, testdata
def _write_file(self, info_fname, info_attr, testdata):
f = open(info_fname, 'w')
if info_attr == "csum valid":
csum = hashlib.sha1()
csum.update(testdata)
f.write('{"sha1": "%s"}\n' % csum.hexdigest())
elif info_attr == "csum invalid, not json":
f.write('banana')
else:
f.write('{"sha1": "banana"}')
f.close()
def _check_body(self, tmpdir, info_attr):
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
self._write_file(info_fname, info_attr, testdata)
image_cache_manager = imagecache.ImageCacheManager()
return image_cache_manager, fname
def test_verify_checksum(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertTrue(res)
def test_verify_checksum_disabled(self):
self.flags(checksum_base_images=False, group='libvirt')
with utils.tempdir() as tmpdir:
image_cache_manager, fname = self._check_body(tmpdir, "csum valid")
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertIsNone(res)
def test_verify_checksum_invalid_json(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=False)
self.assertFalse(res)
log = stream.getvalue()
# NOTE(mikal): this is a skip not a fail because the file is
# present, but is not in valid json format and therefore is
# skipped.
self.assertNotEqual(log.find('image verification skipped'), -1)
def test_verify_checksum_invalid_repaired(self):
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, not json"))
res = image_cache_manager._verify_checksum(
self.img, fname, create_if_missing=True)
self.assertIsNone(res)
def test_verify_checksum_invalid(self):
with intercept_log_messages() as stream:
with utils.tempdir() as tmpdir:
image_cache_manager, fname = (
self._check_body(tmpdir, "csum invalid, valid json"))
res = image_cache_manager._verify_checksum(self.img, fname)
self.assertFalse(res)
log = stream.getvalue()
self.assertNotEqual(log.find('image verification failed'), -1)
def test_verify_checksum_file_missing(self):
with utils.tempdir() as tmpdir:
self.flags(instances_path=tmpdir)
self.flags(image_info_filename_pattern=('$instances_path/'
'%(image)s.info'),
group='libvirt')
fname, info_fname, testdata = self._make_checksum(tmpdir)
image_cache_manager = imagecache.ImageCacheManager()
res = image_cache_manager._verify_checksum('aaa', fname)
self.assertIsNone(res)
# Checksum requests for a file with no checksum now have the
# side effect of creating the checksum
self.assertTrue(os.path.exists(info_fname))
|
viggates/nova
|
nova/tests/virt/libvirt/test_imagecache.py
|
Python
|
apache-2.0
| 37,204
|
"""Cobertura Jenkins plugin coverage report collector base classes."""
from abc import ABC
from base_collectors import JenkinsPluginCollector
from model import SourceMeasurement, SourceResponses
class CoberturaJenkinsPluginBaseClass(JenkinsPluginCollector, ABC): # skipcq: PYL-W0223
"""Base class for Cobertura Jenkins plugin collectors."""
plugin = "cobertura"
depth = 2
class CoberturaJenkinsPluginCoverageBaseClass(CoberturaJenkinsPluginBaseClass):
"""Base class for Cobertura Jenkins plugin coverage collectors."""
coverage_type = "subclass responsibility"
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
"""Override to parse the coverage measurements."""
elements = (await responses[0].json())["results"]["elements"]
coverage = [element for element in elements if element["name"].lower() == self.coverage_type][0]
total = int(coverage["denominator"])
return SourceMeasurement(value=str(total - int(coverage["numerator"])), total=str(total))
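# Hedged usage sketch (illustrative, not part of the original module): a
# concrete collector is expected to override `coverage_type` with the
# lower-cased element name reported by the plugin; the class name and the
# element name "lines" below are assumptions for illustration only.
class ExampleUncoveredLinesCollector(CoberturaJenkinsPluginCoverageBaseClass):
    """Example collector that would count uncovered lines."""
    coverage_type = "lines"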
|
ICTU/quality-time
|
components/collector/src/source_collectors/cobertura_jenkins_plugin/base.py
|
Python
|
apache-2.0
| 1,062
|
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from rudi.models import Team
LABEL_STREET = _("Straße")
LABEL_POSTAL_CODE = _("PLZ")
LABEL_CITY = _("Stadt")
LABEL_DOORBELL = _("Klingelinfo")
LABEL_FIRSTNAME = _("Vorname")
LABEL_LASTNAME = _("Nachname")
LABEL_EMAIL = _("E-Mail")
LABEL_PHONE = _("Handynummer")
LABEL_ALLERGIES = _("Allergien/ Unverträglichkeiten")
LABEL_LIKE = _("Wir würden am liebsten diesen Gang/ einen dieser Gänge "
"zubereiten")
LABEL_DISLIKE = _("Wir würden diesen Gang/ einen dieser Gänge lieber "
"nicht zubereiten")
LABEL_AGREE_TERMS = _("Teilnahmebedingungen")
INFO_NAME = _("Seid kreativ!")
INFO_DOORBELL = _("Gebt an, wo eure Gäste klingeln müssen und "
"in welches Stockwerk sie müssen")
INFO_AGREE_TERMS = _("Hiermit erklärt ihr euch einverstanden, dass wir "
"eure Handynummern an die Teams weiterleiten dürfen, "
"mit denen ihr gemeinsam essen werdet.")
ERROR_AGREE_TERMS =\
_('Du musst mit der weitergabe deiner/ eurer Handynummern '
'einverstanden sein.')
ERROR_PREFERENCES = _('Du kannst den selben Kurs nicht in beiden Listen haben')
class RegistrationForm(forms.ModelForm):
agree_terms = forms.BooleanField(
label=LABEL_AGREE_TERMS,
help_text=INFO_AGREE_TERMS,
error_messages={
'required': ERROR_AGREE_TERMS
})
class Meta:
model = Team
exclude = ("event",)
labels = {
"street": LABEL_STREET,
"postal_code": LABEL_POSTAL_CODE,
"city": LABEL_CITY,
"doorbell": LABEL_DOORBELL,
"participant_1_firstname": LABEL_FIRSTNAME,
"participant_2_firstname": LABEL_FIRSTNAME,
"participant_1_lastname": LABEL_LASTNAME,
"participant_2_lastname": LABEL_LASTNAME,
"participant_1_email": LABEL_EMAIL,
"participant_2_email": LABEL_EMAIL,
"participant_1_phone": LABEL_PHONE,
"participant_2_phone": LABEL_PHONE,
"allergies": LABEL_ALLERGIES,
"like": LABEL_LIKE,
"dislike": LABEL_DISLIKE,
}
help_texts = {
"name": INFO_NAME,
"doorbell": INFO_DOORBELL,
}
widgets = {
"name": forms.TextInput(attrs={'class': 'form-control'}),
"street": forms.TextInput(attrs={'class': 'form-control'}),
"city": forms.TextInput(attrs={'class': 'form-control'}),
"postal_code": forms.TextInput(attrs={'class': 'form-control'}),
"doorbell": forms.TextInput(attrs={'class': 'form-control'}),
"participant_1_firstname": forms.TextInput(
attrs={'class': 'form-control'}),
"participant_1_lastname": forms.TextInput(
attrs={'class': 'form-control'}),
"participant_1_email": forms.EmailInput(
attrs={'class': 'form-control'}),
"participant_1_phone": forms.TextInput(
attrs={'class': 'form-control'}),
"participant_2_firstname": forms.TextInput(
attrs={'class': 'form-control'}),
"participant_2_lastname": forms.TextInput(
attrs={'class': 'form-control'}),
"participant_2_email": forms.EmailInput(
attrs={'class': 'form-control'}),
"participant_2_phone": forms.TextInput(
attrs={'class': 'form-control'}),
"allergies": forms.Textarea(attrs={'class': 'form-control'}),
"like": forms.CheckboxSelectMultiple(),
"dislike": forms.CheckboxSelectMultiple(),
}
def clean(self):
cleaned_data = super(RegistrationForm, self).clean()
like = set(cleaned_data.get("like"))
dislike = set(cleaned_data.get("dislike"))
intersect = like & dislike
if intersect:
raise ValidationError(ERROR_PREFERENCES)
class CustomMailForm(forms.Form):
subject = forms.CharField(label="Subject")
body = forms.CharField(label="Body", widget=forms.Textarea)
|
lexxodus/rudi
|
forms.py
|
Python
|
agpl-3.0
| 4,287
|
__author__ = 'Ivan'
from django import forms
from lite_note import models
class NoteCreateForm(forms.ModelForm):
class Meta:
model = models.Note
fields = ('title', 'category', 'note', )
class NoteForm(forms.ModelForm):
is_favorite = forms.BooleanField()
is_public = forms.BooleanField()
class Meta:
model = models.Note
exclude = ('id',)
|
Shiwin/LiteNote
|
lite_note/forms.py
|
Python
|
gpl-2.0
| 390
|
"""This module implements a class that..."""
from __future__ import print_function, unicode_literals
from builtins import map
import logging
import re
from kivy.app import App
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.properties import StringProperty
from kivy.uix.popup import Popup
from MUSCIMarker.utils import keypress_to_dispatch_key
__version__ = "0.0.1"
__author__ = "Jan Hajic jr."
objid_selection_dialog_kv = '''
<ObjidSelectionDialog@Popup>
size_hint: None, None
size: app.root.size[0] * 0.5, app.root.size[1] * 0.2
pos_hint: {'center_x': 0.5, 'center_y': 0.5}
title: 'Select CropObjects by typing their objids (whitespace-separated).'
# on_text: current_name_label.text = self.get_current_name()
GridLayout:
id: grid
cols: 1
padding: '24dp'
TextInput:
id: text_input
size_hint_y: None
height: dp(24)
multiline: False
focus: True
text: ''
on_text: root.text = self.text
BoxLayout:
size_hint_y: None
height: dp(24)
Button:
id: cancel
text: root.cancel_text
on_release: root.cancel()
Button:
id: ok
text: root.ok_text
on_release: root.ok()
Label:
id: available_names_label
size_hint_y: None
height: dp(24)
text: ''
'''
Builder.load_string(objid_selection_dialog_kv)
class ObjidSelectionDialog(Popup):
"""The ObjidSelectionDialog class enables selecting specific CropObjects
through typing their `objid`."""
text = StringProperty('')
ok_text = StringProperty('OK')
cancel_text = StringProperty('Cancel')
__events__ = ('on_ok', 'on_cancel')
def __init__(self, *args, **kwargs):
super(ObjidSelectionDialog, self).__init__(*args, **kwargs)
self.create_bindings()
def ok(self):
self.dispatch('on_ok')
self.dismiss()
def cancel(self):
self.dispatch('on_cancel')
self.dismiss()
def on_ok(self):
# This is the "working" method.
self.do_objid_selection()
def on_cancel(self):
self.dismiss()
def dismiss(self, *largs, **kwargs):
self.remove_bindings()
super(ObjidSelectionDialog, self).dismiss()
def create_bindings(self):
Window.bind(on_key_down=self.on_key_down)
Window.bind(on_key_up=self.on_key_up)
def remove_bindings(self):
Window.unbind(on_key_down=self.on_key_down)
Window.unbind(on_key_up=self.on_key_up)
def on_key_down(self, window, key, scancode, codepoint, modifier):
# Should control enter to confirm/escape to cancel
dispatch_key = keypress_to_dispatch_key(key, scancode, codepoint, modifier)
logging.info('ObjidSelectionDialog: Handling keypress: {0}'.format(dispatch_key))
is_handled = self.handle_dispatch_key(dispatch_key)
# Don't let the event propagate through the dialog.
return True
def handle_dispatch_key(self, dispatch_key):
"""Does the "heavy lifting" in keyboard controls: responds to a dispatch key.
        Decoupling this into a separate method facilitates giving commands to
        the dialog programmatically, not just through user input,
        which makes automation easier.
:param dispatch_key: A string of the form e.g. ``109+alt,shift``: the ``key``
number, ``+``, and comma-separated modifiers.
:returns: True if the dispatch key got handled, False if there is
no response defined for the given dispatch key.
"""
if dispatch_key == '13': # Enter
            logging.info('Confirming ObjidSelectionDialog!')
self.ok()
elif dispatch_key == '27': # Escape
            logging.info('Cancelling ObjidSelectionDialog!')
self.cancel()
#elif dispatch_key == '9': # Tab
# pass
# Special keys are handled separately in the TextInput, so
# they would get caught by the "return True". We need to call
# their operations explicitly.
elif dispatch_key == '8': # Backspace
self.ids['text_input'].do_backspace()
elif dispatch_key == '9': # Tab
# Process common prefix
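            # NOTE: `_longest_common_prefix` is not defined anywhere in this
            # class as shown; the tab-completion branch below assumes an
            # attribute presumably supplied by the MLClass selection dialog
            # this code was adapted from.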
lcp = self._longest_common_prefix
infix = lcp[len(self.text):]
            logging.info('ObjidSelectionDialog: Found LCP {0}, infix {1}'
''.format(lcp, infix))
self.ids['text_input'].text = self.text + infix
else:
return False
return True
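    # Hedged illustration: because handle_dispatch_key() is decoupled from the
    # Window key events, the dialog can also be driven programmatically, e.g.
    # calling dialog.handle_dispatch_key('13') confirms it as if Enter had
    # been pressed (the instance name "dialog" is hypothetical).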
def on_key_up(self, window, key, scancode, *args, **kwargs):
return False
######################################################
# The objid selection behavior
def do_objid_selection(self):
        # Skip empty tokens produced by leading/trailing separators before
        # converting to ints.
        objids = [int(t) for t in re.split(r'\W+', self.text) if t]
view = App.get_running_app().cropobject_list_renderer.view
available_objids = frozenset(list(App.get_running_app().annot_model.cropobjects.keys()))
cropobject_views = [view.get_cropobject_view(objid) for objid in objids
if objid in available_objids]
view.unselect_all()
for v in cropobject_views:
if not v.is_selected:
v.dispatch('on_release')
|
hajicj/MUSCIMarker
|
MUSCIMarker/objid_selection.py
|
Python
|
apache-2.0
| 5,487
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ESCOM.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
CallmeTorre/Idalia
|
ESCOM/manage.py
|
Python
|
apache-2.0
| 248
|
import click
from groceries import __version__
from groceries.config import Wizard
from groceries.utils import Item
@click.group()
@click.option('-c', '--config', default='.groceries.yml',
envvar='GROCERIES_CONFIG', callback=Wizard(),
help='Location of the configuration file.')
@click.pass_context
def cli(ctx, config):
'''A command line interface for the Groceries API.'''
ctx.obj = config
@cli.command()
def version():
'''Display the version of this tool.'''
click.echo('groceries-cli/{}'.format(__version__))
@cli.command()
@click.argument('name')
@click.pass_context
def add(ctx, name):
'''Add an item to the groceries list.'''
if not Item(ctx).create(name):
click.echo('Unable to add {0} to the list'.format(name))
ctx.exit(1)
@cli.command()
@click.argument('name')
@click.pass_context
def buy(ctx, name):
'''Buy an item on the groceries list.'''
if not Item(ctx).update(name):
click.echo('Unable to find {0} on the list.'.format(name))
ctx.exit(1)
@cli.command()
@click.pass_context
def list(ctx):
'''List all items on the groceries list.'''
for item in Item(ctx).read():
click.echo('{0}'.format(item.get('name')))
@cli.command()
@click.argument('name')
@click.pass_context
def remove(ctx, name):
'''Remove an item from the groceries list.'''
if not Item(ctx).delete(name):
click.echo('Unable to find {0} on the list.'.format(name))
ctx.exit(1)
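# Hedged usage sketch (illustrative, not part of the original module): wired up
# as a console entry point, the group above would be invoked roughly like:
#   groceries --config ~/.groceries.yml add "milk"
#   groceries buy "milk"
#   groceries list
# The executable name "groceries" is an assumption based on the package name.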
|
Mytho/groceries-cli
|
groceries/core.py
|
Python
|
mit
| 1,497
|
def agts(queue):
bulk = queue.add('bulk.py', ncpus=4, walltime=6)
surf = queue.add('surface.py', ncpus=4, walltime=6)
sigma = queue.add('sigma.py', deps=[bulk, surf])
queue.add('fig2.py', deps=sigma, creates='fig2.png')
|
robwarm/gpaw-symm
|
doc/tutorials/jellium/jellium.agts.py
|
Python
|
gpl-3.0
| 236
|
"""
Django settings for dojob project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=+lo=xy3@0s!71tcfz3ghq*#(@_-#ntty80r41ra^%t32!+zcb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dojob.urls'
WSGI_APPLICATION = 'dojob.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
oxente/dojob
|
dojob/settings.py
|
Python
|
gpl-2.0
| 1,969
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutIteration(Koan):
def test_iterators_are_a_type(self):
it = iter(range(1, 6))
fib = 0
for num in it:
fib += num
self.assertEqual(15, fib)
def test_iterating_with_next(self):
stages = iter(['alpha', 'beta', 'gamma'])
try:
self.assertEqual('alpha', next(stages))
next(stages)
self.assertEqual('gamma', next(stages))
next(stages)
except StopIteration as ex:
err_msg = 'Ran out of iterations'
self.assertMatch('out', err_msg)
# ------------------------------------------------------------------
def add_ten(self, item):
return item + 10
def test_map_transforms_elements_of_a_list(self):
seq = [1, 2, 3]
mapped_seq = map(self.add_ten, seq)
self.assertEqual([11, 12, 13], mapped_seq)
def test_filter_selects_certain_items_from_a_list(self):
def is_even(item):
return (item % 2) == 0
seq = [1, 2, 3, 4, 5, 6]
even_numbers = filter(is_even, seq)
self.assertEqual([2, 4, 6], even_numbers)
def test_just_return_first_item_found(self):
def is_big_name(item):
return len(item) > 4
names = ["Jim", "Bill", "Clarence", "Doug", "Eli"]
        # NOTE: This still iterates through the whole names list, so it is not
        # particularly efficient
self.assertEqual(['Clarence'], filter(is_big_name, names)[:1])
# Boring but effective
for item in names:
if is_big_name(item):
self.assertEqual('Clarence', item)
break
# ------------------------------------------------------------------
def add(self, accum, item):
return accum + item
def multiply(self, accum, item):
return accum * item
def test_reduce_will_blow_your_mind(self):
result = reduce(self.add, [2, 3, 4])
self.assertEqual(9, result)
result2 = reduce(self.multiply, [2, 3, 4], 1)
self.assertEqual(24, result2)
# Extra Credit:
# Describe in your own words what reduce does.
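        # (One possible answer, added for clarity: reduce() folds a sequence
        # into a single value by applying the two-argument function
        # cumulatively from left to right, seeded with the optional
        # initializer when one is given.)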
# ------------------------------------------------------------------
def test_use_pass_for_iterations_with_no_body(self):
for num in range(1, 5):
pass
self.assertEqual(4, num)
# ------------------------------------------------------------------
def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self):
# Ranges are an iteratable sequence
result = map(self.add_ten, range(1, 4))
self.assertEqual([11, 12, 13], list(result))
try:
f = open("example_file.txt")
try:
def make_upcase(line):
return line.strip().upper()
upcase_lines = map(make_upcase, f.readlines())
self.assertEqual(['THIS', 'IS', 'A', 'TEST'], list(upcase_lines))
finally:
# Arg, this is ugly.
# We will figure out how to fix this later.
f.close()
except IOError:
# should never happen
self.fail()
|
bnmalcabis/testpy
|
python2/koans/about_iteration.py
|
Python
|
mit
| 3,274
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras premade models using tf.distribute.Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adagrad
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.premade import linear
from tensorflow.python.keras.premade import wide_deep
from tensorflow.python.platform import test
def strategy_combinations_eager_data_fn():
return combinations.combine(
distribution=[
strategy_combinations.default_strategy,
strategy_combinations.one_device_strategy,
strategy_combinations.one_device_strategy_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.mirrored_strategy_with_two_gpus
],
mode=['eager'],
data_fn=[get_numpy, get_dataset])
def get_numpy():
inputs = np.random.uniform(low=-5, high=5, size=(64, 2)).astype(np.float32)
output = .3 * inputs[:, 0] + .2 * inputs[:, 1]
return inputs, output
def get_dataset():
inputs, output = get_numpy()
dataset = dataset_ops.Dataset.from_tensor_slices((inputs, output))
dataset = dataset.batch(10).repeat(10)
return dataset
class KerasPremadeModelsTest(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_combinations_eager_data_fn())
def test_linear_model(self, distribution, data_fn):
with distribution.scope():
model = linear.LinearModel()
opt = gradient_descent.SGD(learning_rate=0.1)
model.compile(opt, 'mse')
if data_fn == get_numpy:
inputs, output = get_numpy()
hist = model.fit(inputs, output, epochs=5)
else:
hist = model.fit(get_dataset(), epochs=5)
self.assertLess(hist.history['loss'][4], 0.2)
@combinations.generate(strategy_combinations_eager_data_fn())
def test_wide_deep_model(self, distribution, data_fn):
with distribution.scope():
linear_model = linear.LinearModel(units=1)
dnn_model = sequential.Sequential([core.Dense(units=1)])
wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
linear_opt = gradient_descent.SGD(learning_rate=0.05)
dnn_opt = adagrad.Adagrad(learning_rate=0.1)
wide_deep_model.compile(
optimizer=[linear_opt, dnn_opt],
loss='mse')
if data_fn == get_numpy:
inputs, output = get_numpy()
hist = wide_deep_model.fit(inputs, output, epochs=5)
else:
hist = wide_deep_model.fit(get_dataset(), epochs=5)
self.assertLess(hist.history['loss'][4], 0.2)
if __name__ == '__main__':
test.main()
|
aldian/tensorflow
|
tensorflow/python/keras/distribute/keras_premade_models_test.py
|
Python
|
apache-2.0
| 3,689
|
#-------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 11/04/2005
#
#-------------------------------------------------------------------------------
""" Defines the DockWindowShell class used to house drag and drag DockWindow
items that are dropped on the desktop or on the DockWindowShell window.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import wx
# Fixme: Hack to force 'image_slice' to be added via Category to Theme class:
import enthought.traits.ui.wx
from enthought.traits.api \
import HasPrivateTraits, Instance
from enthought.traits.ui.api \
import View, Group
from enthought.pyface.api import SystemMetrics
from enthought.pyface.image_resource \
import ImageResource
from dock_window \
import DockWindow
from dock_sizer \
import DockSizer, DockSection, DockRegion, DockControl, DOCK_RIGHT
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
# DockWindowShell frame icon:
FrameIcon = ImageResource( 'shell.ico' )
#-------------------------------------------------------------------------------
# 'DockWindowShell' class:
#-------------------------------------------------------------------------------
class DockWindowShell ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The wx.Frame window which is the actual shell:
control = Instance( wx.Frame )
#---------------------------------------------------------------------------
# Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, dock_control, use_mouse = False, **traits ):
super( DockWindowShell, self ).__init__( **traits )
old_control = dock_control.control
parent = wx.GetTopLevelParent( old_control )
while True:
next_parent = parent.GetParent()
if next_parent is None:
break
parent = next_parent
self.control = shell = wx.Frame( parent, -1, dock_control.name,
style = wx.DEFAULT_FRAME_STYLE |
wx.FRAME_FLOAT_ON_PARENT |
wx.FRAME_NO_TASKBAR )
shell.SetIcon( FrameIcon.create_icon() )
shell.SetBackgroundColour( SystemMetrics().dialog_background_color )
wx.EVT_CLOSE( shell, self._on_close )
theme = dock_control.theme
self._dock_window = dw = DockWindow( shell, auto_close = True,
theme = theme ).set(
style = 'tab' )
sizer = wx.BoxSizer( wx.VERTICAL )
sizer.Add( dw.control, 1, wx.EXPAND )
shell.SetSizer( sizer )
if use_mouse:
x, y = wx.GetMousePosition()
else:
x, y = old_control.GetPositionTuple()
x, y = old_control.GetParent().ClientToScreenXY( x, y )
dx, dy = old_control.GetSize()
tis = theme.tab.image_slice
tc = theme.tab.content
tdy = theme.tab_active.image_slice.dy
dx += (tis.xleft + tc.left + tis.xright + tc.right)
dy += (tis.xtop + tc.top + tis.xbottom + tc.bottom + tdy)
self.add_control( dock_control )
# Set the correct window size and position, accounting for the tab size
# and window borders:
shell.SetDimensions( x, y, dx, dy )
cdx, cdy = shell.GetClientSizeTuple()
ex_dx = dx - cdx
ex_dy = dy - cdy
shell.SetDimensions( x - (ex_dx / 2) - tis.xleft - tc.left,
y - ex_dy + (ex_dx / 2) - tdy - tis.xtop - tc.top,
dx + ex_dx, dy + ex_dy )
shell.Show()
#---------------------------------------------------------------------------
# Adds a new DockControl to the shell window:
#---------------------------------------------------------------------------
def add_control ( self, dock_control ):
""" Adds a new DockControl to the shell window.
"""
dw = self._dock_window.control
dockable = dock_control.dockable
# If the current DockControl should be closed, then do it:
close = dockable.dockable_should_close()
if close:
dock_control.close( force = True )
# Create the new control:
control = dockable.dockable_get_control( dw )
# If the DockControl was closed, then reset it to point to the new
# control:
if close:
dock_control.set( control = control, style = 'tab' )
else:
# Create a DockControl to describe the new control:
dock_control = DockControl( control = control,
name = dock_control.name,
export = dock_control.export,
style = 'tab',
image = dock_control.image,
closeable = True )
# Finish initializing the DockControl:
dockable.dockable_init_dockcontrol( dock_control )
# Get the current DockSizer:
sizer = dw.GetSizer()
if sizer is None:
# Create the initial sizer:
dw.SetSizer( DockSizer( DockSection( contents = [ DockRegion(
contents = [ dock_control ] ) ] ) ) )
else:
# Sizer exists already, try to add the DockControl as a new
# notebook tab. If the user has reorganized the layout, then just
# dock it on the right side somewhere:
section = sizer.GetContents()
region = section.contents[0]
if isinstance( region, DockRegion ):
region.add( dock_control )
else:
section.add( dock_control, region, DOCK_RIGHT )
# Force the control to update:
dw.Layout()
dw.Refresh()
#---------------------------------------------------------------------------
# Handles the user attempting to close the window:
#---------------------------------------------------------------------------
def _on_close ( self, event ):
""" Handles the user attempting to close the window.
"""
window = self._dock_window.control
section = window.GetSizer().GetContents()
n = len( section.contents )
# Try to close each individual control:
for control in section.get_controls():
control.close( layout = False )
# If some, but not all, were closed, make sure the window gets updated:
if 0 < len( section.contents ) < n:
window.Layout()
window.Refresh()
|
enthought/traitsgui
|
enthought/pyface/dock/dock_window_shell.py
|
Python
|
bsd-3-clause
| 7,733
|
from tests.Dispatcher.DataCreator import CreateDeviceData
import json
import unittest
from bin.Devices.Containers.ContainersDevice import ContainersDevice
from bin.Devices.Containers.ContainersManager import EventlessContainersManager, NullContainersManager, \
ContainersManager
from bin.Devices.Containers.ContainersFactory import ContainersFactory
from bin.Devices.Containers.ContainersManagerFactory import ContainersManagerFactory
from circuits import BaseComponent
from bin.Settings.SettingsSerialEntity import SettingsSerialEntity
from bin.Dispatcher.Dictionary import SettingsKeys
from unittest.mock import MagicMock
class TestContainerDeviceDataFlow(unittest.TestCase):
def setUp(self):
self.eventless_mgr = EventlessContainersManager()
self.null_mgr = NullContainersManager()
self.sample_data = {"sample": "data"}
self.data = CreateDeviceData()
self.base_component = BaseComponent()
def test_eventless_data_flow(self):
container = ContainersDevice(self.eventless_mgr)
container.update_data_incoming(self.sample_data)
self.assertDictEqual(self.sample_data, json.loads(self.eventless_mgr.line_sent))
def test_creation_of_containers_device_by_containers_factory(self):
containers_settings = self._create_settings()
containers_factory = ContainersFactory(ContainersManagerFactory(self.base_component, containers_settings))
containers = containers_factory.create()
self.assertIsInstance(containers, ContainersDevice)
def test_creation_of_null_containers_manager(self):
containers_manager_factory = ContainersManagerFactory(self.base_component, self._create_settings())
containers_manager = containers_manager_factory.create()
self.assertIsInstance(containers_manager, NullContainersManager)
def test_creation_of_real_containers_manager(self):
containers_manager_factory = ContainersManagerFactory(self.base_component, self._create_settings())
containers_manager_factory.port_exists = MagicMock(return_value=containers_manager_factory.port)
containers_manager = containers_manager_factory.create()
self.assertIsInstance(containers_manager, ContainersManager)
def _create_settings(self):
containers_settings = SettingsSerialEntity(key=SettingsKeys.CONTAINERS)
containers_settings.add_entries(self.data.containers_dict)
return containers_settings
if __name__ == "__main__":
unittest.main()
|
rCorvidae/OrionPI
|
src/tests/Devices/Containers/TestContainersDeviceAndManager.py
|
Python
|
mit
| 2,509
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
pass
def backwards(self, orm):
pass
models = {
}
complete_apps = ['wagtailusers']
|
benemery/wagtail
|
wagtail/wagtailusers/south_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 356
|
from debian_tools.debian_tools import DebianTools
def test_run_without_arguments() -> None:
try:
DebianTools([])
assert False
except SystemExit as exception:
assert str(exception) == '1'
def test_run_with_help_argument() -> None:
try:
DebianTools(['--help'])
assert False
except SystemExit as exception:
assert str(exception) == '0'
def test_generate() -> None:
assert DebianTools([
'--hostname', 'example',
'--domain', 'example.org',
'--root-password', 'root',
'--user-name', 'example',
'--user-password', 'example',
'--user-real-name', 'Example User'
]) is not None
def test_output_document() -> None:
assert DebianTools([
'--hostname', 'example',
'--domain', 'example.org',
'--root-password', 'root',
'--user-name', 'example',
'--user-password', 'example',
'--user-real-name', 'Example User',
'--output-document', 'test.cfg'
]) is not None
def test_generate_static_networking() -> None:
assert DebianTools([
'--hostname', 'example',
'--domain', 'example.org',
'--root-password', 'root',
'--user-name', 'example',
'--user-password', 'example',
'--user-real-name', 'Example User',
'--static-networking',
'--address', '10.0.0.2',
'--netmask', '255.0.0.0',
'--gateway', '10.0.0.1',
'--nameserver', '10.0.0.1'
]) is not None
|
FunTimeCoding/debian-tools
|
tests/test_debian_tools.py
|
Python
|
mit
| 1,513
|
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import MON, TUE, FRI
from datetime import date
class Australia(WesternCalendar, ChristianMixin):
"Australia"
include_good_friday = True
include_easter_monday = True
include_queens_birthday = False
include_labour_day_october = False
# Shall we shift Anzac Day?
shift_anzac_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 26, "Australia Day"),
)
def get_canberra_day(self, year):
return (
Australia.get_nth_weekday_in_month(year, 3, MON, 2),
"Canberra Day"
)
def get_queens_birthday(self, year):
return (
Australia.get_nth_weekday_in_month(year, 6, MON, 2),
"Queen's Birthday"
)
def get_labour_day_october(self, year):
return (
Australia.get_nth_weekday_in_month(year, 10, MON),
'Labour Day'
)
def get_anzac_day(self, year):
anzac_day = date(year, 4, 25)
if not self.shift_anzac_day:
return (anzac_day, "Anzac Day")
if anzac_day.weekday() in self.get_weekend_days():
anzac_day = self.find_following_working_day(anzac_day)
return (anzac_day, "Anzac Day")
def get_variable_days(self, year):
# usual variable days
days = super(Australia, self).get_variable_days(year)
january_first = date(year, 1, 1)
if january_first.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(january_first),
"New Year's Day shift")
)
australia_day = date(year, 1, 26)
if australia_day.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(australia_day),
"Australia Day shift")
)
# was fixed, but might be shifted
days.append(self.get_anzac_day(year))
if self.include_queens_birthday:
days.append(self.get_queens_birthday(year))
if self.include_labour_day_october:
days.append(self.get_labour_day_october(year))
return days
class AustraliaCapitalTerritory(Australia):
"Australia Capital Territory"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
include_boxing_day = True
def get_family_community_day(self, year):
        # Since this day is picked using the school year calendar, there is no
        # reliable mathematical way to compute it yet
# TODO: Family & Community Day was celebrated on the first Tuesday of
# November in 2007, 2008 and 2009
if year == 2010:
day = date(2010, 9, 27)
elif year == 2011:
day = date(2011, 10, 10)
elif year == 2012:
day = date(2012, 10, 8)
elif year == 2013:
day = date(2013, 9, 30)
elif year == 2014:
day = date(2014, 9, 29)
else:
raise Exception("Year %d is not implemented, Sorry" % year)
return (day, "Family & Community Day")
def get_variable_days(self, year):
days = super(AustraliaCapitalTerritory, self) \
.get_variable_days(year)
days += [
self.get_canberra_day(year),
self.get_family_community_day(year),
]
return days
class AustraliaNewSouthWales(Australia):
"Australia New South Wales"
include_queens_birthday = True
include_easter_saturday = True
include_easter_sunday = True
include_labour_day_october = True
include_boxing_day = True
shift_anzac_day = False
class AustraliaNorthernTerritory(Australia):
"Australia Northern Territory"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
def get_may_day(self, year):
return (
AustraliaNorthernTerritory.get_nth_weekday_in_month(
year, 5, MON),
"May Day"
)
def get_picnic_day(self, year):
return (
AustraliaNorthernTerritory.get_nth_weekday_in_month(
year, 8, MON),
"Picnic Day"
)
def get_variable_days(self, year):
days = super(AustraliaNorthernTerritory, self) \
.get_variable_days(year)
days += [
self.get_may_day(year),
self.get_picnic_day(year),
]
return days
class AustraliaQueensland(Australia):
"Australia Queensland"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
def get_labour_day_may(self, year):
return (
AustraliaNorthernTerritory.get_nth_weekday_in_month(
year, 5, MON),
"Labour Day"
)
def get_variable_days(self, year):
days = super(AustraliaQueensland, self) \
.get_variable_days(year)
days += [
self.get_labour_day_may(year),
]
return days
class SouthAustralia(Australia):
"South Australia"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
def get_adelaides_cup(self, year):
return (
SouthAustralia.get_nth_weekday_in_month(
year, 3, MON, 2),
"Adelaide's cup"
)
def get_proclamation_day(self, year):
return (date(year, 12, 26), "Proclamation Day")
def get_variable_days(self, year):
days = super(SouthAustralia, self) \
.get_variable_days(year)
days += [
self.get_adelaides_cup(year),
self.get_proclamation_day(year),
]
return days
class Tasmania(Australia):
"Tasmania"
include_queens_birthday = True
include_boxing_day = True
shift_anzac_day = False
@property
def has_recreation_day(self):
return True
def get_eight_hours_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 3, MON, 2),
"Eight hours Day"
)
def get_recreation_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 11, MON),
"Recreation Day"
)
def get_variable_days(self, year):
days = super(Tasmania, self).get_variable_days(year)
days.append(self.get_eight_hours_day(year))
if self.has_recreation_day:
days.append(self.get_recreation_day(year))
return days
class Hobart(Tasmania):
"Hobart"
@property
def has_recreation_day(self):
return False
def get_hobart(self, year):
return (
Hobart.get_nth_weekday_in_month(year, 2, MON, 2),
"Royal Hobart Regatta"
)
def get_variable_days(self, year):
days = super(Hobart, self).get_variable_days(year)
days.append(self.get_hobart(year))
return days
class Victoria(Australia):
"Victoria"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
def get_labours_day_in_march(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 3, MON, 2),
"Labour Day"
)
def get_melbourne_cup(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 11, TUE),
"Melbourne Cup"
)
def get_variable_days(self, year):
days = super(Victoria, self).get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_melbourne_cup(year))
return days
class WesternAustralia(Australia):
"Western Australia"
include_boxing_day = True
def get_labours_day_in_march(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 3, MON),
"Labour Day"
)
def get_western_australia_day(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 6, MON),
"Western Australia Day"
)
def get_variable_days(self, year):
        # It is not possible to reliably compute the Queen's Birthday holiday
        # in the Western Australia territory, since it is based on the
        # Governor's decision (it is typically the last Monday of September
        # or the first Monday of October)
days = super(WesternAustralia, self).get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_western_australia_day(year))
return days
class MarshallIslands(WesternCalendar, ChristianMixin):
"Marshall Islands"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(3, 3, "Remembrance Day"),
(5, 1, "Constitution Day"),
(11, 17, "Presidents' Day"),
(12, 31, "New Year's Eve"),
)
include_good_friday = True
def get_variable_days(self, year):
days = super(MarshallIslands, self).get_variable_days(year)
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 7, FRI),
"Fishermen's Holiday"
))
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 9, FRI),
"Labour Day"
))
days.append((
MarshallIslands.get_last_weekday_in_month(year, 9, FRI),
"Manit Day"
))
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 12, FRI),
"Gospel Day"
))
return days
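# Hedged usage sketch (illustrative, not part of the original module): a
# calendar is typically queried through the `holidays(year)` helper inherited
# from workalendar's core classes; the calendar and year below are arbitrary
# assumptions.
def _example_victoria_holidays():
    """Return Victoria's public holidays for 2014 as (date, label) pairs."""
    return Victoria().holidays(2014)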
|
ChrisStevens/workalendar
|
workalendar/oceania.py
|
Python
|
mit
| 9,639
|
#!/usr/bin/env python
import Queue
import argparse
import json
import os
import traceback
import yaml
from Tkinter import *
from twisted.internet import protocol, utils, task, tksupport, reactor
from twisted.python import failure
from cStringIO import StringIO
import urllib2
# from datetime import datetime
# from urllib import urlencode
import logging
from copy import deepcopy
class ColorShade(object):
"""
Handle color shading.
"""
def __init__(self, min_rgb, max_rgb):
"""
Constructor.
:param min_rgb: color at 0.0 as (R, G, B)
:param max_rgb: color at 1.0 as (R, G, B)
"""
logging.debug('Min colors: {}'.format(min_rgb))
logging.debug('Max colors: {}'.format(max_rgb))
self.min_rgb = min_rgb
self.max_rgb = max_rgb
self.r_diff = self.max_rgb[0] - self.min_rgb[0]
self.g_diff = self.max_rgb[1] - self.min_rgb[1]
self.b_diff = self.max_rgb[2] - self.min_rgb[2]
logging.debug('color diff: {},{},{}'.format(self.r_diff, self.g_diff, self.b_diff))
def shade(self, fraction):
"""
Shade this color based on the given fraction
:param fraction:
        :return: a 16-bit-per-channel Tk color string "#rrrrggggbbbb",
            e.g. "#7fff7fff0000"
"""
return '#{:04x}{:04x}{:04x}'.format(int(self.min_rgb[0] + (self.r_diff * fraction)),
int(self.min_rgb[1] + (self.g_diff * fraction)),
int(self.min_rgb[2] + (self.b_diff * fraction)))
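# Hedged usage sketch (illustrative, not part of the original module): the RGB
# triples are 16-bit-per-channel values, the format Tkinter's winfo_rgb()
# returns; the concrete colors below are assumptions.
def _color_shade_example():
    """Interpolate half-way from pure green to pure red."""
    shader = ColorShade((0, 65535, 0), (65535, 0, 0))
    return shader.shade(0.5)  # -> '#7fff7fff0000'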
class SensorBase(object):
"""
Base class for sensors executing external processes.
"""
STATUS = ':status'
FRACTION = ':fraction'
STATE = ':state'
def __init__(self, queue, update_interval=10.0):
"""
Constructor. This will create the sensor, but not start updates. Call the start()
method to start sensor updates.
:param queue: the queue which this sensor shall put data in.
:param update_interval: time in seconds (float) between sensor updates
"""
self.queue = queue
self.update_interval = update_interval if update_interval else 5.0
self.timer = None
self.value_type = None
def start(self):
"""
Start updating the sensor. The method timeout_triggered() will be called when an
update is requested,
:return:
"""
logging.debug('Sleeping for %f seconds', self.update_interval)
self.timer = reactor.callLater(0.0, self.timeout_triggered)
def timeout_triggered(self):
"""
Called when timer fires. Calls the run() method.
:return:
"""
logging.debug('Sensor awoke')
self.run()
def update(self, result, value_type=None):
"""
Put a result in the queue and start update timer. Sub-classes must ensure that this
method is called at some point for each update, otherwise the timer will not be restarted.
:param result: data to send to emitter.
:param value_type: type of value. Default is self.value_type.
:return:
"""
logging.debug('Sensor result: %s', str(result))
self.queue.put({'value-type': value_type if value_type else self.value_type, 'result': result})
self.timer = reactor.callLater(self.update_interval, self.timeout_triggered)
class StatusSensor(SensorBase):
"""
Run a program and return the exit status during update.
"""
def __init__(self, queue, config):
"""
Constructor.
:param queue: the queue to put data in
:param config: the sensor configuration dict
"""
super(StatusSensor, self).__init__(queue, config['update-interval'] if 'update-interval' in config else None)
self.program = config['program'] if 'program' in config else None
self.value_type = self.STATUS
def run(self):
"""
Run the configured program and call the update() method with the exit status.
"""
if not self.program:
self.update(-1)
return
use_shell = not (isinstance(self.program, (list, tuple)))
logging.debug('Calling %s',
self.program if use_shell else ' '.join(self.program))
d = utils.getProcessValue(self.program[0], self.program[1:] if len(self.program) > 1 else [],
env=os.environ)
d.addCallback(self.update)
class StateSensor(SensorBase):
"""
Run a program and return the first line of output.
"""
def __init__(self, queue, config):
"""
Constructor.
:param queue: the queue to put data in
:param config: the sensor configuration dict
"""
super(StateSensor, self).__init__(queue, config['update-interval'] if 'update-interval' in config else None)
self.program = config['program'] if 'program' in config else None
self.value_type = self.STATE
def run(self):
"""
Run the configured program and call the update() method with the first line printed by the program.
"""
if not self.program:
self.update(-1)
return
use_shell = not (isinstance(self.program, (list, tuple)))
logging.debug('Calling %s',
self.program if use_shell else ' '.join(self.program))
d = utils.getProcessOutput(self.program[0], self.program[1:] if len(self.program) > 1 else [],
env=os.environ)
d.addCallbacks(self.got_output, self.no_output)
def no_output(self, err):
logging.debug("Got %s", err)
self.update(-1)
def got_output(self, output):
result = output.split("\n")[0].strip() if output else ''
logging.debug("Got %s --> %s", output, result)
self.update(result)
class FractionSensor(SensorBase):
"""
Run a program which is expected to print a float value in the range 0.0..1.0.
"""
def __init__(self, queue, config):
"""
Constructor.
:param queue: queue to put data in
:param config: the sensor configuration dict
"""
super(FractionSensor, self).__init__(queue, config['update-interval'] if 'update-interval' in config else None)
self.program = config['program'] if 'program' in config else None
self.value_type = self.FRACTION
def run(self):
"""
Run the configured program and expect a value in range 0.0..1.0. A negative value
represents a broken sensor.
"""
if not self.program:
logging.debug("No program configured")
self.update(-1.0)
return
use_shell = not (isinstance(self.program, (list, tuple)))
logging.debug('Calling %s', self.program if use_shell else ' '.join(self.program))
d = utils.getProcessOutput(self.program[0], self.program[1:] if len(self.program) > 1 else [], env=os.environ)
d.addCallbacks(self.got_output, self.no_output)
def no_output(self, err):
logging.debug("Got %s", err)
self.update(-1.0)
def got_output(self, output):
result_float = float(output)
logging.debug("Got %s --> %f", output, result_float)
self.update(result_float)
class JenkinsJobStateSensor(SensorBase):
"""
Monitors status of Jenkins jobs.
"""
def __init__(self, queue, config):
super(JenkinsJobStateSensor, self).__init__(queue,
config['update-interval'] if 'update-interval' in config else None)
self.url = config['url'] if 'url' in config else None
self.value_type = self.STATE
def run(self):
if self.url:
try:
logging.debug('Fetching Jenkins data from URL: %s/lastBuild/api/json', self.url)
data = json.loads(urllib2.urlopen('{}/lastBuild/api/json'.format(self.url)).read())
state = 'BUILDING' if data['building'] else data['result']
            except Exception:
state = -1
else:
state = -1
self.update(state)
class Lamp(object):
def __init__(self, parent, queue, config):
self.queue = queue
self.width = config['width'] if 'width' in config else 100
self.height = config['height'] if 'height' in config else 100
self.radius = config['radius'] if 'radius' in config else 1.0
self.on_color = config['on-color'] if 'on-color' in config else '#80ff80'
self.off_color = config['off-color'] if 'off-color' in config else '#ff0000'
self.min_color = config['min-color'] if 'min-color' in config else 'green'
self.max_color = config['max-color'] if 'max-color' in config else 'red'
self.broken_color = config['broken-color'] if 'broken-color' in config else '#000000'
self.default_color = config['default-color'] if 'default-color' in config else self.broken_color
self.state_colors = config['state-colors'] if 'state-colors' in config else {}
self.shape = config['shape'] if 'shape' in config else 'round'
self.widget = Canvas(parent, height=self.height, width=self.width)
self.shader = ColorShade(parent.winfo_rgb(self.min_color), parent.winfo_rgb(self.max_color))
def update(self):
try:
data = self.queue.get(0)
except Queue.Empty:
return
logging.debug('Got sensor data %s', str(data['result']))
if data['value-type'] == SensorBase.FRACTION:
fraction = data['result']
if fraction < 0.0:
logging.debug('Light is broken: %d => %s', fraction, self.broken_color)
color = self.broken_color
else:
if fraction > 1.0:
fraction = 1.0
logging.debug('Light is on by: %f', fraction)
color = self.shader.shade(fraction)
elif data['value-type'] == SensorBase.STATE:
state = str(data['result'])
if state not in self.state_colors:
logging.debug('Unknown light state: %s => %s', state, self.default_color)
                color = self.default_color
else:
logging.debug('Light is on: %s => %s', state, self.state_colors[state])
color = self.state_colors[state]
else:
status = data['result']
if int(status) < 0:
logging.debug('Light is broken: %d => %s', status, self.broken_color)
color = self.broken_color
elif int(status) > 0:
logging.debug('Light is off: %d', status)
color = self.off_color
else:
logging.debug('Light is on: %d', status)
color = self.on_color
self.draw(color)
def draw(self, color):
self.widget.delete('all')
margin = 1 + (self.width - self.width * self.radius) / 2
if self.shape == 'square':
self.widget.create_rectangle(margin, margin, self.width - margin, self.height - margin, fill=color)
else:
self.widget.create_oval(margin, margin, self.width - margin, self.height - margin, fill=color)
class Meter(object):
"""
A meter type emitter which can be configured as a pie or arc and change color.
"""
def __init__(self, parent, queue, config):
self.queue = queue
self.width = config['width'] if 'width' in config else 100
self.height = config['height'] if 'height' in config else 100
self.min_color = config['min-color'] if 'min-color' in config else '#00ff00'
self.max_color = config['max-color'] if 'max-color' in config else '#ff0000'
self.broken_color = config['broken-color'] if 'broken-color' in config else '#000000'
self.start_angle = config['start-angle'] if 'start-angle' in config else 270.0
        self.max_angle = config['max-angle'] if 'max-angle' in config else 360.0
self.thickness = config['thickness'] if 'thickness' in config else 0.5
self.widget = Canvas(parent, height=self.height, width=self.width)
# Maintain a color diff list to make calculation easier
self.shader = ColorShade(parent.winfo_rgb(self.min_color), parent.winfo_rgb(self.max_color))
def update(self):
try:
data = self.queue.get(0)
status = float(data['result'])
except Queue.Empty:
return
logging.debug('Got sensor data %f', status)
if status < 0.0:
logging.debug('Sensor is broken: %f => %s', status, self.broken_color)
color = self.broken_color
status = 0.5
else:
if status > 1.0:
status = 1.0
color = self.shader.shade(status)
start_angle = self.start_angle
end_angle = -self.max_angle * status
logging.debug('Meter settings: %s, %f, %f', color, start_angle, end_angle)
edgesize = self.width * self.thickness * 0.5
margin = 1 + edgesize / 2
self.widget.delete('all')
self.widget.create_arc(margin, margin, self.width - margin, self.height - margin, start=start_angle,
extent=end_angle, outline=color, style="arc", width=edgesize)
class Broken(object):
def __init__(self, parent, queue, config):
self.queue = queue
self.width = config['width'] if 'width' in config else 100
self.height = config['height'] if 'height' in config else 100
self.widget = Canvas(parent, height=self.height, width=self.width)
self.draw()
def update(self):
try:
data = self.queue.get(0)
except Queue.Empty:
return
logging.debug('Got sensor data %s', str(data['result']))
def draw(self):
self.widget.delete('all')
x0 = int(0.1 * float(self.width))
w = self.width - int(0.1 * float(self.width))
y0 = int(0.1 * float(self.height))
h = self.height - int(0.1 * float(self.height))
self.widget.create_line(x0, y0, w, h, fill='red', width=4)
self.widget.create_line(w, y0, x0, h, fill='red', width=4)
class Space(object):
"""Creates an empty space on the dashboard"""
def __init__(self, parent, config):
"""
Constructor. The value of the "space" entry in the "config" dict may contain a string: <int>x<int>, which will
then be interpreted as WIDTHxHEIGHT and override any other width/height settings in the config.
:param parent: Tkinter widget
:param config: configuration dict
"""
if config['space'] and re.match(r'^\d+x\d+$', str(config['space'])):
(w, h) = str(config['space']).split('x')
self.width = int(w)
self.height = int(h)
else:
self.width = config['width'] if 'width' in config else 40
self.height = config['height'] if 'height' in config else 40
self.widget = Frame(parent, height=self.height, width=self.width)
def update(self):
pass
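# Illustrative (hypothetical) "space" entries, assuming the YAML list format
# consumed by TinyDashApp below:
# - space: 20x40   # 20 px wide, 40 px tall gap
# - space: yes     # no WxH string: falls back to width/height, or 40x40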
class TinyDashApp:
"""
Tiny dash application class.
Reads config files, creates sensors and emitters, and triggers updates of
emitters.
"""
def __init__(self, parent, args):
"""
Constructor. Reads configuration files and creates the dashboard.
:param parent: Tkinter widget
:param args: parsed command-line arguments in "argparse" format
"""
self.parent = parent
self.dash_frame = Frame(parent)
self.dash_frame.pack(fill=BOTH, expand=1)
self.status_text = StringVar()
self.status_label = Label(parent, textvariable=self.status_text)
self.status_label.pack(side=BOTTOM, fill=X, expand=0, anchor='w')
self.args = args
# Delay thread starts until after all emitters have been created
self.sensors = []
self.emitters = []
# Default settings, also if defaults are reset
self.default_defaults = {'height': 40,
'width': 40,
'update-interval': 5.0, }
# Current default settings, which can be updated by config files
self.defaults = deepcopy(self.default_defaults)
for config_file in args.configfiles:
logging.debug('Reading configuration from %s', config_file)
with open(config_file) as fp:
config = yaml.load(fp)
logging.debug('%s', config)
for raw_item in config:
if self._handle_defaults(raw_item):
continue
if self._handle_space(raw_item):
continue
item = deepcopy(self.defaults)
item.update(raw_item)
queue = Queue.Queue()
thing = None
if item['sensor'] == 'Status':
logging.debug("Found {}".format(item['sensor']))
thing = StatusSensor(queue, item)
elif item['sensor'] == 'State':
logging.debug("Found {}".format(item['sensor']))
thing = StateSensor(queue, item)
elif item['sensor'] == 'Fraction':
logging.debug("Found {}".format(item['sensor']))
thing = FractionSensor(queue, item)
elif item['sensor'] == 'JenkinsJobState':
logging.debug("Found {}".format(item['sensor']))
thing = JenkinsJobStateSensor(queue, item)
if not thing:
print "Error: {} is an unknown sensor type".format(item["sensor"])
continue
self.sensors.append(thing)
if 'type' in item:
try:
class_ = getattr(sys.modules[__name__], item['type'])
indicator = class_(self.dash_frame, queue, item)
self.emitters.append(indicator)
if 'name' in item:
indicator.widget.bind('<Enter>',
lambda event, name=item['name']: self.status_text.set(name))
indicator.widget.bind('<Leave>', lambda event: self.status_text.set(''))
except Exception as e:
logging.error(e.message)
indicator = Broken(self.dash_frame, queue, item)
self.emitters.append(indicator)
status_text = ''
if 'name' in item:
status_text = item['name'] + ': '
status_text += 'Error: ' + e.message
indicator.widget.bind('<Enter>',
lambda event, name=status_text: self.status_text.set(name))
indicator.widget.bind('<Leave>', lambda event: self.status_text.set(''))
else:
logging.warning('Sensor %s not connected to an indicator.',
item['name'] if 'name' in item else item['sensor'])
indicator = Broken(self.dash_frame, queue, item)
self.emitters.append(indicator)
status_text = ''
if 'name' in item:
status_text = item['name'] + ': '
status_text += 'Error: not connected to an indicator'
indicator.widget.bind('<Enter>',
lambda event, name=status_text: self.status_text.set(name))
indicator.widget.bind('<Leave>', lambda event: self.status_text.set(''))
logging.debug("Laying out dashboard")
if self.args.geometry:
geometry = self.args.geometry
else:
geometry = self.load_saved_geometry()
if geometry:
self.parent.geometry(geometry)
self.parent.update()
self.layout()
logging.debug("Starting sensors")
# Start sensors
for s in self.sensors:
s.start()
# And go with updates
self.timer = parent.after(100, self.refresh)
def _handle_defaults(self, item):
"""Handles "defaults" type of configuration entries."""
if 'defaults+' in item:
if not item['defaults+']:
self.defaults = deepcopy(self.default_defaults)
logging.debug('Defaults reset: %s', self.defaults)
else:
self.defaults.update(item['defaults+'])
logging.debug('Defaults updated: %s', self.defaults)
return True
if 'defaults' in item:
self.defaults = deepcopy(self.default_defaults)
if not item['defaults']:
return True
self.defaults.update(item['defaults'])
logging.debug('Defaults set: %s', self.defaults)
return True
return False
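# Illustrative (hypothetical) config entries handled by _handle_defaults:
# - defaults: {width: 60, height: 60}    # reset, then apply these defaults
# - defaults+: {update-interval: 2.0}    # merge into the current defaults
# - defaults:                            # empty value resets to built-ins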
def _handle_space(self, item):
"""Handles "space" type of configuration entries."""
if 'space' in item:
self.emitters.append(Space(self.dash_frame, item))
return True
return False
def layout(self, *args):
logging.debug('Window width: %d', self.parent.winfo_width())
max_x = self.parent.winfo_width() if self.parent.winfo_width() > 1 else 600
x = 0
y = 0
delta_y = 0
for emitter in self.emitters:
if emitter.width + x > max_x:
y = y + delta_y
x = 0
delta_y = 0
emitter.widget.place(x=x, y=y)
x += emitter.width
if emitter.height > delta_y:
delta_y = emitter.height
def refresh(self):
self.timer = self.parent.after(100, self.refresh)
for emitter in self.emitters:
emitter.update()
def load_saved_geometry(self):
geometry_file = os.path.join(self.args.config_dir, 'geometry')
if not os.path.exists(geometry_file):
return None
logging.debug('Loading geometry from %s', geometry_file)
with open(geometry_file) as fp:
return fp.read()
def save_geometry(self):
if not self.args.geometry:
# Only save geometry if started without the argument
geometry_file = os.path.join(self.args.config_dir, 'geometry')
if not os.path.exists(self.args.config_dir):
os.makedirs(self.args.config_dir)
logging.debug('Saving geometry %s in %s', self.parent.geometry(), geometry_file)
with open(geometry_file, 'w') as fp:
fp.write(self.parent.geometry())
def on_closing(self, *args):
self.save_geometry()
self.parent.destroy()
reactor.stop()
def parse_args():
parser = argparse.ArgumentParser(description='Simple dashboard application')
parser.add_argument('configfiles',
nargs='*',
default=[os.path.join(os.path.expanduser('~'), '.tiny-dash', 'config')],
help='Dashboard configuration files to read.')
parser.add_argument('--config-dir',
default=os.path.join(os.path.expanduser('~'), '.tiny-dash'),
help='Location of tiny-dash configuration files. Default is ~/.tiny-dash')
parser.add_argument('--geometry',
help='Size and position of the window')
parser.add_argument('--debug',
action='store_true',
help='Emit debugging information')
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
logging.debug('Parsed arguments: %s', args)
return args
if __name__ == '__main__':
root = Tk()
tksupport.install(root)
tiny = TinyDashApp(root, parse_args())
root.protocol("WM_DELETE_WINDOW", tiny.on_closing)
def reconfigure(event):
    tiny.layout()
root.bind('<Configure>', reconfigure)
root.bind('<Control-q>', tiny.on_closing)
reactor.run()
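# Example invocation (illustrative; paths and geometry are assumptions, flags
# as defined by parse_args above):
#   python tiny-dash.py ~/.tiny-dash/config --geometry 400x200+50+50 --debug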
|
Gustra/tiny-dash
|
bin/tiny-dash.py
|
Python
|
mit
| 24,479
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import hashlib
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.util import tf_inspect
def assert_integer_form(x,
data=None,
summarize=None,
message=None,
int_dtype=None,
name="assert_integer_form"):
"""Assert that x has integer components (or floats equal to integers).
Args:
x: Floating-point `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
message: A string to prefix to the default message.
int_dtype: A `tf.dtype` used to cast the float to. The default (`None`)
implies the smallest possible signed int will be used for casting.
name: A name for this operation (optional).
Returns:
Op raising `InvalidArgumentError` if `cast(x, int_dtype) != x`.
"""
with ops.name_scope(name, values=[x, data]):
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return control_flow_ops.no_op()
message = message or "{} has non-integer components".format(x)
if int_dtype is None:
try:
int_dtype = {
dtypes.float16: dtypes.int16,
dtypes.float32: dtypes.int32,
dtypes.float64: dtypes.int64,
}[x.dtype.base_dtype]
except KeyError:
raise TypeError("Unrecognized type {}".format(x.dtype.name))
return check_ops.assert_equal(
x,
math_ops.cast(math_ops.cast(x, int_dtype), x.dtype),
data=data,
summarize=summarize,
message=message,
name=name)
def assert_symmetric(matrix):
matrix_t = array_ops.matrix_transpose(matrix)
return control_flow_ops.with_dependencies(
[check_ops.assert_equal(matrix, matrix_t)], matrix)
def embed_check_nonnegative_integer_form(
x, name="embed_check_nonnegative_integer_form"):
"""Assert x is a non-negative tensor, and optionally of integers."""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
assertions = [
check_ops.assert_non_negative(
x, message="'{}' must be non-negative.".format(x)),
]
if not x.dtype.is_integer:
assertions += [
assert_integer_form(
x,
message="'{}' cannot contain fractional components.".format(x)),
]
return control_flow_ops.with_dependencies(assertions, x)
def same_dynamic_shape(a, b):
"""Returns whether a and b have the same dynamic shape.
Args:
a: `Tensor`
b: `Tensor`
Returns:
`bool` `Tensor` representing if both tensors have the same shape.
"""
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
# Here we can't just do math_ops.equal(a.shape, b.shape), since
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
def all_shapes_equal():
return math_ops.reduce_all(
math_ops.equal(
array_ops.concat(
[array_ops.shape(a), array_ops.shape(b)], 0),
array_ops.concat(
[array_ops.shape(b), array_ops.shape(a)], 0)))
# One of the shapes isn't fully defined, so we need to use the dynamic
# shape.
return control_flow_ops.cond(
math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
all_shapes_equal, lambda: constant_op.constant(False))
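# Illustrative usage (a sketch, not part of the upstream module): the result
# is itself a boolean Tensor, so in graph mode it must be evaluated to get a
# Python bool.
#
#   a = array_ops.placeholder(dtypes.float32, shape=[None, 3])
#   b = array_ops.placeholder(dtypes.float32, shape=[None, 3])
#   eq = same_dynamic_shape(a, b)  # scalar bool Tensor, True iff shapes match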
def maybe_get_static_value(x, dtype=None):
"""Helper which tries to return a static value.
Given `x`, extract its value statically, optionally casting to a specific
dtype. If this is not possible, None is returned.
Args:
x: `Tensor` for which to extract a value statically.
dtype: Optional dtype to cast to.
Returns:
Statically inferred value if possible, otherwise None.
"""
if x is None:
return x
try:
# This returns an np.ndarray.
x_ = tensor_util.constant_value(x)
except TypeError:
x_ = x
if x_ is None or dtype is None:
return x_
return np.array(x_, dtype)
def get_logits_and_probs(logits=None,
probs=None,
multidimensional=False,
validate_args=False,
name="get_logits_and_probs",
dtype=None):
"""Converts logit to probabilities (or vice-versa), and returns both.
Args:
logits: Floating-point `Tensor` representing log-odds.
probs: Floating-point `Tensor` representing probabilities.
multidimensional: Python `bool`, default `False`. If `True`, the last
dimension of `logits` or `probs`, a `[N1, N2, ..., k]` dimensional
tensor, represents the logit or probability of each of the `shape[-1]`
classes.
validate_args: Python `bool`, default `False`. When `True`, either assert `0
<= probs <= 1` (if not `multidimensional`) or that the last dimension of
`probs` sums to one.
name: A name for this operation (optional).
dtype: `tf.DType` to prefer when converting args to `Tensor`s.
Returns:
logits, probs: Tuple of `Tensor`s. If `probs` has an entry that is `0` or
`1`, then the corresponding entry in the returned logit will be `-Inf` and
`Inf` respectively.
Raises:
ValueError: if neither `probs` nor `logits` were passed in, or both were.
"""
with ops.name_scope(name, values=[probs, logits]):
if (probs is None) == (logits is None):
raise ValueError("Must pass probs or logits, but not both.")
if probs is None:
logits = ops.convert_to_tensor(logits, name="logits", dtype=dtype)
if not logits.dtype.is_floating:
raise TypeError("logits must having floating type.")
# We can early return since we constructed probs and therefore know
# they're valid.
if multidimensional:
if validate_args:
logits = embed_check_categorical_event_shape(logits)
return logits, nn.softmax(logits, name="probs")
return logits, math_ops.sigmoid(logits, name="probs")
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
if not probs.dtype.is_floating:
raise TypeError("probs must having floating type.")
if validate_args:
with ops.name_scope("validate_probs"):
one = constant_op.constant(1., probs.dtype)
dependencies = [check_ops.assert_non_negative(probs)]
if multidimensional:
probs = embed_check_categorical_event_shape(probs)
dependencies += [
check_ops.assert_near(
math_ops.reduce_sum(probs, -1),
one,
message="probs does not sum to 1.")
]
else:
dependencies += [
check_ops.assert_less_equal(
probs, one, message="probs has components greater than 1.")
]
probs = control_flow_ops.with_dependencies(dependencies, probs)
with ops.name_scope("logits"):
if multidimensional:
# Here we don't compute the multidimensional case, in a manner
# consistent with respect to the unidimensional case. We do so
# following the TF convention. Typically, you might expect to see
# logits = log(probs) - log(probs[pivot]). A side-effect of
# being consistent with the TF approach is that the unidimensional case
# implicitly handles the second dimension but the multidimensional case
# explicitly keeps the pivot dimension.
return math_ops.log(probs), probs
return math_ops.log(probs) - math_ops.log1p(-1. * probs), probs
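# Illustrative round trip (a sketch; values assumed). In the unidimensional
# case the returned logits are the sigmoid inverse of probs,
# log(p) - log1p(-p), so p=0.5 maps to 0.0 and p in {0, 1} to -/+Inf:
#
#   logits, probs = get_logits_and_probs(probs=[0.1, 0.5, 0.9])
#   # logits ~= [-2.197, 0., 2.197]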
def _is_known_unsigned_by_dtype(dt):
"""Helper returning True if dtype is known to be unsigned."""
return {
dtypes.bool: True,
dtypes.uint8: True,
dtypes.uint16: True,
}.get(dt.base_dtype, False)
def _is_known_signed_by_dtype(dt):
"""Helper returning True if dtype is known to be signed."""
return {
dtypes.float16: True,
dtypes.float32: True,
dtypes.float64: True,
dtypes.int8: True,
dtypes.int16: True,
dtypes.int32: True,
dtypes.int64: True,
}.get(dt.base_dtype, False)
def _is_known_dtype(dt):
"""Helper returning True if dtype is known."""
return _is_known_unsigned_by_dtype(dt) or _is_known_signed_by_dtype(dt)
def _largest_integer_by_dtype(dt):
"""Helper returning the largest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if dt.is_floating:
return int(2**(np.finfo(dt.as_numpy_dtype).nmant + 1))
if dt.is_integer:
return np.iinfo(dt.as_numpy_dtype).max
if dt.base_dtype == dtypes.bool:
return int(1)
# We actually can't land here but keep the case for completeness.
raise TypeError("Unrecognized dtype: {}".format(dt.name))
def _smallest_integer_by_dtype(dt):
"""Helper returning the smallest integer exactly representable by dtype."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
if _is_known_unsigned_by_dtype(dt):
return 0
return -1 * _largest_integer_by_dtype(dt)
def _is_integer_like_by_dtype(dt):
"""Helper returning True if dtype.is_integer or is `bool`."""
if not _is_known_dtype(dt):
raise TypeError("Unrecognized dtype: {}".format(dt.name))
return dt.is_integer or dt.base_dtype == dtypes.bool
def embed_check_categorical_event_shape(
categorical_param, name="embed_check_categorical_event_shape"):
"""Embeds checks that categorical distributions don't have too many classes.
A categorical-type distribution is one which, e.g., returns the class label
rather than a one-hot encoding. E.g., `Categorical(probs)`.
Since distributions output samples in the same dtype as the parameters, we
must ensure that casting doesn't lose precision. That is, the
`parameter.dtype` implies a maximum number of classes. However, since shape is
`int32` and categorical variables are presumed to be indexes into a `Tensor`,
we must also ensure that the number of classes is no larger than the largest
possible `int32` index, i.e., `2**31-1`.
In other words the number of classes, `K`, must satisfy the following
condition:
```python
K <= min(
int(2**31 - 1), # Largest int32 index.
{
dtypes.float16: int(2**11), # Largest int as a float16.
dtypes.float32: int(2**24),
dtypes.float64: int(2**53),
}.get(categorical_param.dtype.base_dtype, 0))
```
Args:
categorical_param: Floating-point `Tensor` representing parameters of
distribution over categories. The rightmost shape is presumed to be the
number of categories.
name: A name for this operation (optional).
Returns:
categorical_param: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `categorical_param` has an unknown `dtype`.
ValueError: if we can statically identify `categorical_param` as being too
large (for being closed under int32/float casting).
"""
with ops.name_scope(name, values=[categorical_param]):
x = ops.convert_to_tensor(categorical_param, name="categorical_param")
# The size must not exceed both of:
# - The largest possible int32 (since categorical values are presumed to be
# indexes into a Tensor).
# - The largest possible integer exactly representable under the given
# floating-point dtype (since we need to cast to/from).
#
# The chosen floating-point thresholds are 2**(1 + mantissa_bits).
# For more details, see:
# https://en.wikipedia.org/wiki/Floating-point_arithmetic#Internal_representation
x_dtype = x.dtype.base_dtype
max_event_size = (
_largest_integer_by_dtype(x_dtype) if x_dtype.is_floating else 0)
if max_event_size == 0:
raise TypeError("Unable to validate size of unrecognized dtype "
"({}).".format(x_dtype.name))
try:
x_shape_static = x.get_shape().with_rank_at_least(1)
except ValueError:
raise ValueError("A categorical-distribution parameter must have "
"at least 1 dimension.")
if tensor_shape.dimension_value(x_shape_static[-1]) is not None:
event_size = x_shape_static.dims[-1].value
if event_size < 2:
raise ValueError("A categorical-distribution parameter must have at "
"least 2 events.")
if event_size > max_event_size:
raise ValueError("Number of classes exceeds `dtype` precision, i.e., "
"{} implies shape ({}) cannot exceed {}.".format(
x_dtype.name, event_size, max_event_size))
return x
else:
event_size = array_ops.shape(x, name="x_shape")[-1]
return control_flow_ops.with_dependencies([
check_ops.assert_rank_at_least(
x,
1,
message=("A categorical-distribution parameter must have "
"at least 1 dimension.")),
check_ops.assert_greater_equal(
array_ops.shape(x)[-1],
2,
message=("A categorical-distribution parameter must have at "
"least 2 events.")),
check_ops.assert_less_equal(
event_size,
max_event_size,
message="Number of classes exceeds `dtype` precision, "
"i.e., {} dtype cannot exceed {} shape.".format(
x_dtype.name, max_event_size)),
], x)
def embed_check_integer_casting_closed(x,
target_dtype,
assert_nonnegative=True,
name="embed_check_casting_closed"):
"""Ensures integers remain unaffected despite casting to/from int/float types.
Example integer-types: `uint8`, `int32`, `bool`.
Example floating-types: `float32`, `float64`.
The largest possible integer representable by an IEEE754 floating-point is
`2**(1 + mantissa_bits)` yet the largest possible integer as an int-type is
`2**(bits - 1) - 1`. This function ensures that a `Tensor` purporting to have
integer-form values can be cast to some other type without loss of precision.
The smallest representable integer is the negative of the largest
representable integer, except for types: `uint8`, `uint16`, `bool`. For these
types, the smallest representable integer is `0`.
Args:
x: `Tensor` representing integer-form values.
target_dtype: TF `dtype` under which `x` should have identical values.
assert_nonnegative: `bool` indicating `x` should contain nonnegative values.
name: A name for this operation (optional).
Returns:
x: Input `Tensor` with appropriate assertions embedded.
Raises:
TypeError: if `x` is neither integer- nor floating-type.
TypeError: if `target_dtype` is neither integer- nor floating-type.
TypeError: if neither `x` nor `target_dtype` are integer-type.
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
if (not _is_integer_like_by_dtype(x.dtype) and not x.dtype.is_floating):
raise TypeError("{}.dtype must be floating- or "
"integer-type.".format(x.dtype.name))
if (not _is_integer_like_by_dtype(target_dtype) and
not target_dtype.is_floating):
raise TypeError("target_dtype ({}) must be floating- or "
"integer-type.".format(target_dtype.name))
if (not _is_integer_like_by_dtype(x.dtype) and
not _is_integer_like_by_dtype(target_dtype)):
raise TypeError("At least one of {}.dtype ({}) and target_dtype ({}) "
"must be integer-type.".format(x, x.dtype.name,
target_dtype.name))
assertions = []
if assert_nonnegative:
assertions += [
check_ops.assert_non_negative(
x, message="Elements must be non-negative."),
]
if x.dtype.is_floating:
# Being here means _is_integer_like_by_dtype(target_dtype) = True.
# Since this check implies the magnitude check below, we need only it.
assertions += [
assert_integer_form(
x,
int_dtype=target_dtype,
message="Elements must be {}-equivalent.".format(
target_dtype.name)),
]
else:
if (_largest_integer_by_dtype(x.dtype) >
_largest_integer_by_dtype(target_dtype)):
# Cast may lose integer precision.
assertions += [
check_ops.assert_less_equal(
x,
_largest_integer_by_dtype(target_dtype),
message=("Elements cannot exceed {}.".format(
_largest_integer_by_dtype(target_dtype)))),
]
if (not assert_nonnegative and (_smallest_integer_by_dtype(
x.dtype) < _smallest_integer_by_dtype(target_dtype))):
assertions += [
check_ops.assert_greater_equal(
x,
_smallest_integer_by_dtype(target_dtype),
message=("Elements cannot be smaller than {}.".format(
_smallest_integer_by_dtype(target_dtype)))),
]
if not assertions:
return x
return control_flow_ops.with_dependencies(assertions, x)
def log_combinations(n, counts, name="log_combinations"):
"""Multinomial coefficient.
Given `n` and `counts`, where `counts` has last dimension `k`, we compute
the multinomial coefficient as:
```n! / sum_i n_i!```
where `i` runs over all `k` classes.
Args:
n: Floating-point `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
counts: Floating-point `Tensor` broadcastable with `n`. This represents
counts in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
`Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
# In general, this is (sum counts)! / sum(counts!)
# The sum should be along the last dimension of counts. This is the
# "distribution" dimension. Here n a priori represents the sum of counts.
with ops.name_scope(name, values=[n, counts]):
n = ops.convert_to_tensor(n, name="n")
counts = ops.convert_to_tensor(counts, name="counts")
total_permutations = math_ops.lgamma(n + 1)
counts_factorial = math_ops.lgamma(counts + 1)
redundant_permutations = math_ops.reduce_sum(counts_factorial, axis=[-1])
return total_permutations - redundant_permutations
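# Worked example (a sketch): with n = 3 and counts = [1., 2.],
#   lgamma(4) - (lgamma(2) + lgamma(3)) = log(6) - log(2) = log(3),
# i.e. the log multinomial coefficient 3!/(1! * 2!) = 3.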
def matrix_diag_transform(matrix, transform=None, name=None):
"""Transform diagonal of [batch-]matrix, leave rest of matrix unchanged.
Create a trainable covariance defined by a Cholesky factor:
```python
# Transform network layer into 2 x 2 array.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
# Make the diagonal positive. If the upper triangle was zero, this would be a
# valid Cholesky factor.
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# LinearOperatorLowerTriangular ignores the upper triangle.
operator = LinearOperatorLowerTriangular(chol)
```
Example of heteroskedastic 2-D linear regression.
```python
tfd = tfp.distributions
# Get a trainable Cholesky factor.
matrix_values = tf.contrib.layers.fully_connected(activations, 4)
matrix = tf.reshape(matrix_values, (batch_size, 2, 2))
chol = matrix_diag_transform(matrix, transform=tf.nn.softplus)
# Get a trainable mean.
mu = tf.contrib.layers.fully_connected(activations, 2)
# This is a fully trainable multivariate normal!
dist = tfd.MultivariateNormalTriL(mu, chol)
# Standard log loss. Minimizing this will "train" mu and chol, and then dist
# will be a distribution predicting labels as multivariate Gaussians.
loss = -1 * tf.reduce_mean(dist.log_prob(labels))
```
Args:
matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
transform: Element-wise function mapping `Tensors` to `Tensors`. To be
applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops. Defaults to "matrix_diag_transform".
Returns:
A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
if transform is None:
return matrix
# Replace the diag with transformed diag.
diag = array_ops.matrix_diag_part(matrix)
transformed_diag = transform(diag)
transformed_mat = array_ops.matrix_set_diag(matrix, transformed_diag)
return transformed_mat
def rotate_transpose(x, shift, name="rotate_transpose"):
"""Circularly moves dims left or right.
Effectively identical to:
```python
numpy.transpose(x, numpy.roll(numpy.arange(len(x.shape)), shift))
```
When `validate_args=True` additional graph-runtime checks are
performed. These checks entail moving data from GPU to CPU.
Example:
```python
x = tf.random.normal([1, 2, 3, 4]) # Tensor of shape [1, 2, 3, 4].
rotate_transpose(x, -1).shape == [2, 3, 4, 1]
rotate_transpose(x, -2).shape == [3, 4, 1, 2]
rotate_transpose(x, 1).shape == [4, 1, 2, 3]
rotate_transpose(x, 2).shape == [3, 4, 1, 2]
rotate_transpose(x, 7).shape == rotate_transpose(x, 3).shape # [2, 3, 4, 1]
rotate_transpose(x, -7).shape == rotate_transpose(x, -3).shape # [4, 1, 2, 3]
```
Args:
x: `Tensor`.
shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: Python `str`. The name to give this op.
Returns:
rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
"""
with ops.name_scope(name, values=[x, shift]):
x = ops.convert_to_tensor(x, name="x")
shift = ops.convert_to_tensor(shift, name="shift")
# We do not assign back to preserve constant-ness.
check_ops.assert_integer(shift)
shift_value_static = tensor_util.constant_value(shift)
ndims = x.get_shape().ndims
if ndims is not None and shift_value_static is not None:
if ndims < 2:
return x
shift_value_static = np.sign(shift_value_static) * (
abs(shift_value_static) % ndims)
if shift_value_static == 0:
return x
perm = np.roll(np.arange(ndims), shift_value_static)
return array_ops.transpose(x, perm=perm)
else:
# Consider if we always had a positive shift, and some specified
# direction.
# When shifting left we want the new array:
# last(x, n-shift) + first(x, shift)
# and if shifting right then we want:
# last(x, shift) + first(x, n-shift)
# Observe that last(a) == slice(a, n) and first(a) == slice(0, a).
# Also, we can encode direction and shift as one: direction * shift.
# Combining these facts, we have:
# a = cond(shift<0, -shift, n-shift)
# last(x, n-a) + first(x, a) == x[a:n] + x[0:a]
# Finally, we transform shift by modulo length so it can be specified
# independently from the array upon which it operates (like python).
ndims = array_ops.rank(x)
shift = array_ops.where(
math_ops.less(shift, 0), math_ops.mod(-shift, ndims),
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
perm = array_ops.concat([last, first], 0)
return array_ops.transpose(x, perm=perm)
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
"""Picks possibly different length row `Tensor`s based on condition.
Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
true_vector: `Tensor` of one dimension. Returned when cond is `True`.
false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: Python `str`. The name to give this op.
Example:
```python
pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))  # [10, 11]
pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))  # [15, 16, 17]
```
Returns:
true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
TypeError: if `cond` is not a constant and
`true_vector.dtype != false_vector.dtype`
"""
with ops.name_scope(name, values=(cond, true_vector, false_vector)):
cond = ops.convert_to_tensor(cond, name="cond")
if cond.dtype != dtypes.bool:
raise TypeError("%s.dtype=%s which is not %s" %
(cond, cond.dtype, dtypes.bool))
cond_value_static = tensor_util.constant_value(cond)
if cond_value_static is not None:
return true_vector if cond_value_static else false_vector
true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
if true_vector.dtype != false_vector.dtype:
raise TypeError(
"%s.dtype=%s does not match %s.dtype=%s" %
(true_vector, true_vector.dtype, false_vector, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
array_ops.concat([true_vector, false_vector], 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
def prefer_static_broadcast_shape(shape1,
shape2,
name="prefer_static_broadcast_shape"):
"""Convenience function which statically broadcasts shape when possible.
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`.
"""
with ops.name_scope(name, values=[shape1, shape2]):
def make_shape_tensor(x):
return ops.convert_to_tensor(x, name="shape", dtype=dtypes.int32)
def get_tensor_shape(s):
if isinstance(s, tensor_shape.TensorShape):
return s
s_ = tensor_util.constant_value(make_shape_tensor(s))
if s_ is not None:
return tensor_shape.TensorShape(s_)
return None
def get_shape_tensor(s):
if not isinstance(s, tensor_shape.TensorShape):
return make_shape_tensor(s)
if s.is_fully_defined():
return make_shape_tensor(s.as_list())
raise ValueError("Cannot broadcast from partially "
"defined `TensorShape`.")
shape1_ = get_tensor_shape(shape1)
shape2_ = get_tensor_shape(shape2)
if shape1_ is not None and shape2_ is not None:
return array_ops.broadcast_static_shape(shape1_, shape2_)
shape1_ = get_shape_tensor(shape1)
shape2_ = get_shape_tensor(shape2)
return array_ops.broadcast_dynamic_shape(shape1_, shape2_)
def prefer_static_rank(x):
"""Return static rank of tensor `x` if available, else `tf.rank(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static rank is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.rank(x))
def prefer_static_shape(x):
"""Return static shape of tensor `x` if available, else `tf.shape(x)`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static shape is obtainable), else `Tensor`.
"""
return prefer_static_value(array_ops.shape(x))
def prefer_static_value(x):
"""Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`.
"""
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x
def gen_new_seed(seed, salt):
"""Generate a new seed, from the given seed and salt."""
if seed is None:
return None
string = (str(seed) + salt).encode("utf-8")
return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF
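# Illustrative behaviour (a sketch): the output is a deterministic function of
# (seed, salt), so repeated calls agree while different salts decorrelate:
#   gen_new_seed(42, "foo") == gen_new_seed(42, "foo")
#   gen_new_seed(42, "foo") != gen_new_seed(42, "bar")  # almost surely
#   gen_new_seed(None, "foo") is None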
def fill_triangular(x, upper=False, name=None):
"""Creates a (batch of) triangular matrix from a vector of inputs.
Created matrix can be lower- or upper-triangular. (It is more efficient to
create the matrix as upper or lower, rather than transpose.)
Triangular matrix elements are filled in a clockwise spiral. See example,
below.
If `x.get_shape()` is `[b1, b2, ..., bB, d]` then the output shape is
`[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
`n = int(np.sqrt(0.25 + 2. * d) - 0.5)`.
Example:
```python
fill_triangular([1, 2, 3, 4, 5, 6])
# ==> [[4, 0, 0],
# [6, 5, 0],
# [3, 2, 1]]
fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
# ==> [[1, 2, 3],
# [0, 5, 6],
# [0, 0, 4]]
```
For comparison, a pure numpy version of this function can be found in
`util_test.py`, function `_fill_triangular`.
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
Raises:
ValueError: if `x` cannot be mapped to a triangular matrix.
"""
with ops.name_scope(name, "fill_triangular", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(1)[-1]) is not None:
# Formula derived by solving for n: m = n(n+1)/2.
m = np.int32(x.shape.dims[-1].value)
n = np.sqrt(0.25 + 2. * m) - 0.5
if n != np.floor(n):
raise ValueError("Input right-most shape ({}) does not "
"correspond to a triangular matrix.".format(m))
n = np.int32(n)
static_final_shape = x.shape[:-1].concatenate([n, n])
else:
m = array_ops.shape(x)[-1]
# For derivation, see above. Casting automatically lops off the 0.5, so we
# omit it. We don't validate n is an integer because this has
# graph-execution cost; an error will be thrown from the reshape, below.
n = math_ops.cast(
math_ops.sqrt(0.25 + math_ops.cast(2 * m, dtype=dtypes.float32)),
dtype=dtypes.int32)
static_final_shape = x.shape.with_rank_at_least(1)[:-1].concatenate(
[None, None])
# We now concatenate the "tail" of `x` to `x` (and reverse one of them).
#
# We do this based on the insight that the input `x` provides `ceil(n/2)`
# rows of an `n x n` matrix, some of which will get zeroed out being on the
# wrong side of the diagonal. The first row will not get zeroed out at all,
# and we need `floor(n/2)` more rows, so the first is what we omit from
# `x_tail`. If we then stack those `ceil(n/2)` rows with the `floor(n/2)`
# rows provided by a reversed tail, it is exactly the other set of elements
# of the reversed tail which will be zeroed out for being on the wrong side
# of the diagonal further up/down the matrix. And, in doing so, we've filled
# the triangular matrix in a clockwise spiral pattern. Neat!
#
# Try it out in numpy:
# n = 3
# x = np.arange(n * (n + 1) / 2)
# m = x.shape[0]
# n = np.int32(np.sqrt(.25 + 2 * m) - .5)
# x_tail = x[(m - (n**2 - m)):]
# np.concatenate([x_tail, x[::-1]], 0).reshape(n, n) # lower
# # ==> array([[3, 4, 5],
# [5, 4, 3],
# [2, 1, 0]])
# np.concatenate([x, x_tail[::-1]], 0).reshape(n, n) # upper
# # ==> array([[0, 1, 2],
# [3, 4, 5],
# [5, 4, 3]])
#
# Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
# correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
# Furthermore observe that:
# m - (n**2 - m)
# = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
# = 2 (n**2 / 2 + n / 2) - n**2
# = n**2 + n - n**2
# = n
ndims = prefer_static_rank(x)
if upper:
x_list = [x, array_ops.reverse(x[..., n:], axis=[ndims - 1])]
else:
x_list = [x[..., n:], array_ops.reverse(x, axis=[ndims - 1])]
new_shape = (
static_final_shape.as_list() if static_final_shape.is_fully_defined()
else array_ops.concat([array_ops.shape(x)[:-1], [n, n]], axis=0))
x = array_ops.reshape(array_ops.concat(x_list, axis=-1), new_shape)
x = array_ops.matrix_band_part(
x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
x.set_shape(static_final_shape)
return x
def fill_triangular_inverse(x, upper=False, name=None):
"""Creates a vector from a (batch of) triangular matrix.
The vector is created from the lower-triangular or upper-triangular portion
depending on the value of the parameter `upper`.
If `x.shape` is `[b1, b2, ..., bB, n, n]` then the output shape is
`[b1, b2, ..., bB, d]` where `d = n (n + 1) / 2`.
Example:
```python
fill_triangular_inverse(
[[4, 0, 0],
[6, 5, 0],
[3, 2, 1]])
# ==> [1, 2, 3, 4, 5, 6]
fill_triangular_inverse(
[[1, 2, 3],
[0, 5, 6],
[0, 0, 4]], upper=True)
# ==> [1, 2, 3, 4, 5, 6]
```
Args:
x: `Tensor` representing lower (or upper) triangular elements.
upper: Python `bool` representing whether output matrix should be upper
triangular (`True`) or lower triangular (`False`, default).
name: Python `str`. The name to give this op.
Returns:
flat_tril: (Batch of) vector-shaped `Tensor` representing vectorized lower
(or upper) triangular elements from `x`.
"""
with ops.name_scope(name, "fill_triangular_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
if tensor_shape.dimension_value(
x.shape.with_rank_at_least(2)[-1]) is not None:
n = np.int32(x.shape.dims[-1].value)
m = np.int32((n * (n + 1)) // 2)
static_final_shape = x.shape[:-2].concatenate([m])
else:
n = array_ops.shape(x)[-1]
m = (n * (n + 1)) // 2
static_final_shape = x.shape.with_rank_at_least(2)[:-2].concatenate(
[None])
ndims = prefer_static_rank(x)
if upper:
initial_elements = x[..., 0, :]
triangular_portion = x[..., 1:, :]
else:
initial_elements = array_ops.reverse(x[..., -1, :], axis=[ndims - 2])
triangular_portion = x[..., :-1, :]
rotated_triangular_portion = array_ops.reverse(
array_ops.reverse(triangular_portion, axis=[ndims - 1]),
axis=[ndims - 2])
consolidated_matrix = triangular_portion + rotated_triangular_portion
end_sequence = array_ops.reshape(
consolidated_matrix,
array_ops.concat([array_ops.shape(x)[:-2], [n * (n - 1)]], axis=0))
y = array_ops.concat([initial_elements, end_sequence[..., :m - n]], axis=-1)
y.set_shape(static_final_shape)
return y
def tridiag(below=None, diag=None, above=None, name=None):
"""Creates a matrix with values set above, below, and on the diagonal.
Example:
```python
tridiag(below=[1., 2., 3.],
diag=[4., 5., 6., 7.],
above=[8., 9., 10.])
# ==> array([[ 4., 8., 0., 0.],
# [ 1., 5., 9., 0.],
# [ 0., 2., 6., 10.],
# [ 0., 0., 3., 7.]], dtype=float32)
```
Warning: This Op is intended for convenience, not efficiency.
Args:
below: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the below
diagonal part. `None` is logically equivalent to `below = 0`.
diag: `Tensor` of shape `[B1, ..., Bb, d]` corresponding to the diagonal
part. `None` is logically equivalent to `diag = 0`.
above: `Tensor` of shape `[B1, ..., Bb, d-1]` corresponding to the above
diagonal part. `None` is logically equivalent to `above = 0`.
name: Python `str`. The name to give this op.
Returns:
tridiag: `Tensor` with values set above, below and on the diagonal.
Raises:
ValueError: if all inputs are `None`.
"""
def _pad(x):
"""Prepends and appends a zero to every vector in a batch of vectors."""
shape = array_ops.concat([array_ops.shape(x)[:-1], [1]], axis=0)
z = array_ops.zeros(shape, dtype=x.dtype)
return array_ops.concat([z, x, z], axis=-1)
def _add(*x):
"""Adds list of Tensors, ignoring `None`."""
s = None
for y in x:
if y is None:
continue
elif s is None:
s = y
else:
s += y
if s is None:
raise ValueError("Must specify at least one of `below`, `diag`, `above`.")
return s
with ops.name_scope(name, "tridiag", [below, diag, above]):
if below is not None:
below = ops.convert_to_tensor(below, name="below")
below = array_ops.matrix_diag(_pad(below))[..., :-1, 1:]
if diag is not None:
diag = ops.convert_to_tensor(diag, name="diag")
diag = array_ops.matrix_diag(diag)
if above is not None:
above = ops.convert_to_tensor(above, name="above")
above = array_ops.matrix_diag(_pad(above))[..., 1:, :-1]
# TODO(jvdillon): Consider using scatter_nd instead of creating three full
# matrices.
return _add(below, diag, above)
def reduce_weighted_logsumexp(logx,
w=None,
axis=None,
keep_dims=False,
return_sign=False,
name=None):
"""Computes `log(abs(sum(weight * exp(elements across tensor dimensions))))`.
If all weights `w` are known to be positive, it is more efficient to directly
use `reduce_logsumexp`, i.e., `tf.reduce_logsumexp(logx + tf.math.log(w))` is
more efficient than `du.reduce_weighted_logsumexp(logx, w)`.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(w * exp(input))). It
avoids overflows caused by taking the exp of large inputs and underflows
caused by taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0, 0],
[0, 0, 0]])
w = tf.constant([[-1., 1, 1],
[1, 1, 1]])
du.reduce_weighted_logsumexp(x, w)
# ==> log(-1*1 + 1*1 + 1*1 + 1*1 + 1*1 + 1*1) = log(4)
du.reduce_weighted_logsumexp(x, w, axis=0)
# ==> [log(-1+1), log(1+1), log(1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1)
# ==> [log(-1+1+1), log(1+1+1)]
du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)
# ==> [[log(-1+1+1)], [log(1+1+1)]]
du.reduce_weighted_logsumexp(x, w, axis=[0, 1])
# ==> log(-1+5)
```
Args:
logx: The tensor to reduce. Should have numeric type.
w: The weight tensor. Should have numeric type identical to `logx`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
return_sign: If `True`, returns the sign of the result.
name: A name for the operation (optional).
Returns:
lswe: The `log(abs(sum(weight * exp(x))))` reduced tensor.
sign: (Optional) The sign of `sum(weight * exp(x))`.
"""
with ops.name_scope(name, "reduce_weighted_logsumexp", [logx, w]):
logx = ops.convert_to_tensor(logx, name="logx")
if w is None:
lswe = math_ops.reduce_logsumexp(logx, axis=axis, keepdims=keep_dims)
if return_sign:
sgn = array_ops.ones_like(lswe)
return lswe, sgn
return lswe
w = ops.convert_to_tensor(w, dtype=logx.dtype, name="w")
log_absw_x = logx + math_ops.log(math_ops.abs(w))
max_log_absw_x = math_ops.reduce_max(log_absw_x, axis=axis, keepdims=True)
# If the largest element is `-inf` or `inf` then we don't bother subtracting
# off the max. We do this because otherwise we'd get `inf - inf = NaN`. That
# this is ok follows from the fact that we're actually free to subtract any
# value we like, so long as we add it back after taking the `log(sum(...))`.
max_log_absw_x = array_ops.where(
math_ops.is_inf(max_log_absw_x), array_ops.zeros_like(max_log_absw_x),
max_log_absw_x)
wx_over_max_absw_x = (
math_ops.sign(w) * math_ops.exp(log_absw_x - max_log_absw_x))
sum_wx_over_max_absw_x = math_ops.reduce_sum(
wx_over_max_absw_x, axis=axis, keepdims=keep_dims)
if not keep_dims:
max_log_absw_x = array_ops.squeeze(max_log_absw_x, axis)
sgn = math_ops.sign(sum_wx_over_max_absw_x)
lswe = max_log_absw_x + math_ops.log(sgn * sum_wx_over_max_absw_x)
if return_sign:
return lswe, sgn
return lswe
# TODO(jvdillon): Merge this test back into:
# tensorflow/python/ops/softplus_op_test.py
# once TF core is accepting new ops.
def softplus_inverse(x, name=None):
"""Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).
Mathematically this op is equivalent to:
```none
softplus_inverse = log(exp(x) - 1.)
```
Args:
x: `Tensor`. Non-negative (not enforced), floating-point.
name: A name for the operation (optional).
Returns:
`Tensor`. Has the same type/shape as input `x`.
"""
with ops.name_scope(name, "softplus_inverse", values=[x]):
x = ops.convert_to_tensor(x, name="x")
# We begin by deriving a more numerically stable softplus_inverse:
# x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
# ==> exp{x} = 1 + exp{y} (1)
# ==> y = Log[exp{x} - 1] (2)
# = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
# = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
# = Log[1 - exp{-x}] + x (3)
# (2) is the "obvious" inverse, but (3) is more stable than (2) for large x.
# For small x (e.g. x = 1e-10), (3) will become -inf since 1 - exp{-x} will
# be zero. To fix this, we use 1 - exp{-x} approx x for small x > 0.
#
# In addition to the numerically stable derivation above, we clamp
# small/large values to be congruent with the logic in:
# tensorflow/core/kernels/softplus_op.h
#
# Finally, we set the input to one whenever the input is too large or too
# small. This ensures that no unchosen codepath is +/- inf. This is
# necessary to ensure the gradient doesn't get NaNs. Recall that the
# gradient of `where` behaves like `pred*pred_true + (1-pred)*pred_false`
# thus an `inf` in an unselected path results in `0*inf=nan`. We are careful
# to overwrite `x` with ones only when we will never actually use this
# value. Note that we use ones and not zeros since `log(expm1(0.)) = -inf`.
threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
is_too_small = math_ops.less(x, np.exp(threshold))
is_too_large = math_ops.greater(x, -threshold)
too_small_value = math_ops.log(x)
too_large_value = x
# This `where` will ultimately be a NOP because we won't select this
# codepath whenever we used the surrogate `ones_like`.
x = array_ops.where(
math_ops.logical_or(is_too_small, is_too_large), array_ops.ones_like(x),
x)
y = x + math_ops.log(-math_ops.expm1(-x)) # == log(expm1(x))
return array_ops.where(is_too_small, too_small_value,
array_ops.where(is_too_large, too_large_value, y))
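# Illustrative round trip (a sketch; eager-style pseudocode, values assumed):
#   x = constant_op.constant([0.5, 2., 10.])
#   y = softplus_inverse(nn.softplus(x))  # y ~= x for moderate x
# Below the clamp threshold the result is log(x); far above it, x itself.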
# TODO(b/35290280): Add unit-tests.
def dimension_size(x, axis):
"""Returns the size of a specific dimension."""
# Since tf.gather isn't "constant-in, constant-out", we must first check the
# static shape or fallback to dynamic shape.
s = tensor_shape.dimension_value(
x.shape.with_rank_at_least(np.abs(axis))[axis])
if s is not None:
return s
return array_ops.shape(x)[axis]
def process_quadrature_grid_and_probs(quadrature_grid_and_probs,
dtype,
validate_args,
name=None):
"""Validates quadrature grid, probs or computes them as necessary.
Args:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight. When `None`, defaults to:
`np.polynomial.hermite.hermgauss(deg=8)`.
dtype: The expected `dtype` of `grid` and `probs`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
name: Python `str` name prefixed to Ops created by this class.
Returns:
quadrature_grid_and_probs: Python pair of `float`-like `Tensor`s
representing the sample points and the corresponding (possibly
normalized) weight.
Raises:
ValueError: if `quadrature_grid_and_probs is not None` and
`len(quadrature_grid_and_probs[0]) != len(quadrature_grid_and_probs[1])`
"""
with ops.name_scope(name, "process_quadrature_grid_and_probs",
[quadrature_grid_and_probs]):
if quadrature_grid_and_probs is None:
grid, probs = np.polynomial.hermite.hermgauss(deg=8)
grid = grid.astype(dtype.as_numpy_dtype)
probs = probs.astype(dtype.as_numpy_dtype)
probs /= np.linalg.norm(probs, ord=1, keepdims=True)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="probs", dtype=dtype)
return grid, probs
grid, probs = tuple(quadrature_grid_and_probs)
grid = ops.convert_to_tensor(grid, name="grid", dtype=dtype)
probs = ops.convert_to_tensor(probs, name="unnormalized_probs", dtype=dtype)
probs /= linalg_ops.norm(probs, ord=1, axis=-1, keepdims=True, name="probs")
def _static_event_size(x):
"""Returns the static size of a specific dimension or `None`."""
return tensor_shape.dimension_value(x.shape.with_rank_at_least(1)[-1])
m, n = _static_event_size(probs), _static_event_size(grid)
if m is not None and n is not None:
if m != n:
raise ValueError("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s "
"(saw lengths {}, {})".format(m, n))
elif validate_args:
assertions = [
check_ops.assert_equal(
dimension_size(probs, axis=-1),
dimension_size(grid, axis=-1),
message=("`quadrature_grid_and_probs` must be a `tuple` of "
"same-length zero-th-dimension `Tensor`s")),
]
with ops.control_dependencies(assertions):
grid = array_ops.identity(grid)
probs = array_ops.identity(probs)
return grid, probs
def pad(x, axis, front=False, back=False, value=0, count=1, name=None):
"""Pads `value` to the front and/or back of a `Tensor` dim, `count` times.
Args:
x: `Tensor` input.
axis: Scalar `int`-like `Tensor` representing the single dimension to pad.
(Negative indexing is supported.)
front: Python `bool`; if `True` the beginning of the `axis` dimension is
padded with `value`, `count` times. If `False` no front padding is made.
back: Python `bool`; if `True` the end of the `axis` dimension is padded
with `value`, `count` times. If `False` no end padding is made.
value: Scalar `int`-like `Tensor` representing the actual value added to the
front and/or back of the `axis` dimension of `x`.
count: Scalar `int`-like `Tensor` representing number of elements added to
the front and/or back of the `axis` dimension of `x`. E.g., if `front =
back = True` then `2 * count` elements are added.
name: Python `str` name prefixed to Ops created by this function.
Returns:
pad: The padded version of input `x`.
Raises:
ValueError: if both `front` and `back` are `False`.
TypeError: if `count` is not `int`-like.
"""
with ops.name_scope(name, "pad", [x, value, count]):
x = ops.convert_to_tensor(x, name="x")
value = ops.convert_to_tensor(value, dtype=x.dtype, name="value")
count = ops.convert_to_tensor(count, name="count")
if not count.dtype.is_integer:
raise TypeError("`count.dtype` (`{}`) must be `int`-like.".format(
count.dtype.name))
if not front and not back:
raise ValueError("At least one of `front`, `back` must be `True`.")
ndims = (
x.shape.ndims if x.shape.ndims is not None else array_ops.rank(
x, name="ndims"))
axis = ops.convert_to_tensor(axis, name="axis")
axis_ = tensor_util.constant_value(axis)
if axis_ is not None:
axis = axis_
if axis < 0:
axis = ndims + axis
count_ = tensor_util.constant_value(count)
if axis_ >= 0 or x.shape.ndims is not None:
head = x.shape[:axis]
middle = tensor_shape.TensorShape(None if count_ is None else (
tensor_shape.dimension_at_index(x.shape, axis) + count_ *
(front + back)))
tail = x.shape[axis + 1:]
final_shape = head.concatenate(middle.concatenate(tail))
else:
final_shape = None
else:
axis = array_ops.where(axis < 0, ndims + axis, axis)
final_shape = None
x = array_ops.pad(
x,
paddings=array_ops.one_hot(
indices=array_ops.stack(
[axis if front else -1, axis if back else -1]),
depth=ndims,
axis=0,
on_value=count,
dtype=dtypes.int32),
constant_values=value)
if final_shape is not None:
x.set_shape(final_shape)
return x
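# Illustrative usage (a sketch; values assumed):
#   pad([1., 2., 3.], axis=0, back=True, value=0., count=2)
#   # ==> [1., 2., 3., 0., 0.]
#   pad([[1., 2.]], axis=-1, front=True, back=True, value=9., count=1)
#   # ==> [[9., 1., 2., 9.]]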
def parent_frame_arguments():
"""Returns parent frame arguments.
When called inside a function, returns a dictionary with the caller's function
arguments. These are positional arguments and keyword arguments (**kwargs),
while variable arguments (*varargs) are excluded.
When called at global scope, this will return an empty dictionary, since there
are no arguments.
WARNING: If caller function argument names are overloaded before invoking
this method, then values will reflect the overloaded value. For this reason,
we recommend calling `parent_frame_arguments` at the beginning of the
function.
"""
# All arguments and the names used for *varargs, and **kwargs
arg_names, variable_arg_name, keyword_arg_name, local_vars = (
tf_inspect._inspect.getargvalues( # pylint: disable=protected-access
# Get the first frame of the caller of this method.
tf_inspect._inspect.stack()[1][0])) # pylint: disable=protected-access
# Remove the *varargs, and flatten the **kwargs. Both are
# nested lists.
local_vars.pop(variable_arg_name, {})
keyword_args = local_vars.pop(keyword_arg_name, {})
final_args = {}
# Copy over arguments and their values. In general, local_vars
# may contain more than just the arguments, since this method
# can be called anywhere in a function.
for arg_name in arg_names:
final_args[arg_name] = local_vars.pop(arg_name)
final_args.update(keyword_args)
return final_args
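# Illustrative behaviour (a sketch; names assumed):
#   def f(a, b=2, *args, **kwargs):
#     return parent_frame_arguments()
#   f(1, c=3)  # ==> {'a': 1, 'b': 2, 'c': 3}; *args contents are excluded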
class AppendDocstring(object):
"""Helper class to promote private subclass docstring to public counterpart.
Example:
```python
class TransformedDistribution(Distribution):
@distribution_util.AppendDocstring(
additional_note="A special note!",
kwargs_dict={"foo": "An extra arg."})
def _prob(self, y, foo=None):
pass
```
In this case, the `AppendDocstring` decorator appends the `additional_note` to
the docstring of `prob` (not `_prob`) and adds a new `kwargs`
section with each dictionary item as a bullet-point.
For a more detailed example, see `TransformedDistribution`.
"""
def __init__(self, additional_note="", kwargs_dict=None):
"""Initializes the AppendDocstring object.
Args:
additional_note: Python string added as additional docstring to public
version of function.
kwargs_dict: Python string/string dictionary representing specific kwargs
expanded from the **kwargs input.
Raises:
ValueError: if kwargs_dict.key contains whitespace.
ValueError: if kwargs_dict.value contains newlines.
"""
self._additional_note = additional_note
if kwargs_dict:
bullets = []
for key in sorted(kwargs_dict.keys()):
value = kwargs_dict[key]
if any(x.isspace() for x in key):
raise ValueError("Parameter name \"%s\" contains whitespace." % key)
value = value.lstrip()
if "\n" in value:
raise ValueError(
"Parameter description for \"%s\" contains newlines." % key)
bullets.append("* `%s`: %s" % (key, value))
self._additional_note += ("\n\n##### `kwargs`:\n\n" + "\n".join(bullets))
def __call__(self, fn):
@functools.wraps(fn)
def _fn(*args, **kwargs):
return fn(*args, **kwargs)
if _fn.__doc__ is None:
_fn.__doc__ = self._additional_note
else:
_fn.__doc__ += "\n%s" % self._additional_note
return _fn
|
alsrgv/tensorflow
|
tensorflow/python/ops/distributions/util.py
|
Python
|
apache-2.0
| 55,467
|
import socket
#Define the different cases that this listener will respond to
#The listener responds to commands in the format {command arg 1, arg 2, ... arg n}
def network_command(recvString):
#get the command, it should be the first word
recvComm = recvString.split()
#Check which command was received, if not recognised return an error
if(recvComm[0].lower() == "adddevice"):
returnVal = 'OK'
elif(recvComm[0].lower() == "rectemp"): #Record the temperature
returnVal = 'OK'
elif(recvComm[0].lower() == "recsoil"): #Record the soil condition
returnVal = 'OK'
elif(recvComm[0].lower() == "rechum"): #Record the humidity
returnVal = 'OK'
elif(recvComm[0].lower() == "recsth"): #Record soil, temp and humidity
returnVal = 'OK'
else:
returnVal = 'commands are adddevice, rectemp, recsoil, rechum, recsth'
return returnVal
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = '0.0.0.0'
port = 12345
s.bind((host, port))
s.listen(5)
while True:
c, addr = s.accept()
print ('Connection from ', addr)
reply = c.recv(4096)
print reply
c.send(network_command(reply) + '\n')
c.close()
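# A hedged client sketch (not part of the original script); the host below is
# an assumption, only the port matches the listener above:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('192.168.0.10', 12345))
#   c.send('rectemp 21.5')
#   print c.recv(4096)  # expect 'OK\n'
#   c.close()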
|
otoolebrian/Watering-Can
|
raspberry/listener/WCListener.py
|
Python
|
apache-2.0
| 1,127
|
import logging
from autotest.client.shared import error
from virttest import aexpect
from virttest import utils_misc
from virttest import env_process
@error.context_aware
def run(test, params, env):
"""
KVM reboot test:
1) Log into a guest with virtio data disk
2) Format the disk and copy file to it
    3) Stop the guest and boot it up again with the data disk set to readonly
4) Try to copy file to the data disk
5) Try to copy file from the data disk
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
error.context("Try to log into guest.", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = float(params.get("login_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
vols = utils_misc.get_winutils_vol(session)
if not vols:
raise error.TestError("Can not find winutils in guest.")
filen = 0
error.context("Format the disk and copy file to it", logging.info)
os_type = params["os_type"]
copy_cmd = params.get("copy_cmd", "copy %s %s")
disk_idx = params.get("disk_index", 1)
fs_type = params.get("fstype", "ntfs")
drive_letter = params.get("drive_letter", "I")
disk_size = params.get("partition_size_data", "200M")
src_file = params.get("src_file", "").replace("WIN_UTIL", vols)
utils_misc.format_guest_disk(session, disk_idx, drive_letter,
disk_size, fs_type, os_type)
dst_file = drive_letter + ":\\" + str(filen)
session.cmd(copy_cmd % (src_file, dst_file))
filen += 1
msg = "Stop the guest and boot up it again with the data disk"
msg += " set to readonly"
error.context(msg, logging.info)
session.close()
vm.destroy()
data_img = params.get("images").split()[-1]
params["image_readonly_%s" % data_img] = "yes"
params["force_create_image_%s" % data_img] = "no"
env_process.preprocess(test, params, env)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
session = vm.wait_for_login(timeout=timeout)
try:
error.context("Try to write to the readonly disk", logging.info)
dst_file_readonly = drive_letter + ":\\" + str(filen)
session.cmd(copy_cmd % (src_file, dst_file_readonly))
raise error.TestFail("Write in readonly disk should failed.")
except aexpect.ShellCmdError:
error.context("Try to read from the readonly disk", logging.info)
session.cmd(copy_cmd % (dst_file, "C:\\"))
session.close()
|
uni-peter-zheng/tp-qemu
|
qemu/tests/readonly_disk.py
|
Python
|
gpl-2.0
| 2,593
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class ImageList(QListWidget):
def __init__(self, *args, **kwargs):
super(ImageList, self).__init__(*args, **kwargs)
self.setViewMode(QListView.IconMode)
self.setIconSize(QSize(100, 100))
self.setSpacing(10)
def addImage(self, pixmap, path, bbid=None):
imageItem = QListWidgetItem(self)
imageItem.setIcon(QIcon(pixmap))
imageItem.setData(Qt.UserRole, pixmap)
imageItem.setData(Qt.UserRole+1, path)
imageItem.setData(Qt.UserRole+2, bbid)
imageItem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled)
return imageItem
class CheckableImageList(ImageList):
def addImage(self, pixmap, path, bbid):
imageItem = super(CheckableImageList, self).addImage(pixmap, path, bbid)
imageItem.setFlags(imageItem.flags() | Qt.ItemIsUserCheckable)
imageItem.setCheckState(Qt.Unchecked)
return imageItem
def checkedItems(self):
checked_items = []
for index in range(self.count()):
if self.item(index).checkState() == Qt.Checked:
checked_items.append(self.item(index))
return checked_items
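# A minimal usage sketch (assumptions: a running QApplication and a valid
# image file; neither is part of the original module):
#
#   app = QApplication([])
#   lst = CheckableImageList()
#   item = lst.addImage(QPixmap('photo.png'), 'photo.png', bbid=1)
#   item.setCheckState(Qt.Checked)
#   lst.checkedItems()  # -> [item]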
|
bluec0re/pascal-utils
|
widgets.py
|
Python
|
gpl-3.0
| 1,267
|
"""Performance example of running native ASTRA vs using ODL for reconstruction.
In this example, a 512x512 image is reconstructed using the Conjugate Gradient
Least Squares method on the GPU.
In general, ASTRA is faster than ODL since it does not need to perform any
copies and all arithmetic is performed on the GPU. Despite this, ODL is not
much slower. In this example, the overhead is about 60 %, depending on the
hardware used.
"""
import astra
import numpy as np
import matplotlib.pyplot as plt
import scipy.misc
import odl
from odl.util.testutils import timer
# Common geometry parameters
domain_size = np.array([512, 512])
n_angles = 180
det_size = 362
niter = 50
phantom = np.rot90(scipy.misc.ascent().astype('float'), -1)
# --- ASTRA ---
# Define ASTRA geometry
vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1])
proj_geom = astra.create_proj_geom('parallel',
np.linalg.norm(domain_size) / det_size,
det_size,
np.linspace(0, np.pi, n_angles))
# Create ASTRA projector
proj_id = astra.create_projector('cuda', proj_geom, vol_geom)
# Create sinogram
sinogram_id, sinogram = astra.create_sino(phantom, proj_id)
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the CUDA backend
cfg = astra.astra_dict('CGLS_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['ProjectorId'] = proj_id
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
with timer('ASTRA Run'):
# Run the algorithm
astra.algorithm.run(alg_id, niter)
# Get the result
rec = astra.data2d.get(rec_id)
# Clean up.
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
# --- ODL ---
# Create reconstruction space
reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size)
# Create geometry
geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size)
# Create ray transform
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda')
# Create sinogram
data = ray_trafo(phantom)
# Solve with CGLS (aka CGN)
x = reco_space.zero()
with timer('ODL Run'):
odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter)
# Display results for comparison
plt.figure('Phantom')
plt.imshow(phantom.T, origin='lower', cmap='bone')
plt.figure('ASTRA Sinogram')
plt.imshow(sinogram.T, origin='lower', cmap='bone')
plt.figure('ASTRA Reconstruction')
plt.imshow(rec.T, origin='lower', cmap='bone')
plt.figure('ODL Sinogram')
plt.imshow(data.asarray().T, origin='lower', cmap='bone')
plt.figure('ODL Reconstruction')
plt.imshow(x.asarray().T, origin='lower', cmap='bone')
plt.show()
|
odlgroup/odl
|
examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py
|
Python
|
mpl-2.0
| 2,904
|
# -*- coding: utf-8 -*-
import re
from django import forms
from django.contrib.auth.models import User
from django.forms import formset_factory
from django.forms.widgets import TextInput
from django.utils import timezone
from dal import autocomplete
from tagging.fields import TagField
import accounts.utils
from bulb.models import Book, NeededBook, Request, Group, Session, Report, Membership, ReaderProfile, Recruitment, NewspaperSignup, DewanyaSuggestion, BookCommitment, RecommendedBook, BookRecommendation
from bulb import models, utils
city_choices = (
('-', u'الرياض وجدة والأحساء'),
(u'الرياض', u'الرياض فقط'),
(u'جدة', u'جدة فقط'),
    (u'الأحساء', u'الأحساء فقط'),
)
gender_choices = (
('-', u'الطلاب والطالبات'),
('F', u'الطالبات'),
('M', u'الطلاب'),
)
class CommonControl:
def control_gender(self):
        # Modify the choice only if the user is not a superuser nor a
        # Bulb coordinator.  This is a really, really, really stupid
        # default option, but it's just to make sure that people know
        # what they are choosing.
if self.user_gender == 'F':
if not self.instance.id:
self.fields['gender'].initial = 'F'
self.fields['gender'].choices = (
('-', u'الطلاب والطالبات'),
('F', u'الطالبات'),
)
elif self.user_gender == 'M':
if not self.instance.id:
self.fields['gender'].initial = 'M'
self.fields['gender'].choices = (
('-', u'الطلاب والطالبات'),
('M', u'الطلاب')
)
class NeededBookForm(forms.ModelForm):
class Meta:
model = models.NeededBook
fields = ['title', 'authors', 'description', 'cover', 'tags',
'category']
class GenericBookForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
# Remove is_publicly_owned field from ordinary users.
user = kwargs.pop('user')
super(GenericBookForm, self).__init__(*args, **kwargs)
if not user.is_superuser and \
not utils.is_bulb_coordinator_or_deputy(user) and \
not utils.is_bulb_member(user):
del self.fields['is_publicly_owned']
class BookEditForm(GenericBookForm):
"""Form used to edit books. It allows changing contribution type from
giving to lending."""
tags = TagField()
class Meta:
model = models.Book
fields = ['title', 'authors', 'edition', 'pages', 'condition',
'description', 'cover', 'tags', 'category',
'contribution', 'available_until', 'is_publicly_owned']
class BookGiveForm(GenericBookForm):
class Meta:
model = models.Book
fields = ['title', 'authors', 'edition', 'pages',
'condition', 'description', 'cover', 'tags',
'category', 'is_publicly_owned']
class BookLendForm(GenericBookForm):
class Meta:
model = models.Book
fields = ['title', 'authors', 'edition', 'pages', 'condition',
'description', 'cover', 'category', 'tags',
'available_until', 'is_publicly_owned']
class RequestForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
super(RequestForm, self).__init__(*args, **kwargs)
if instance.book.contribution == 'L':
self.fields['borrowing_end_date'].required = True
def clean_delivery(self):
# You know the "males and females are not supposed to meet"
# bullshit? Yeah.
data = self.cleaned_data['delivery']
if not data:
return data
requester_gender = accounts.utils.get_user_gender(self.instance.requester)
owner_gender = accounts.utils.get_user_gender(self.instance.book.submitter)
if data == 'I' or requester_gender != owner_gender:
delivery = 'I'
else:
delivery = 'D'
return delivery
class Meta:
model = models.Request
fields = ['delivery', 'borrowing_end_date']
widgets = {'delivery': forms.HiddenInput()}
class GroupForm(forms.ModelForm, CommonControl):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(GroupForm, self).__init__(*args, **kwargs)
        # After creating the group, members can be controlled from a
        # dedicated page.
if self.instance.pk:
del self.fields['members']
if self.instance.id:
self.user_city = accounts.utils.get_user_city(self.instance.coordinator)
self.user_gender = accounts.utils.get_user_gender(self.instance.coordinator)
if self.instance.is_limited_by_city:
self.fields['city'].initial = self.user_city
if self.instance.is_limited_by_gender:
self.fields['gender'].initial = self.user_gender
else:
self.user_city = accounts.utils.get_user_city(self.user)
self.user_gender = accounts.utils.get_user_gender(self.user)
self.fields['city'].initial = '-'
if not self.user.is_superuser and \
not utils.is_bulb_coordinator_or_deputy(self.user):
self.control_gender()
if self.user_city == u'الرياض':
self.fields['city'].choices = (
('-', u'الرياض وجدة والأحساء'),
('R', u'الرياض فقط'),
)
elif self.user_city == u'الأحساء':
self.fields['city'].choices = (
('-', u'الرياض وجدة والأحساء'),
('A', u'الأحساء فقط'),
)
elif self.user_city == u'جدة':
self.fields['city'].choices = (
('-', u'الرياض وجدة والأحساء'),
('J', u'جدة فقط'),
)
gender = forms.ChoiceField(choices=gender_choices, label=u"المجموعة تقبل عضوية")
city = forms.ChoiceField(choices=city_choices, label=u"تشمل المجموعة")
members = forms.ModelMultipleChoiceField(
widget=autocomplete.ModelSelect2Multiple(url='bulb:bulb-user-autocomplete',
attrs={
'data-html': 'true',
'data-placeholder': 'أَضف عنصرا',
}),
label=u"الأعضاء",
queryset=User.objects.all(),
required=False)
def save(self):
group = super(GroupForm, self).save(commit=False)
if self.user_gender == self.cleaned_data['gender']:
group.is_limited_by_gender = True
else:
group.is_limited_by_gender = False
if self.user_city == self.cleaned_data['city']:
group.is_limited_by_city = True
else:
group.is_limited_by_city = False
group.save()
return group
class Meta:
model = models.Group
fields = ['name', 'image', 'description', 'category',
'is_private']
class FreeSessionForm(forms.ModelForm, CommonControl):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(FreeSessionForm, self).__init__(*args, **kwargs)
self.user_city = accounts.utils.get_user_city(self.user)
self.user_gender = accounts.utils.get_user_gender(self.user)
        # Limit the choice only if the user is not a superuser nor a
        # Bulb coordinator.
if not self.user.is_superuser and \
not utils.is_bulb_coordinator_or_deputy(self.user):
self.control_gender()
def save(self):
session = super(FreeSessionForm, self).save(commit=False)
        if self.user_gender == self.cleaned_data['gender']:
            session.is_limited_by_gender = True
        session.save()
        return session
gender = forms.ChoiceField(choices=gender_choices, label=u"الجلسة تقبل حضور")
class Meta:
model = models.Session
fields = ['title', 'agenda', 'location', 'date', 'start_time',
'end_time']
class SessionForm(forms.ModelForm):
class Meta:
model = models.Session
fields = ['title', 'agenda', 'location', 'date', 'start_time',
'end_time']
class ReportForm(forms.ModelForm):
attendees = forms.ModelMultipleChoiceField(
widget=autocomplete.ModelSelect2Multiple(url='bulb:bulb-user-autocomplete',
attrs={
'data-placeholder': 'أَضف اسما',
'data-html': 'true',
}),
label=u"الحضور",
queryset=User.objects.all(),
required=False)
class Meta:
model = models.Report
fields = ['attendees']#, 'description']
class ReaderProfileForm(forms.ModelForm):
def clean_twitter(self):
data = self.cleaned_data['twitter']
if not data:
return data
data = re.sub(u'^(?:https?://(?:m\.)?twitter\.com/)?@?', '', data)
if not re.match(u'^[A-Za-z\d_]+$', data):
raise forms.ValidationError(u"أدخل اسم مستخدم صحيح.")
else:
return data
def clean_goodreads(self):
data = self.cleaned_data['goodreads']
if not data:
return data
if not re.match(u'^(?:https?://)?(?:www.)?goodreads\.com/user/show/', data):
raise forms.ValidationError(u"أدخل رابط صفحتك على Goodreads.")
else:
# Because!
data = re.sub('^http://', 'https://', data)
if not re.match('^https?://', data):
data = u"https://" + data
return data
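    # A worked example (not in the original): 'www.goodreads.com/user/show/123'
    # passes the check, is given a scheme, and is returned as
    # 'https://www.goodreads.com/user/show/123'.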
class Meta:
model = models.ReaderProfile
fields = ['areas_of_interests', 'favorite_books',
'favorite_writers', 'average_reading',
'goodreads', 'twitter']
class RecruitmentForm(forms.ModelForm):
class Meta:
model = models.Recruitment
exclude = ['user', 'year']
class NewspaperSignupForm(forms.ModelForm):
email = forms.EmailField(required=True)
class Meta:
model = models.NewspaperSignup
fields = ['email']
class DewanyaSuggestionForm(forms.ModelForm):
class Meta:
model = models.DewanyaSuggestion
fields = ['name', 'subject']
widgets = {'name': forms.widgets.TextInput(attrs={'class': 'user-autocomplete'})}
DewanyaSuggestionFormSet = forms.formset_factory(DewanyaSuggestionForm, extra=3)
class BookCommitmentForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
readathon = kwargs.pop('readathon')
super(BookCommitmentForm, self).__init__(*args, **kwargs)
if readathon.start_date < timezone.now().date():
del self.fields['wants_to_attend']
class Meta:
model = models.BookCommitment
fields = ['title', 'cover', 'pages', 'reason',
'wants_to_attend', 'wants_to_contribute']
class UpdateBookCommitmentForm(forms.ModelForm):
class Meta:
model = models.BookCommitment
fields = ['pages', 'completed_pages']
class CulturalProgramForm(forms.Form):
user = forms.ModelChoiceField(
widget=autocomplete.ModelSelect2(url='bulb:bulb-user-autocomplete',
attrs={
'data-html': 'true',
'data-placeholder': 'أَضف شخصا',
}),
label=u"المستعير/ة",
queryset=User.objects.filter(is_active=True))
book = forms.ModelChoiceField(
widget=autocomplete.ModelSelect2(url='bulb:bulb-book-autocomplete',
attrs={
'data-placeholder': 'أَضف كتابا',
}),
label=u"الكتاب",
queryset=models.Book.objects.available())
class EditBookRecommendationForm(forms.ModelForm):
class Meta:
model = models.BookRecommendation
fields = ['comment']
class AddBookRecommendationForm(forms.Form):
recommended_book = forms.ModelChoiceField(required=False,
widget=autocomplete.ModelSelect2(url='bulb:bulb-recommended-book-autocomplete',
attrs={
'data-html': 'true',
'data-placeholder': 'أَضف كتابا',
}),
label=u"الكتاب",
queryset=models.RecommendedBook.objects.all())
category = forms.ModelChoiceField(label=u"التصنيف",
required=False,
queryset=models.Category.objects.filter(is_meta=False))
title = forms.CharField(required=False, max_length=200, label=u"العنوان")
authors = forms.CharField(required=False, max_length=200, label=u"تأليف")
cover = forms.ImageField(required=False, label=u"الغلاف")
comment = forms.CharField(widget=forms.Textarea(attrs={'class': 'form-control input-lg'}), label=u"تعليق")
def clean(self):
cleaned_data = super(AddBookRecommendationForm, self).clean()
self.recommended_book = self.cleaned_data.get('recommended_book')
self.recommended_book_fields = {'title': self.cleaned_data['title'],
'authors': self.cleaned_data['authors'],
'category': self.cleaned_data['category'],
'cover': self.cleaned_data['cover']}
if not self.recommended_book and\
not all(self.recommended_book_fields.values()):
raise forms.ValidationError(u"لم تدخل بيانات كافية عن الكتاب")
def save(self, user):
if self.recommended_book:
book_recommendation = models.BookRecommendation.objects\
.create(recommended_book=self.recommended_book,
user=user,
comment=self.cleaned_data['comment'])
else:
recommended_book = models.RecommendedBook.objects.create(**self.recommended_book_fields)
book_recommendation = models.BookRecommendation.objects\
.create(recommended_book=recommended_book,
user=user,
comment=self.cleaned_data['comment'])
return book_recommendation
|
enjaz/enjaz
|
bulb/forms.py
|
Python
|
agpl-3.0
| 15,723
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2011 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Implementation of the tools to edit durations of selected music.
Durations are represented simply by lists of ly.lex.lilypond.Duration tokens.
"""
from __future__ import unicode_literals
import itertools
import icons
import inputdialog
import lydocument
import ly.rhythm
_clipboard = [] # clipboard for rhythm copy and paste
_history = set() # earlier rhythms typed in apply dialog
def rhythm_double(cursor):
ly.rhythm.rhythm_double(lydocument.cursor(cursor))
def rhythm_halve(cursor):
ly.rhythm.rhythm_halve(lydocument.cursor(cursor))
def rhythm_dot(cursor):
ly.rhythm.rhythm_dot(lydocument.cursor(cursor))
def rhythm_undot(cursor):
ly.rhythm.rhythm_undot(lydocument.cursor(cursor))
def rhythm_remove_scaling(cursor):
ly.rhythm.rhythm_remove_scaling(lydocument.cursor(cursor))
def rhythm_remove_fraction_scaling(cursor):
ly.rhythm.rhythm_remove_fraction_scaling(lydocument.cursor(cursor))
def rhythm_remove(cursor):
ly.rhythm.rhythm_remove(lydocument.cursor(cursor))
def rhythm_implicit(cursor):
ly.rhythm.rhythm_implicit(lydocument.cursor(cursor))
def rhythm_implicit_per_line(cursor):
ly.rhythm.rhythm_implicit_per_line(lydocument.cursor(cursor))
def rhythm_explicit(cursor):
ly.rhythm.rhythm_explicit(lydocument.cursor(cursor))
def rhythm_apply(cursor, mainwindow):
durs = inputdialog.getText(mainwindow,
_("Apply Rhythm"), _("Enter a rhythm:"),
complete = sorted(_history),
regexp = r'([0-9./* ]|\\breve|\\longa|\\maxima)+',
help = "rhythm", icon = icons.get('tools-rhythm'))
if not durs:
return # user cancelled dialog
durations = durs.split()
if durations:
_history.add(durs.strip())
ly.rhythm.rhythm_overwrite(lydocument.cursor(cursor), durations)
def rhythm_copy(cursor):
_clipboard[:] = ly.rhythm.rhythm_extract(lydocument.cursor(cursor))
def rhythm_paste(cursor):
ly.rhythm.rhythm_overwrite(lydocument.cursor(cursor), _clipboard)
|
shimpe/frescobaldi
|
frescobaldi_app/rhythm/rhythm.py
|
Python
|
gpl-2.0
| 2,875
|
import time
from core import logger
from core.auto_process.common import ProcessResult
from core.auto_process.managers.sickbeard import SickBeard
import requests
class PyMedusa(SickBeard):
"""PyMedusa class."""
def __init__(self, sb_init):
super(PyMedusa, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/home/postprocess/processEpisode'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
class PyMedusaApiV1(SickBeard):
"""PyMedusa apiv1 class."""
def __init__(self, sb_init):
super(PyMedusaApiV1, self).__init__(sb_init)
def _create_url(self):
return '{0}{1}:{2}{3}/api/{4}/'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root, self.sb_init.apikey)
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0} with params: {1}'.format(url, self.sb_init.fork_params), self.sb_init.section)
try:
response = self.session.get(url, auth=(self.sb_init.username, self.sb_init.password), params=self.sb_init.fork_params, stream=True, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to open URL: {0}'.format(url), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Unable to connect to {0}'.format(self.sb_init.section),
status_code=1,
)
if response.status_code not in [requests.codes.ok, requests.codes.created, requests.codes.accepted]:
logger.error('Server returned status {0}'.format(response.status_code), self.sb_init.section)
return ProcessResult(
message='{0}: Failed to post-process - Server returned status {1}'.format(self.sb_init.section, response.status_code),
status_code=1,
)
if response.json()['result'] == 'success':
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
class PyMedusaApiV2(SickBeard):
"""PyMedusa apiv2 class."""
def __init__(self, sb_init):
super(PyMedusaApiV2, self).__init__(sb_init)
# Check for an apikey, as this is required with using fork = medusa-apiv2
if not sb_init.apikey:
raise Exception('For the section SickBeard `fork = medusa-apiv2` you also need to configure an `apikey`')
def _create_url(self):
return '{0}{1}:{2}{3}/api/v2/postprocess'.format(self.sb_init.protocol, self.sb_init.host, self.sb_init.port, self.sb_init.web_root)
def _get_identifier_status(self, url):
# Loop through requesting medusa for the status on the queueitem.
try:
response = self.session.get(url, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to get postprocess identifier status', self.sb_init.section)
return False
try:
jdata = response.json()
except ValueError:
return False
return jdata
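    # A hedged illustration, inferred only from the keys read in api_call
    # below (not from Medusa's documented API): the status endpoint is polled
    # until it returns something like {'success': true, 'output': [...]} or a
    # payload containing an 'error' key.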
def api_call(self):
self._process_fork_prarams()
url = self._create_url()
logger.debug('Opening URL: {0}'.format(url), self.sb_init.section)
payload = self.sb_init.fork_params
payload['resource'] = self.sb_init.fork_params['nzbName']
del payload['nzbName']
# Update the session with the x-api-key
self.session.headers.update({
'x-api-key': self.sb_init.apikey,
'Content-type': 'application/json'
})
# Send postprocess request
try:
response = self.session.post(url, json=payload, verify=False, timeout=(30, 1800))
except requests.ConnectionError:
logger.error('Unable to send postprocess request', self.sb_init.section)
return ProcessResult(
                message='{0}: Unable to send postprocess request to PyMedusa'.format(self.sb_init.section),
status_code=1,
)
# Get UUID
if response:
try:
jdata = response.json()
except ValueError:
logger.debug('No data returned from provider')
return False
if not jdata.get('status') or not jdata['status'] == 'success':
return False
queueitem_identifier = jdata['queueItem']['identifier']
wait_for = int(self.sb_init.config.get('wait_for', 2))
n = 0
response = {}
url = '{0}/{1}'.format(url, queueitem_identifier)
while n < 12: # set up wait_for minutes to see if command completes..
time.sleep(5 * wait_for)
            response = self._get_identifier_status(url) or {}  # treat a failed poll as an empty status
if response and response.get('success'):
break
if 'error' in response:
break
n += 1
# Log Medusa's PP logs here.
if response.get('output'):
for line in response['output']:
logger.postprocess('{0}'.format(line), self.sb_init.section)
# For now this will most likely always be True. But in the future we could return an exit state
# for when the PP in medusa didn't yield an expected result.
if response.get('success'):
return ProcessResult(
message='{0}: Successfully post-processed {1}'.format(self.sb_init.section, self.input_name),
status_code=0,
)
return ProcessResult(
message='{0}: Failed to post-process - Returned log from {0} was not as expected.'.format(self.sb_init.section),
status_code=1, # We did not receive Success confirmation.
)
|
clinton-hall/nzbToMedia
|
core/auto_process/managers/pymedusa.py
|
Python
|
gpl-3.0
| 6,117
|
#!env/bin/python
# -*- coding: utf8 -*-
# Utility to merge two versions of the same sqlite3 database.
#Copyright (C) 2015 J. Pablo Navarro
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License along
#with this program; if not, write to the Free Software Foundation, Inc.,
#51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
This utiliy merges two version of the same database on a single file.
It's mandatory both files have the same tables with the same fields.
It should receive two arguments with the databases sources.
The first one will be prioritary on conflicts.
"""
class sqlitemerger:
"""
Just the class of the utility.
"""
def __init__(self,db1,db2):
"""
Load necessary libraries and make the two global connections and cursors.
"""
import sqlite3
self.db1 = db1
self.db2 = db2
global connection_db1, connection_db2, c1, c2
connection_db1 = sqlite3.connect(db1)
connection_db2 = sqlite3.connect(db2)
c1 = connection_db1.cursor()
c2 = connection_db2.cursor()
pass
def db_checker(self,db):
"""
Check for existance and they're sqlite3 databases.
"""
import magic
if 'sqlite' in magic.from_file(db).lower():
return True
else:
return False
    def dbs_checker_tables_and_fields(self,db1,db2):
        """
        Check if the files have the same tables and then the same fields.
        """
        tablas1 = c1.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall() # list of tables in db1
        tablas2 = c2.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall() # list of tables in db2
        if not tablas1 or tablas1 != tablas2:
            return False
        # Compare the field definitions of every table pair.
        for tabla in tablas1:
            fields1 = c1.execute("PRAGMA table_info(%s)" % tabla[0]).fetchall()
            fields2 = c2.execute("PRAGMA table_info(%s)" % tabla[0]).fetchall()
            if fields1 != fields2:
                return False
        return True
def merge(self):
"""
Use this method to merge the two database into the new one.
"""
output_name='merge_output.db'
if self.db_checker(self.db1) and self.db_checker(self.db2):
print 'Files are OK.'
if self.dbs_checker_tables_and_fields(self.db1,self.db2):
print 'They have the same tables and fields.'
self.the_merger(self.db1,self.db2,output_name)
else:
                print "They don't have the same tables and fields."
sys.exit()
else:
print 'There is some problem with your files.\n'
sys.exit()
pass
def the_merger(self,db1,db2,output_name):
"""
Magic happens here.
"""
print 'Whoala!!'
pass
def test(self):
return True
def test2(self):
return 1
def test3():
print 'hola'
# ------------------------------------------------
import sys
#print 'number of arguments: ',len(sys.argv)
#print 'arguments: '+str(sys.argv)
if len(sys.argv) != 3:
print "Error!\nTwo files need to be adressed."
sys.exit()
else:
x = sqlitemerger(sys.argv[1],sys.argv[2])
x.merge()
#import milibreria as milib
#milib.prueba()
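# A hedged sketch of one possible merge strategy (not the author's
# implementation; the_merger above is still a stub): attach db2 to db1's
# connection and INSERT OR IGNORE each table, so rows already present in db1
# win on primary-key conflicts.
#
#   c1.execute("ATTACH DATABASE ? AS other", (db2,))
#   tables = c1.execute(
#       "SELECT name FROM sqlite_master WHERE type='table'").fetchall()
#   for (table,) in tables:
#       c1.execute("INSERT OR IGNORE INTO %s SELECT * FROM other.%s"
#                  % (table, table))
#   connection_db1.commit()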
|
helfio/sqlite_merger
|
main.py
|
Python
|
gpl-2.0
| 3,579
|
"""
Authentication example using Flask-Login
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This provides a simple example of using Flask-Login as the authentication
framework which can guard access to certain API endpoints.
This requires the following Python libraries to be installed:
* Flask
* Flask-Login
* Flask-Restless
* Flask-SQLAlchemy
* Flask-WTF
To install them using ``pip``, do::
pip install Flask Flask-SQLAlchemy Flask-Restless Flask-Login Flask-WTF
To use this example, run this package from the command-line. If you are
using Python 2.7 or later::
python -m authentication
If you are using Python 2.6 or earlier::
python -m authentication.__main__
Attempts to access the URL of the API for the :class:`User` class at
``http://localhost:5000/api/user`` will fail with an :http:statuscode:`401`
because you have not yet logged in. To log in, visit
``http://localhost:5000/login`` and login with username ``example`` and
password ``example``. Once you have successfully logged in, you may now
make :http:get:`http://localhost:5000/api/user` requests.
:copyright: 2012 Jeffrey Finkelstein <jeffrey.finkelstein@gmail.com>
:license: GNU AGPLv3+ or BSD
"""
import os
import os.path
from flask import Flask, flash, render_template, redirect, url_for
from flask.ext.login import current_user, login_user, LoginManager, UserMixin
from flask.ext.restless import APIManager, ProcessingException, NO_CHANGE
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.wtf import PasswordField, SubmitField, TextField, Form
# Step 0: the database in this example is at './test.sqlite'.
DATABASE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'test.sqlite')
if os.path.exists(DATABASE):
os.unlink(DATABASE)
# Step 1: setup the Flask application.
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['TESTING'] = True
app.config['SECRET_KEY'] = os.urandom(24)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % DATABASE
# Step 2: initialize extensions.
db = SQLAlchemy(app)
api_manager = APIManager(app, flask_sqlalchemy_db=db)
login_manager = LoginManager()
login_manager.setup_app(app)
# Step 3: create the user database model.
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.Unicode)
password = db.Column(db.Unicode)
# Step 4: create the database and add a test user.
db.create_all()
user1 = User(username=u'example', password=u'example')
db.session.add(user1)
db.session.commit()
# Step 5: this is required for Flask-Login.
@login_manager.user_loader
def load_user(userid):
return User.query.get(userid)
# Step 6: create the login form.
class LoginForm(Form):
username = TextField('username')
password = PasswordField('password')
submit = SubmitField('Login')
# Step 7: create endpoints for the application, one for index and one for login
@app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
#
# you would check username and password here...
#
username, password = form.username.data, form.password.data
matches = User.query.filter_by(username=username,
password=password).all()
if len(matches) > 0:
login_user(matches[0])
return redirect(url_for('index'))
flash('Username and password pair not found')
return render_template('login.html', form=form)
# Step 8: create the API for User with the authentication guard.
def auth_func(**kw):
if not current_user.is_authenticated():
raise ProcessingException(description='Not authenticated!')
api_manager.create_api(User, preprocessors=dict(GET_SINGLE=[auth_func],
GET_MANY=[auth_func]))
# Step 9: configure and run the application
app.run()
# Step 10: visit http://localhost:5000/api/user in a Web browser. You will
# receive a "Not Authorized" response.
#
# Step 11: visit http://localhost:5000/login and enter username "example" and
# password "example". You will then be logged in.
#
# Step 12: visit http://localhost:5000/api/user again. This time you will get a
# response showing the objects in the User table of the database.
|
dnordberg/flask-restless
|
examples/server_configurations/authentication/__main__.py
|
Python
|
agpl-3.0
| 4,483
|
# This file is part of the django-environ.
#
# Copyright (c) 2021, Serghei Iakovlev <egrep@protonmail.ch>
# Copyright (c) 2013-2021, Daniele Faraglia <daniele.faraglia@gmail.com>
#
# For the full copyright and license information, please view
# the LICENSE.txt file that was distributed with this source code.
from environ.compat import json
class FakeEnv:
URL = 'http://www.google.com/'
POSTGRES = 'postgres://uf07k1:wegauwhg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722'
MYSQL = 'mysql://bea6eb0:69772142@us-cdbr-east.cleardb.com/heroku_97681?reconnect=true'
MYSQL_CLOUDSQL_URL = 'mysql://djuser:hidden-password@//cloudsql/arvore-codelab:us-central1:mysqlinstance/mydatabase'
MYSQLGIS = 'mysqlgis://user:password@127.0.0.1/some_database'
SQLITE = 'sqlite:////full/path/to/your/database/file.sqlite'
ORACLE_TNS = 'oracle://user:password@sid/'
ORACLE = 'oracle://user:password@host:1521/sid'
CUSTOM_BACKEND = 'custom.backend://user:password@example.com:5430/database'
REDSHIFT = 'redshift://user:password@examplecluster.abc123xyz789.us-west-2.redshift.amazonaws.com:5439/dev'
MEMCACHE = 'memcache://127.0.0.1:11211'
REDIS = 'rediscache://127.0.0.1:6379/1?client_class=django_redis.client.DefaultClient&password=secret'
EMAIL = 'smtps://user@domain.com:password@smtp.example.com:587'
JSON = dict(one='bar', two=2, three=33.44)
DICT = dict(foo='bar', test='on')
PATH = '/home/dev'
EXPORTED = 'exported var'
@classmethod
def generate_data(cls):
return dict(STR_VAR='bar',
MULTILINE_STR_VAR='foo\\nbar',
MULTILINE_QUOTED_STR_VAR='---BEGIN---\\r\\n---END---',
MULTILINE_ESCAPED_STR_VAR='---BEGIN---\\\\n---END---',
INT_VAR='42',
FLOAT_VAR='33.3',
FLOAT_COMMA_VAR='33,3',
FLOAT_STRANGE_VAR1='123,420,333.3',
FLOAT_STRANGE_VAR2='123.420.333,3',
FLOAT_NEGATIVE_VAR='-1.0',
BOOL_TRUE_STRING_LIKE_INT='1',
BOOL_TRUE_INT=1,
BOOL_TRUE_STRING_LIKE_BOOL='True',
BOOL_TRUE_STRING_1='on',
BOOL_TRUE_STRING_2='ok',
BOOL_TRUE_STRING_3='yes',
BOOL_TRUE_STRING_4='y',
BOOL_TRUE_STRING_5='true',
BOOL_TRUE_BOOL=True,
BOOL_FALSE_STRING_LIKE_INT='0',
BOOL_FALSE_INT=0,
BOOL_FALSE_STRING_LIKE_BOOL='False',
BOOL_FALSE_BOOL=False,
PROXIED_VAR='$STR_VAR',
ESCAPED_VAR=r'\$baz',
INT_LIST='42,33',
INT_TUPLE='(42,33)',
STR_LIST_WITH_SPACES=' foo, bar',
EMPTY_LIST='',
DICT_VAR='foo=bar,test=on',
DATABASE_URL=cls.POSTGRES,
DATABASE_MYSQL_URL=cls.MYSQL,
DATABASE_MYSQL_GIS_URL=cls.MYSQLGIS,
DATABASE_SQLITE_URL=cls.SQLITE,
DATABASE_ORACLE_URL=cls.ORACLE,
DATABASE_ORACLE_TNS_URL=cls.ORACLE_TNS,
DATABASE_REDSHIFT_URL=cls.REDSHIFT,
DATABASE_CUSTOM_BACKEND_URL=cls.CUSTOM_BACKEND,
DATABASE_MYSQL_CLOUDSQL_URL=cls.MYSQL_CLOUDSQL_URL,
CACHE_URL=cls.MEMCACHE,
CACHE_REDIS=cls.REDIS,
EMAIL_URL=cls.EMAIL,
URL_VAR=cls.URL,
JSON_VAR=json.dumps(cls.JSON),
PATH_VAR=cls.PATH,
EXPORTED_VAR=cls.EXPORTED)
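    # A hedged usage sketch (not part of the fixture): with this data loaded
    # into the environment, the values above exercise reads such as:
    #
    #   import environ
    #   env = environ.Env()
    #   env.int('INT_VAR')              # -> 42
    #   env.bool('BOOL_TRUE_STRING_5')  # -> True
    #   env.list('INT_LIST', cast=int)  # -> [42, 33]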
|
joke2k/django-environ
|
tests/fixtures.py
|
Python
|
mit
| 3,765
|
# encoding: utf-8
# module samba.dcerpc.drsblobs
# from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsblobs.so
# by generator 1.135
""" drsblobs DCE/RPC """
# imports
import dcerpc as __dcerpc
import talloc as __talloc
class ExtendedErrorInfo(__talloc.Object):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __ndr_pack__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_pack(object) -> blob
NDR pack
"""
pass
def __ndr_print__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_print(object) -> None
NDR print
"""
pass
def __ndr_unpack__(self, *args, **kwargs): # real signature unknown
"""
S.ndr_unpack(class, blob, allow_remaining=False) -> None
NDR unpack
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
computer_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
detection_location = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
flags = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
generating_component = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
next = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
num_params = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
params = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
pid = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
status = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
time = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/drsblobs/ExtendedErrorInfo.py
|
Python
|
gpl-2.0
| 2,097
|
# Gambit scripts
#
# Copyright (C) USC Information Sciences Institute
# Author: Nibir Bora <nbora@usc.edu>
# URL: <http://cbg.isi.edu/>
# For license information, see LICENSE
import os
import sys
import csv
import math
import nltk
import shutil
import anyjson
import psycopg2
import itertools
import cPickle as pickle
import multiprocessing as mp
import matplotlib.pyplot as plt
from pprint import pprint
from pymining import itemmining
from nltk.corpus import stopwords
from nltk.probability import FreqDist
from datetime import datetime, timedelta
import settings as my
sys.path.insert(0, os.path.abspath('..'))
#
# GET DATA
#
def get_data():
'''
Matches sets from sets.json
File names are according to legend.json
Stopwords = nltk + stopwords.txt - vocab.txt
'''
print '\n', my.TS_START, my.TS_WINDOW, '\n'
SQL = '''SELECT timestamp AT TIME ZONE '{tz}', text, user_id\
FROM {rel_tweet} \
WHERE timestamp BETWEEN '{ts_start}'
AND timestamp '{ts_start}' + INTERVAL '{window} days'
'''.format(rel_tweet=my.REL_TWEET,
ts_start=my.TS_START, window=my.TS_WINDOW, tz=my.TIMEZONE)
print 'Querying DB...'
con = psycopg2.connect(my.DB_CONN_STRING)
cur = con.cursor()
cur.execute(SQL)
recs = cur.fetchall()
con.close()
print '{count} records retrieved.'.format(count=len(recs))
global sets
sets = {}
with open('data/' + my.DATA_FOLDER + 'sets.json', 'rb') as fp:
sets = anyjson.loads(fp.read())
sets = dict((st[0], tuple(set(s) for s in st[1])) for st in sets.items())
#pprint(sets)
global sw
sw = stopwords.words('english')
with open('data/' + my.DATA_FOLDER + 'stopwords.txt', 'rb') as fp:
sw.extend(fp.read().split())
sw.extend(my.STOPWORDS)
sw = [w for w in sw if len(w) > 2]
sw = list(set(sw))
with open('data/' + my.DATA_FOLDER + 'vocab.txt', 'rb') as fp:
vocab = fp.read().split()
sw = tuple(set(sw) - set(vocab))
print 'Stopword list length', len(sw)
global ts_start
ts_start = datetime.strptime(my.TS_START, my.TS_FORMAT)
td = timedelta(days=my.TS_WINDOW)
ts_end = ts_start + td
time = {
'ts_start' : my.TS_START,
'ts_end' : str(ts_end)
}
with open('data/' + my.DATA_FOLDER + 'time.json', 'wb') as fp:
fp.write(anyjson.dumps(time))
# optional
global influential_user_ids
with open('data/' + my.DATA_FOLDER + 'influential_user_ids.json', 'rb') as fp:
influential_user_ids = anyjson.loads(fp.read())
#for rec in recs:
# _match_tweet(rec)
manager = mp.Manager()
global q
q = manager.Queue()
pool = mp.Pool(processes=my.PROCESSES)
watcher = pool.apply_async(_tweet_writer, (q,))
pool.map(_match_tweet, recs)
q.put([-1, 0, ''])
	pool.close()
	pool.join()
def _tweet_writer(q):
'''Async. writer'''
files = {}
writer = {}
with open('data/' + my.DATA_FOLDER + 'legend.json', 'rb') as fp:
legend = anyjson.loads(fp.read())
path = 'data/' + my.DATA_FOLDER + 'data/'
if not os.path.exists(path): os.makedirs(path)
for id in legend:
open(path + str(id) + '.txt', 'wb')
files[id] = open(path + str(id) + '.txt', 'wb')
writer[id] = csv.writer(files[id], delimiter=',')
while 1:
id, diff, text = q.get()
if id == -1: break
writer[id].writerow([diff, text])
def _match_tweet(rec):
'''Map function'''
# optional
user_id = int(rec[2])
if user_id not in influential_user_ids: return
text = rec[1].lower().replace('\'', '')
toks = nltk.word_tokenize(text)
toks = [\
t for t in toks \
if len(t) > 2 \
and (t.replace('_','a').replace('-','b').isalnum() \
and not t.isdigit()) \
and t not in sw \
]
t = set(toks)
for id, st in sets.iteritems():
match = False
for s in st:
if len(s & t) >= len(s):
match = True
break
if match:
text = ' '.join(toks)
ts = rec[0]
diff = ts - ts_start
diff = int(diff.total_seconds())
q.put([id, diff, text])
#writer[id].writerow([diff, text])
#files[id].write(str(diff) + ',' + ' '.join(toks) + '\n')
def get_all_data():
''''''
print '\n', my.TS_START, my.TS_WINDOW, '\n'
SQL = '''SELECT timestamp AT TIME ZONE '{tz}', text\
FROM {rel_tweet} \
WHERE timestamp BETWEEN '{ts_start}'
AND timestamp '{ts_start}' + INTERVAL '{window} days'
'''.format(rel_tweet=my.REL_TWEET,
ts_start=my.TS_START, window=my.TS_WINDOW, tz=my.TIMEZONE)
print 'Querying DB...'
con = psycopg2.connect(my.DB_CONN_STRING)
cur = con.cursor()
cur.execute(SQL)
recs = cur.fetchall()
con.close()
print '{count} records retrieved.'.format(count=len(recs))
sw = stopwords.words('english')
with open('data/' + my.DATA_FOLDER + 'stopwords.txt', 'rb') as fp:
sw.extend(fp.read().split())
sw.extend(my.STOPWORDS)
sw = [w for w in sw if len(w) > 2]
sw = list(set(sw))
with open('data/' + my.DATA_FOLDER + 'vocab.txt', 'rb') as fp:
vocab = fp.read().split()
sw = tuple(set(sw) - set(vocab))
print 'Stopword list length', len(sw)
ts_start = datetime.strptime(my.TS_START, my.TS_FORMAT)
td = timedelta(days=my.TS_WINDOW)
ts_end = ts_start + td
time = {
'ts_start' : my.TS_START,
'ts_end' : str(ts_end)
}
with open('data/' + my.DATA_FOLDER + 'time.json', 'wb') as fp:
fp.write(anyjson.dumps(time))
path = 'data/' + my.DATA_FOLDER + 'data/'
if not os.path.exists(path): os.makedirs(path)
fp = open(path + 'all.txt', 'wb')
cr = csv.writer(fp, delimiter=',')
for rec in recs:
text = rec[1].lower().replace('\'', '')
toks = nltk.word_tokenize(text)
toks = [\
t for t in toks \
if len(t) > 2 \
and (t.replace('_','a').replace('-','b').isalnum() \
and not t.isdigit()) \
and t not in sw \
]
if len(toks) > 0:
text = ' '.join(toks)
ts = rec[0]
diff = ts - ts_start
diff = int(diff.total_seconds())
cr.writerow([diff, text])
fp.close()
#
# INFLUENTIAL
#
def plot_influential():
''''''
follow = []
friend = []
users = []
with open('data/' + my.DATA_FOLDER + 'user_ids_done.csv', 'rb') as fp:
cr = csv.reader(fp, delimiter=',')
for row in cr:
follow.append(int(row[1]))
friend.append(int(row[2]))
users.append((int(row[0]), int(row[1])))
influential = tuple(i[0] for i in users if i[1]>5000)
print len(influential), 'influential users.'
with open('data/' + my.DATA_FOLDER + 'influential_user_ids.json', 'wb') as fp:
fp.write(anyjson.dumps(influential))
fig = plt.figure(figsize=(12,8))
fig.set_tight_layout(True)
ax = fig.add_subplot(221)
ax.hist(follow, bins=100)
ax.set_ylim(0, 50)
ax.set_title('Followers')
fig.set_tight_layout(True)
ax = fig.add_subplot(222)
ax.hist(follow, bins=100, range=(100, 100000))
ax.set_ylim(0, 1000)
ax.set_title('Followers')
fig.set_tight_layout(True)
ax = fig.add_subplot(223)
ax.hist(friend, bins=100)
ax.set_title('Friends')
fig.set_tight_layout(True)
ax = fig.add_subplot(224)
ax.hist(friend, bins=100, range=(100, 100000))
ax.set_ylim(0, 1000)
ax.set_title('Friends')
plt.savefig('data/' + my.DATA_FOLDER + 'influential-hist' + '.pdf')
#
#
#
def make_pick_data():
''''''
with open('data/' + my.DATA_FOLDER + 'player_legend.json', 'rb') as fp:
player_legend = anyjson.loads(fp.read()).items()
with open('data/' + my.DATA_FOLDER + 'picks_legend.json', 'rb') as fp:
picks_legend = anyjson.loads(fp.read()).items()
from_path = 'data/' + my.DATA_FOLDER + 'player_data/'
to_path = 'data/' + my.DATA_FOLDER + 'data/'
for id, name in picks_legend:
pid = 0
screen_name = None
for id2, n2 in player_legend:
if _match_names(name, n2):
player_legend.remove((id2, n2))
screen_name = n2
pid = id2
print name, '\t', screen_name, pid
shutil.copy(from_path + str(pid) + '.txt',
to_path + str(id) + '.txt')
def _match_names(n1, n2):
n1 = n1.lower().split()
n2 = n2.lower().split()
avg_len = min(len(n1), len(n2))
thres = avg_len / 2.0
intersect = set(n1) & set(n2)
if len(intersect) > thres:
return True
else: return False
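# A worked example (not in the original): for n1 = 'Kobe Bryant' and
# n2 = 'kobe b bryant', thres = min(2, 3) / 2.0 = 1.0 and the lowercased
# intersection {'kobe', 'bryant'} has size 2 > 1.0, so the names match.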
|
nbir/gambit-scripts
|
scripts/nba_predictions/src/data.py
|
Python
|
apache-2.0
| 7,811
|
from django.core.management.base import BaseCommand
from theapps.sitemaps import ping_google
class Command(BaseCommand):
help = "Ping google with an updated sitemap, pass optional url of sitemap"
def execute(self, *args, **options):
if len(args) == 1:
sitemap_url = args[0]
else:
sitemap_url = None
ping_google(sitemap_url=sitemap_url)
|
thepian/theapps
|
theapps/sitemaps/management/commands/ping_google.py
|
Python
|
gpl-3.0
| 396
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import time
from proton.reactor import Container, ApplicationEvent, EventInjector, Selector
from proton.handlers import Handshaker, MessagingHandler
from proton import Handler, Url, symbol
from .common import Test, SkipTest, TestServer, free_tcp_port, ensureCanTestExtendedSASL
class Barf(Exception):
pass
class BarfOnInit:
def on_reactor_init(self, event):
raise Barf()
def on_connection_init(self, event):
raise Barf()
def on_session_init(self, event):
raise Barf()
def on_link_init(self, event):
raise Barf()
class BarfOnTask:
def on_timer_task(self, event):
raise Barf()
class BarfOnFinal:
init = False
def on_reactor_init(self, event):
self.init = True
def on_reactor_final(self, event):
raise Barf()
class BarfOnFinalDerived(Handshaker):
init = False
def on_reactor_init(self, event):
self.init = True
def on_reactor_final(self, event):
raise Barf()
class ExceptionTest(Test):
def setUp(self):
self.container = Container()
def test_reactor_final(self):
self.container.global_handler = BarfOnFinal()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_global_set(self):
self.container.global_handler = BarfOnInit()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_global_add(self):
self.container.global_handler.add(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_reactor_set(self):
self.container.handler = BarfOnInit()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_reactor_add(self):
self.container.handler.add(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_connection(self):
self.container.connection(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_connection_set(self):
c = self.container.connection()
c.handler = BarfOnInit()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_connection_add(self):
c = self.container.connection()
c.handler = object()
c.handler.add(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_session_set(self):
c = self.container.connection()
s = c.session()
s.handler = BarfOnInit()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_session_add(self):
c = self.container.connection()
s = c.session()
s.handler = object()
s.handler.add(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_link_set(self):
c = self.container.connection()
s = c.session()
l = s.sender("xxx")
l.handler = BarfOnInit()
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_link_add(self):
c = self.container.connection()
s = c.session()
l = s.sender("xxx")
l.handler = object()
l.handler.add(BarfOnInit())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_schedule(self):
self.container.schedule(0, BarfOnTask())
try:
self.container.run()
assert False, "expected to barf"
except Barf:
pass
def test_schedule_event(self):
class Nothing:
def __init__(self, p):
self.parent = p
results = []
def on_timer_task(self, event):
self.parent.triggered = True
assert event.context == self.parent.task
assert event.container == self.parent.container
self.task = self.container.schedule(0, Nothing(self))
self.container.run()
assert self.triggered == True
def test_schedule_many_nothings(self):
class Nothing:
results = []
def on_timer_task(self, event):
self.results.append(None)
num = 12345
for a in range(num):
self.container.schedule(0, Nothing())
self.container.run()
assert len(Nothing.results) == num
def test_schedule_many_nothing_refs(self):
class Nothing:
results = []
def on_timer_task(self, event):
self.results.append(None)
num = 12345
tasks = []
for a in range(num):
tasks.append(self.container.schedule(0, Nothing()))
self.container.run()
assert len(Nothing.results) == num
def test_schedule_many_nothing_refs_cancel_before_run(self):
class Nothing:
results = []
def on_timer_task(self, event):
self.results.append(None)
num = 12345
tasks = []
for a in range(num):
tasks.append(self.container.schedule(0, Nothing()))
for task in tasks:
task.cancel()
self.container.run()
assert len(Nothing.results) == 0
def test_schedule_cancel(self):
barf = self.container.schedule(10, BarfOnTask())
class CancelBarf:
def __init__(self, barf):
self.barf = barf
def on_timer_task(self, event):
self.barf.cancel()
pass
self.container.schedule(0, CancelBarf(barf))
start = time.time()
try:
self.container.run()
elapsed = time.time() - start
assert elapsed < 10, "should have cancelled immediately, took %ss" % elapsed
except Barf:
assert False, "expected barf to be cancelled"
def test_schedule_cancel_many(self):
num = 12345
barfs = set()
for a in range(num):
barf = self.container.schedule(10 * (a + 1), BarfOnTask())
class CancelBarf:
def __init__(self, barf):
self.barf = barf
def on_timer_task(self, event):
self.barf.cancel()
barfs.discard(self.barf)
pass
self.container.schedule(0, CancelBarf(barf))
barfs.add(barf)
start = time.time()
try:
self.container.run()
elapsed = time.time() - start
assert elapsed < num, "expected cancelled task to not delay the reactor by %s" % elapsed
assert not barfs, "expected all barfs to be discarded"
except Barf:
assert False, "expected barf to be cancelled"
class ApplicationEventTest(Test):
"""Test application defined events and handlers."""
class MyTestServer(TestServer):
def __init__(self):
super(ApplicationEventTest.MyTestServer, self).__init__()
class MyHandler(Handler):
def __init__(self, test):
super(ApplicationEventTest.MyHandler, self).__init__()
self._test = test
def on_hello(self, event):
# verify PROTON-1056
self._test.hello_rcvd = str(event)
def on_goodbye(self, event):
self._test.goodbye_rcvd = str(event)
def setUp(self):
import os
if not hasattr(os, 'pipe'):
# KAG: seems like Jython doesn't have an os.pipe() method
raise SkipTest()
if os.name=="nt":
# Correct implementation on Windows is complicated
raise SkipTest("PROTON-1071")
self.server = ApplicationEventTest.MyTestServer()
self.server.reactor.handler.add(ApplicationEventTest.MyHandler(self))
self.event_injector = EventInjector()
self.hello_event = ApplicationEvent("hello")
self.goodbye_event = ApplicationEvent("goodbye")
self.server.reactor.selectable(self.event_injector)
self.hello_rcvd = None
self.goodbye_rcvd = None
self.server.start()
def tearDown(self):
self.server.stop()
def _wait_for(self, predicate, timeout=10.0):
deadline = time.time() + timeout
while time.time() < deadline:
if predicate():
break
time.sleep(0.1)
assert predicate()
def test_application_events(self):
self.event_injector.trigger(self.hello_event)
self._wait_for(lambda: self.hello_rcvd is not None)
self.event_injector.trigger(self.goodbye_event)
self._wait_for(lambda: self.goodbye_rcvd is not None)
class AuthenticationTestHandler(MessagingHandler):
def __init__(self):
super(AuthenticationTestHandler, self).__init__()
port = free_tcp_port()
self.url = "localhost:%i" % port
self.verified = False
def on_start(self, event):
self.listener = event.container.listen(self.url)
def on_connection_opened(self, event):
event.connection.close()
def on_connection_opening(self, event):
assert event.connection.transport.user == "user@proton"
self.verified = True
def on_connection_closed(self, event):
event.connection.close()
self.listener.close()
def on_connection_error(self, event):
event.connection.close()
self.listener.close()
class ContainerTest(Test):
"""Test container subclass of reactor."""
def test_event_has_container_attribute(self):
ensureCanTestExtendedSASL()
class TestHandler(MessagingHandler):
def __init__(self):
super(TestHandler, self).__init__()
port = free_tcp_port()
self.url = "localhost:%i" % port
def on_start(self, event):
self.listener = event.container.listen(self.url)
def on_connection_closing(self, event):
event.connection.close()
self.listener.close()
test_handler = TestHandler()
container = Container(test_handler)
class ConnectionHandler(MessagingHandler):
def __init__(self):
super(ConnectionHandler, self).__init__()
def on_connection_opened(self, event):
event.connection.close()
assert event.container is event.reactor
assert event.container is container
container.connect(test_handler.url, handler=ConnectionHandler())
container.run()
def test_authentication_via_url(self):
ensureCanTestExtendedSASL()
test_handler = AuthenticationTestHandler()
container = Container(test_handler)
container.connect("%s:password@%s" % ("user%40proton", test_handler.url), reconnect=False)
container.run()
assert test_handler.verified
def test_authentication_via_container_attributes(self):
ensureCanTestExtendedSASL()
test_handler = AuthenticationTestHandler()
container = Container(test_handler)
container.user = "user@proton"
container.password = "password"
container.connect(test_handler.url, reconnect=False)
container.run()
assert test_handler.verified
def test_authentication_via_kwargs(self):
ensureCanTestExtendedSASL()
test_handler = AuthenticationTestHandler()
container = Container(test_handler)
container.connect(test_handler.url, user="user@proton", password="password", reconnect=False)
container.run()
assert test_handler.verified
class _ServerHandler(MessagingHandler):
def __init__(self, host):
super(ContainerTest._ServerHandler, self).__init__()
self.host = host
self.port = free_tcp_port()
self.client_addr = None
self.peer_hostname = None
def on_start(self, event):
self.listener = event.container.listen("%s:%s" % (self.host, self.port))
def on_connection_opened(self, event):
self.client_addr = event.connected_address
self.peer_hostname = event.connection.remote_hostname
def on_connection_closing(self, event):
event.connection.close()
self.listener.close()
class _ClientHandler(MessagingHandler):
def __init__(self):
super(ContainerTest._ClientHandler, self).__init__()
self.server_addr = None
def on_connection_opened(self, event):
self.server_addr = event.connected_address
event.connection.close()
def test_numeric_hostname(self):
ensureCanTestExtendedSASL()
server_handler = ContainerTest._ServerHandler("127.0.0.1")
client_handler = ContainerTest._ClientHandler()
container = Container(server_handler)
container.connect(url=Url(host="127.0.0.1",
port=server_handler.port),
handler=client_handler)
container.run()
assert server_handler.client_addr
assert client_handler.server_addr
assert server_handler.peer_hostname == "127.0.0.1", server_handler.peer_hostname
assert client_handler.server_addr.rsplit(':', 1)[1] == str(server_handler.port)
def test_non_numeric_hostname(self):
ensureCanTestExtendedSASL()
server_handler = ContainerTest._ServerHandler("localhost")
client_handler = ContainerTest._ClientHandler()
container = Container(server_handler)
container.connect(url=Url(host="localhost",
port=server_handler.port),
handler=client_handler)
container.run()
assert server_handler.client_addr
assert client_handler.server_addr
assert server_handler.peer_hostname == "localhost", server_handler.peer_hostname
assert client_handler.server_addr.rsplit(':', 1)[1] == str(server_handler.port)
def test_virtual_host(self):
ensureCanTestExtendedSASL()
server_handler = ContainerTest._ServerHandler("localhost")
container = Container(server_handler)
conn = container.connect(url=Url(host="localhost",
port=server_handler.port),
handler=ContainerTest._ClientHandler(),
virtual_host="a.b.c.org")
container.run()
assert server_handler.peer_hostname == "a.b.c.org", server_handler.peer_hostname
def test_no_virtual_host(self):
# explicitly setting an empty virtual host should prevent the hostname
# field from being sent in the Open performative when using the
# Python Container.
server_handler = ContainerTest._ServerHandler("localhost")
container = Container(server_handler)
conn = container.connect(url=Url(host="localhost",
port=server_handler.port),
handler=ContainerTest._ClientHandler(),
virtual_host="")
container.run()
assert server_handler.peer_hostname is None, server_handler.peer_hostname
class _ReconnectServerHandler(MessagingHandler):
def __init__(self, host, listen_on_error=True):
super(ContainerTest._ReconnectServerHandler, self).__init__()
self.host = host
self.port = free_tcp_port()
self.client_addr = None
self.peer_hostname = None
self.listen_on_error = listen_on_error
def on_connection_opened(self, event):
self.client_addr = event.connected_address
self.peer_hostname = event.connection.remote_hostname
self.listener.close()
def on_connection_closing(self, event):
event.connection.close()
def listen(self, container):
if self.listen_on_error:
self.listener = container.listen("%s:%s" % (self.host, self.port))
class _ReconnectClientHandler(MessagingHandler):
def __init__(self, server_handler):
super(ContainerTest._ReconnectClientHandler, self).__init__()
self.connect_failed = False
self.server_addr = None
self.server_handler = server_handler
def on_connection_opened(self, event):
self.server_addr = event.connected_address
event.connection.close()
def on_transport_error(self, event):
            assert not self.connect_failed
self.connect_failed = True
self.server_handler.listen(event.container)
def test_reconnect(self):
server_handler = ContainerTest._ReconnectServerHandler("localhost", listen_on_error=True)
client_handler = ContainerTest._ReconnectClientHandler(server_handler)
container = Container(server_handler)
container.connect(url=Url(host="localhost", port=server_handler.port),
handler=client_handler)
container.run()
assert server_handler.peer_hostname == 'localhost', server_handler.peer_hostname
assert client_handler.connect_failed
assert client_handler.server_addr == Url(host='localhost', port=server_handler.port), client_handler.server_addr
def test_not_reconnecting(self):
server_handler = ContainerTest._ReconnectServerHandler("localhost", listen_on_error=False)
client_handler = ContainerTest._ReconnectClientHandler(server_handler)
container = Container(server_handler)
container.connect(url=Url(host="localhost", port=server_handler.port),
handler=client_handler, reconnect=False)
container.run()
        assert server_handler.peer_hostname is None, server_handler.peer_hostname
        assert client_handler.connect_failed
        assert client_handler.server_addr is None, client_handler.server_addr
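    # Reading aid (not original commentary): the two tests above cover both
    # sides of the container's reconnect behaviour. With the default policy
    # the client retries after the initial transport error and connects once
    # the server finally listens; with reconnect=False the first failure is
    # final and no connection is ever established.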
class SelectorTest(Test):
"""Test the Selector"""
def test_unicode_selector(self):
assert Selector(u"Hello").filter_set[symbol('selector')].value == u"Hello"
def test_non_unicode_selector(self):
assert Selector(b"Hello").filter_set[symbol('selector')].value == u"Hello"
|
irinabov/debian-qpid-proton
|
python/tests/proton_tests/reactor.py
|
Python
|
apache-2.0
| 19807
|
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2008, California Institute of Technology
# Copyright (c) 2017, Albert-Ludwigs-Universität Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Andrew D. Straw
from __future__ import division, print_function
import numpy as np
import matplotlib.delaunay as dlny  # deprecated and later removed from matplotlib; scipy.spatial offers a replacement
import precomputed_buchner71 as precomputed_buchner_1971
from util import get_mean_interommatidial_distance, xyz2lonlat
from mpl_toolkits.basemap import Basemap # basemap > 0.9.9.1
def do_projection( proj, lon_lats, dists, xres = 120, yres = 100 ):
xys = np.array([proj( lon, lat ) for (lon,lat) in lon_lats ])
x = xys[:,0]
y = xys[:,1]
good = x < 1e29 # basemap seems to set bad values to 1e30
x=x[good]
y=y[good]
dists=dists[good]
    tri = dlny.Triangulation(x, y)
    interp = tri.nn_interpolator(dists)
    # Evaluate the natural-neighbour interpolant on a regular grid spanning
    # the projected points, masking cells where interpolation produced NaN.
    # NB: the first mgrid axis spans y, so the X/Y naming is swapped relative
    # to convention; the downstream plots call pcolor(Y, X, Z) accordingly.
    X, Y = np.mgrid[min(y):max(y):yres * 1j, min(x):max(x):xres * 1j]
    vals = interp[min(y):max(y):yres * 1j, min(x):max(x):xres * 1j]
    Z = np.ma.masked_array(vals, mask=np.isnan(vals))
    return x, y, X, Y, Z
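def _example_projection():
    # Illustrative sketch only (not called by main()): synthetic points on the
    # sphere with made-up scalar values, gridded through the same
    # stereographic map that main() uses. All numbers here are assumptions
    # for demonstration, not data from the Buchner (1971) receptor map.
    proj = Basemap(projection='stere', resolution=None, lat_ts=0.0,
                   lat_0=0, lon_0=90, llcrnrlon=-45, urcrnrlon=-135,
                   llcrnrlat=-30, urcrnrlat=30)
    lon_lats = [(60.0, 10.0), (120.0, 10.0), (90.0, -20.0), (90.0, 25.0)]
    dists = np.array([5.0, 6.0, 7.0, 5.5])
    return do_projection(proj, lon_lats, dists, xres=20, yres=20)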
def main():
rdirs = precomputed_buchner_1971.receptor_dirs
rdir_slicer = precomputed_buchner_1971.receptor_dir_slicer
triangles = precomputed_buchner_1971.triangles
dists = np.array(get_mean_interommatidial_distance( rdirs[rdir_slicer['left']], triangles ))
R2D = 180.0/np.pi
dists = dists*R2D
lon_lats = [xyz2lonlat(*rdir) for rdir in rdirs[rdir_slicer['left']]]
stere = Basemap(projection='stere',
resolution=None,
lat_ts = 0.0,
lat_0 = 0,
lon_0 = 90,
llcrnrlon = -45,
urcrnrlon = -135,
llcrnrlat= -30,
urcrnrlat = 30,
)
x,y,X,Y,Z = do_projection(stere,lon_lats,dists)
import matplotlib
rcParams = matplotlib.rcParams
rcParams['font.size'] = 10
rcParams['font.family'] = 'serif'
#rcParams['font.serif'] = 'Times'
#rcParams['font.sans-serif'] = 'Arial'
if 0:
from matplotlib import verbose
verbose.level = 'debug-annoying'
import matplotlib.pyplot as plt
import matplotlib.cm
# pcolor figure -- stereographic projection
fig3 = plt.figure(3)
ax = plt.subplot(1,1,1)
plt.pcolor(Y,X,Z,shading='flat',cmap=matplotlib.cm.jet_r)
ax.plot(x,y,'wo',ms=4.0)
ax.text( 0.5,0.99, 'dorsal',
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
ax.text( 0.01,0.5, 'anterior',
horizontalalignment='left',
transform=ax.transAxes)
ax.text( 0.99,0.5, 'posterior',
horizontalalignment='right',
transform=ax.transAxes)
# draw parallels and meridians.
delat = 20.
circles = np.arange(0.,90.,delat).tolist()+\
np.arange(-delat,-90,-delat).tolist()
#biw.stere.drawparallels(circles,ax=ax)
stere.drawparallels(circles,ax=ax)
delon = 45.
meridians = np.arange(-180,180,delon)
stere.drawmeridians(meridians,ax=ax)
cbar = plt.colorbar()
cbar.ax.set_ylabel('mean inter-ommatidial distance (deg)')
save_fname = 'interommatidial_distance.png'
print('saving',save_fname)
fig3.savefig(save_fname)
print('OK')
# contour figure -- stereographic projection
fig1 = plt.figure(1)
ax = plt.subplot(1,1,1)
CS = plt.contour(Y,X,Z,
#levels=np.linspace(4,16,25),
levels=np.linspace(4,16,13),
colors='k',
)
plt.clabel(CS,
fmt='%.1f',
colors='k')
ax.plot(x,y,'wo',ms=4.0)
ax.text( 0.5,0.99, 'dorsal',
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
ax.text( 0.01,0.5, 'anterior',
horizontalalignment='left',
transform=ax.transAxes)
ax.text( 0.99,0.5, 'posterior',
horizontalalignment='right',
transform=ax.transAxes)
# draw parallels and meridians.
delat = 20.
circles = np.arange(0.,90.,delat).tolist()+\
np.arange(-delat,-90,-delat).tolist()
stere.drawparallels(circles,ax=ax)
delon = 45.
meridians = np.arange(-180,180,delon)
stere.drawmeridians(meridians,
ax=ax)
# contour figure -- orthographic projection
fig2 = plt.figure(2)
# Match projection of
# http://jeb.biologists.org/cgi/content/full/209/21/4339/FIG1
ortho = Basemap(projection='ortho',
lat_0=10,
lon_0=20,
resolution=None,
)
x,y,X,Y,Z = do_projection(ortho,lon_lats,dists,xres=500,yres=500)
ax = plt.subplot(1,1,1)
CS = plt.contour(Y,X,Z,
#levels=np.linspace(4,16,25),
levels=np.arange(4,16.5,1.0),
colors='k',
linewidths=2.0,
)
plt.clabel(CS,fmt='%.1f',colors='k')
#ax.plot(x,y,'wo',ms=4.0)
ax.set_aspect('equal')
# draw parallels and meridians.
delat = 10.
circles = np.arange(0.,90.,delat).tolist()+\
np.arange(-delat,-90,-delat).tolist()
    circles = [c for c in circles if c != 0.0]
ortho.drawparallels(circles,ax=ax)
ortho.drawparallels([0.0],linestyle='-',dashes=[],ax=ax)
delon = 10.
meridians = np.arange(-180,180,delon).tolist()
meridians = [m for m in meridians if m!=0.0]
ortho.drawmeridians(meridians,ax=ax)
ortho.drawmeridians([0.0],linestyle='-',dashes=[],ax=ax)
ortho.drawmapboundary(ax=ax)
# Text p. 12 of Heisenberg, M. and Wolf, R., (1984) implies this was a
# female.
#plt.title(unichr(2640)+' average inter-ommatidial angle (degrees)')
plt.title('female average inter-ommatidial angle (degrees)')
save_fname = 'interommatidial_distance_ortho.png'
print('saving',save_fname)
fig2.savefig(save_fname)
print('OK')
plt.show()
if __name__ == '__main__':
main()
|
strawlab/drosophila_eye_map
|
drosophila_eye_map/make_buchner_interommatidial_distance_figure.py
|
Python
|
bsd-2-clause
| 7446
|
# -*- coding: utf-8 -*-
# The definition of an OrObject
class OrObject(object):
overrides = {}
def __init__(self, name="[anon]", classobj=None):
assert type(name) == type(""), "Name must be a string"
self.dict = {}
self.set("$$name", name)
self.set("$$class", classobj)
def ispy(self):
return "$$python" in self.dict
def topy(self):
try:
return self.dict["$$python"]
except KeyError:
raise NotImplementedError
def isnil(self):
return self.ispy() and self.topy() is None
def tagged(self, tag):
return self.has("$$tags") and tag in self.get("$$tags")
def get(self, key):
if key == "$$dict":
return OrObject.from_py(self.dict)
elif key in self.dict:
val = self.dict[key]
elif self.ispy() and hasattr(self.topy(), key):
return getattr(self.topy(), key)
else:
raise AttributeError(key + " is not an attribute of " + repr(self))
if self.ispy() or not isinstance(val, OrObject) or key in ("$$class", ) or not callable(val):
return val
elif val.tagged("Static"):
return val
elif val.tagged("Class"):
if isinstance(self.get("$$class"), OrObject):
def classmethod_wrapper(*args, **kwargs):
return val(self.get("$$class"), *args, **kwargs)
else:
def classmethod_wrapper(*args, **kwargs):
return val(self, *args, **kwargs)
return classmethod_wrapper
elif isinstance(self.get("$$class"), OrObject):
def instancemethod_wrapper(*args, **kwargs):
return val(self, *args, **kwargs)
return instancemethod_wrapper
else:
return val
def set(self, key, value):
self.dict[key] = value
def delete(self, key):
try:
del self.dict[key]
except KeyError:
delattr(self.topy(), key)
def has(self, key):
return key in self.dict or self.ispy() and hasattr(self.topy(), key)
def __str__(self):
if self.has("$$str"):
return str(self.get("$$str")())
elif self.ispy():
return str(self.topy())
else:
s = "<"
if self.has("$$class"):
if isinstance(self.get("$$class"), OrObject) and self.get("$$class").has("$$name"):
s += str(self.get("$$class").get("$$name")) + " "
elif hasattr(self.get("$$class"), "__name__"):
if hasattr(self.get("$$class"), "class_name"):
s += self.get("$$class").class_name + " "
else:
s += str(self.get("$$class").__name__) + " "
if self.has("$$name"):
s += str(self.get("$$name"))
return s + ">"
def __repr__(self):
if isinstance(self.get("$$class"), OrObject) and \
self.get("$$class").has("$$repr"):
return str(self.get("$$class").get("$$repr")(self))
elif self.has("$$repr"):
return str(self.get("$$repr")())
elif self.ispy():
return repr(self.topy())
else:
return self.__str__()
def __eq__(self, other):
a = self.topy() if self.ispy() else self
b = other.topy() if hasattr(other, "ispy") and other.ispy() else other
return a == b
def __hash__(self):
if self.ispy():
try:
return hash(self.topy())
except TypeError:
return id(self.topy())
else:
return id(self)
def del_(self):
if self.ispy() and hasattr(self.topy(), "__del__"):
return self.topy().__del__
elif self.has("$$del"):
return self.get("$$del")()
def __iter__(self):
if self.ispy() and hasattr(self.topy(), "__iter__"):
import itertools
return itertools.imap(OrObject.from_py, self.topy().__iter__())
elif self.has("$$iter"):
return self.get("$$iter")()
else:
            raise AttributeError("Iteration not supported by %s" % repr(self))
def __nonzero__(self):
if self.ispy():
return bool(self.topy())
elif self.has("$$bool"):
return self.get("$$bool")()
else:
return True
def __getattr__(self, key):
if key in self.__dict__ and key.startswith("__") and \
key not in ("__class__", "__name__",
"__dict__", "__weakref__", "__getattr__", "__setattr__",
"__delattr__"):
setattr(OrObject, key, mk_method(key))
def wrap_method(*args, **kwargs):
return getattr(OrObject, key)(self, *args, **kwargs)
return wrap_method
elif key in self.__dict__:
            return self.__dict__[key]
else:
raise AttributeError
@classmethod
def register(cls, new, *args):
for i in args:
cls.overrides[i] = new
@staticmethod
def from_py(obj):
if isinstance(obj, OrObject): return obj
if type(obj) in OrObject.overrides:
return OrObject.overrides[type(obj)](obj)
n = obj.__name__ if hasattr(obj, "__name__") else "[anon]"
c = type(obj) if hasattr(obj, "__class__") else None
np = OrObject(n, c)
np.set("$$python", obj)
return np
def mk_method(name):
def method_wrapper(self, *args):
if self.ispy() and hasattr(self.topy(), name) and all(not hasattr(i, "ispy") or i.ispy() for i in args):
args = map(lambda x: x.topy() if hasattr(x, "ispy") else x, args)
obj = getattr(self.topy(), name)
return obj(*args)
else:
if "attr" in name:
raise AttributeError("Object does not support this operation")
elif name == "__nonzero__":
return True
else:
raise NotImplementedError
return method_wrapper
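def _usage_sketch():
    # Illustrative only, not part of the original module; assumes Python 2,
    # matching the itertools.imap and __nonzero__ usage above.
    wrapped = OrObject.from_py([1, 2, 3])
    assert wrapped.ispy() and wrapped.topy() == [1, 2, 3]
    assert wrapped == [1, 2, 3]  # __eq__ unwraps Python-backed values
    assert str(wrapped) == "[1, 2, 3]"  # __str__ delegates to the wrapped list
    assert [item.topy() for item in wrapped] == [1, 2, 3]  # __iter__ re-wraps elements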
|
pavpanchekha/oranj
|
oranj/core/objects/orobject.py
|
Python
|
gpl-3.0
| 6197
|
from django.shortcuts import render, get_object_or_404
from django.views import generic
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from rest_framework import reverse
from druidapi.query.models import QueryModel
from models import Result
from forms import SearchForm
import requests
import json
class IndexView(generic.View):
"""
    The view for the main page, which renders the search form
"""
def get(self, request):
        form = SearchForm()  # unbound form for the initial page
return render(request, 'index.html', {'form': form})
def post(self, request):
form = SearchForm(request.POST)
if form.is_valid():
            # A bit of cheating: ideally the HTML would handle this, but I
            # felt like building the webapp in Django... alternatively, I
            # could just reach over and build this directly.
start = form.cleaned_data['start'].isoformat()
end = form.cleaned_data['end'].isoformat()
# POST the query and return the pk, so we can look it up later
r = requests.post('http://localhost:9000/api/query/', data={'start_date': start, 'end_date': end})
            # objects.create() already persists the row, so no extra save() is needed
            result = Result.objects.create(key=r.json()["pk"])
# To the results!
return HttpResponseRedirect("/{0}/".format(r.json()["pk"]))
else:
return render(request, 'index.html', {'form': form})
class ResultsView(generic.View):
"""
    Displays the results once the search has been executed.
"""
def get(self, request, pk):
result = Result.objects.get(key=pk)
# GET the results for the key we're given
r = requests.get("http://localhost:9000/api/query/{0}/execute/".format(pk))
result.data = r.json()
return render(request, 'results.html', {'result': result})
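# A hypothetical urls.py wiring for the two views above; the module path and
# pattern names are assumptions, not taken from this project:
#
# from django.conf.urls import url
# from frontend.views import IndexView, ResultsView
#
# urlpatterns = [
#     url(r'^$', IndexView.as_view(), name='index'),
#     url(r'^(?P<pk>\d+)/$', ResultsView.as_view(), name='results'),
# ]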
|
nalabelle/druid-django
|
frontend/views.py
|
Python
|
mit
| 1868