| text (stringlengths 4–1.02M) | meta (dict) |
|---|---|
# [START apikeys_v2_generated_ApiKeys_UpdateKey_async]
from google.cloud import api_keys_v2
async def sample_update_key():
# Create a client
client = api_keys_v2.ApiKeysAsyncClient()
# Initialize request argument(s)
request = api_keys_v2.UpdateKeyRequest(
)
# Make the request
    operation = await client.update_key(request=request)
print("Waiting for operation to complete...")
response = await operation.result()
# Handle the response
print(response)
# [END apikeys_v2_generated_ApiKeys_UpdateKey_async]
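# Example driver (illustrative, not part of the generated sample): the
# coroutine needs an event loop to run, e.g. asyncio from the standard
# library.
if __name__ == "__main__":
    import asyncio
    asyncio.run(sample_update_key())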
|
{
"content_hash": "28cadaf8b1e129732f8895f42c34747c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 22.454545454545453,
"alnum_prop": 0.694331983805668,
"repo_name": "googleapis/python-api-keys",
"id": "1aa1d4b0a9d5c9139ef5d856e50c1e986fec0216",
"size": "1867",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/apikeys_v2_generated_api_keys_update_key_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "472493"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.Packaging.targz
The targz SRC packager.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/targz.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
bld = env['BUILDERS']['Tar']
bld.set_suffix('.tar.gz')
target, source = stripinstallbuilder(target, source, env)
target, source = putintopackageroot(target, source, env, PACKAGEROOT)
return bld(env, target, source, TARFLAGS='-zc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
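# Usage sketch (hypothetical SConstruct, not part of this module): this
# packager is normally reached via the Package builder with the
# ``src_targz`` package type, e.g.:
#
#   env = Environment(tools=['default', 'packaging'])
#   env.Package(NAME='foo', VERSION='1.0', PACKAGETYPE='src_targz',
#               source=env.FindInstalledFiles())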
|
{
"content_hash": "d67f31a28e1dd7998d80cf67aaff9d64",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 114,
"avg_line_length": 40.38636363636363,
"alnum_prop": 0.7540799099606078,
"repo_name": "pzajda/eloquence",
"id": "d0c80dbfeb412f48bc8610de7a79c5e24b342c0e",
"size": "1777",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scons-local-2.5.0/SCons/Tool/packaging/targz.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1927564"
},
{
"name": "Smarty",
"bytes": "226"
}
],
"symlink_target": ""
}
|
from ragendja.settings_post import *
add_uncombined_app_media(globals(), 'django_aep_export.admin_media')
|
{
"content_hash": "db07d1916477c766676d165b15eaff5f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 68,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.7757009345794392,
"repo_name": "tallstreet/jaikuenginepatch",
"id": "ab7cb650ba3e4411d774ef7f4a5d869eabfda1ec",
"size": "107",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aepcommon/django_aep_export/admin_media/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "115039"
},
{
"name": "Python",
"bytes": "1011754"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5208"
}
],
"symlink_target": ""
}
|
"""Provides agency object."""
from __future__ import absolute_import
from .. import t1types
from ..entity import Entity
class Agency(Entity):
"""Agency entity."""
collection = 'agencies'
resource = 'agency'
_relations = {
'organization', 'billing_contact', 'sales_contact',
'traffic_contact',
}
_dmp_settings = t1types.enum({'disabled', 'inherits'}, 'inherits')
_pull = {
'allow_x_adv_optimization': t1types.int_to_bool,
'allow_x_adv_pixels': t1types.int_to_bool,
'billing_contact_id': int,
'created_on': t1types.strpt,
'dmp_enabled': None,
'id': int,
'logo': None,
'name': None,
'organization_id': int,
'sales_contact_id': int,
'status': t1types.int_to_bool,
'traffic_contact_id': int,
'updated_on': t1types.strpt,
'version': int,
}
_push = _pull.copy()
_push.update({
'allow_x_adv_optimization': int,
'allow_x_adv_pixels': int,
'dmp_enabled': _dmp_settings,
'status': int,
})
def __init__(self, session, properties=None, **kwargs):
super(Agency, self).__init__(session, properties, **kwargs)
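# Usage sketch (hypothetical credentials, not part of this module): agencies
# are normally created through a T1 session rather than instantiated
# directly, e.g.:
#
#   from terminalone import T1
#   t1 = T1('username', 'password', 'api_key', auth_method='cookie')
#   agency = t1.new('agency')
#   agency.name = 'Example Agency'
#   agency.save()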
|
{
"content_hash": "b12eb076d3192cbf8f1bc24243db9ec5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 70,
"avg_line_length": 28.904761904761905,
"alnum_prop": 0.5634266886326195,
"repo_name": "MediaMath/t1-python",
"id": "8df74a9e3ee595ac47bec430148d5640d38c0c15",
"size": "1238",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "terminalone/models/agency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "238783"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.files import File
from django.core.files.storage import default_storage
from django.core.urlresolvers import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Context, Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token.token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
"""
Renders fields for a form with an optional template choice.
"""
context["form_for_fields"] = form
return get_template(template).render(Context(context))
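# Template usage sketch (illustrative): {% fields_for some_form %}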
@register.inclusion_tag("includes/form_errors.html", takes_context=True)
def errors_for(context, form):
"""
Renders an alert if the form has any errors.
"""
context["form"] = form
return context
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
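# Template usage sketch (illustrative): {{ object_list|sort_by:"title" }}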
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag will still try and find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full URL for a Gravatar given an email hash.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and data for persisting querystring
in pagination links. Can also contain a comma separated string of
var names in the current querystring to exclude from the pagination
links, via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
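# Template usage sketch (illustrative, assuming a paginated ``objects``):
# {% pagination_for objects %}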
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
context["search_model_choices"] = sorted(search_model_choices)
return context
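# Template usage sketch (illustrative): {% search_form "all" %}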
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
top=.5, padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width
and height on the first time it is requested, and returns the URL
to the new resized image. If width or height are zero then original
ratio is maintained. When ``upscale`` is False, images smaller than
the given size will not be grown to fill that size. The given width
and height thus act as maximum dimensions.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if not upscale:
thumb_name += "-no-upscale"
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
try:
os.makedirs(thumb_dir)
except OSError:
pass
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but
        # the locale has since changed and the filesystem no longer
        # supports utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
if not upscale:
to_width = min(to_width, from_width)
to_height = min(to_height, from_height)
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(thumb_url, File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
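# Template usage sketch (illustrative):
# <img src="{{ MEDIA_URL }}{% thumbnail page.image 90 90 %}">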
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
context["has_site_permission"] = has_site_permission(user)
if settings.INLINE_EDITING_ENABLED and context["has_site_permission"]:
t = get_template("includes/editable_toolbar.html")
context["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
try:
context["editable_obj"]
except KeyError:
context["editable_obj"] = context.get("page", None)
context["toolbar"] = t.render(Context(context))
context["richtext_media"] = RichTextField().formfield().widget.media
return context
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
filter_names = settings.RICHTEXT_FILTERS
if not filter_names:
try:
filter_names = [settings.RICHTEXT_FILTER]
except AttributeError:
pass
else:
from warnings import warn
warn("The `RICHTEXT_FILTER` setting is deprecated in favor of "
"the new plural setting `RICHTEXT_FILTERS`.")
for filter_name in filter_names:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
return content
@register.filter
def richtext_filter(content):
"""
Deprecated version of richtext_filters above.
"""
from warnings import warn
warn("The `richtext_filter` template tag is deprecated in favor of "
"the new plural tag `richtext_filters`.")
return richtext_filters(content)
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(Context(context))
return parsed
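# Template usage sketch (illustrative):
# {% editable page.title %}{{ page.title }}{% endeditable %}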
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for
url names in admin templates as these won't resolve when admin
tests are running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
Adopted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
group_title, items = group
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
item_title, item = item
else:
item_title = None
menu_order[item] = (group_index, group_title,
item_index, item_title)
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if in_menu and request.user.has_module_perms(opts.app_label):
perms = model_admin.get_model_perms(request)
admin_url_name = ""
if perms["change"]:
admin_url_name = "changelist"
change_url = admin_url(model, admin_url_name)
else:
change_url = None
if perms["add"]:
admin_url_name = "add"
add_url = admin_url(model, admin_url_name)
else:
add_url = None
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
try:
app_index, app_title, model_index, model_title = \
menu_order[model_label]
except KeyError:
app_index = None
app_title = opts.app_config.verbose_name.title()
model_index = None
model_title = None
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural)
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": model_index,
"perms": model_admin.get_model_perms(request),
"name": model_title,
"admin_url": change_url,
"add_url": add_url
})
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
app_index, app_title, item_index, item_title = item
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": item_index,
"perms": {"custom": True},
"name": item_title,
"admin_url": item_url,
})
app_list = list(app_dict.values())
sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
sites = user.sitepermissions.sites.all()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(Context(context)))
return "".join(output)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url de %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
if context['request'].META["QUERY_STRING"]:
url += "?" + context['request'].META["QUERY_STRING"]
return url
|
{
"content_hash": "22e830af9ce7ca1f80e0acf0ffa7c38c",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 79,
"avg_line_length": 35.58152958152958,
"alnum_prop": 0.6091734933895693,
"repo_name": "geodesign/mezzanine",
"id": "f9027b6e00f98b33dafe78fd83975d0b27368c0d",
"size": "24658",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mezzanine/core/templatetags/mezzanine_tags.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "60198"
},
{
"name": "HTML",
"bytes": "89068"
},
{
"name": "JavaScript",
"bytes": "453635"
},
{
"name": "Python",
"bytes": "654225"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# PythonJS to JavaScript Translator
# by Amirouche Boubekki and Brett Hartshorn - copyright 2013
# License: "New BSD"
import os, sys
from types import GeneratorType
import ast
from ast import Str
from ast import Name
from ast import Tuple
from ast import parse
from ast import Attribute
from ast import NodeVisitor
import typedpython
import ast_utils
class SwapLambda( RuntimeError ):
def __init__(self, node):
self.node = node
RuntimeError.__init__(self)
class JSGenerator(ast_utils.NodeVisitorBase):
def __init__(self, source, requirejs=True, insert_runtime=True, webworker=False, function_expressions=True, fast_javascript=False, fast_loops=False):
assert source
ast_utils.NodeVisitorBase.__init__(self, source)
self._fast_js = fast_javascript
self._fast_loops = fast_loops
self._func_expressions = function_expressions
self._indent = 0
self._global_functions = {}
self._function_stack = []
self._requirejs = requirejs
self._insert_runtime = insert_runtime
self._webworker = webworker
self._exports = set()
self._inline_lambda = False
self.catch_call = set() ## subclasses can use this to catch special calls
self.special_decorators = set(['__typedef__', '__glsl__', '__pyfunction__', 'expression'])
self._glsl = False ## TODO deprecate
self._has_glsl = False ## TODO deprecate
self.glsl_runtime = 'int _imod(int a, int b) { return int(mod(float(a),float(b))); }' ## TODO deprecate
self._typed_vars = dict()
self._lua = False
self._dart = False
self._go = False
self._rust = False
self._cpp = False
self._cheader = []
self._cppheader = []
self._cpp_class_impl = []
self._match_stack = [] # dicts of cases
self._rename_hacks = {} ## used by c++ backend, to support `if isinstance`
self._globals = {} ## name : type
def reset(self):
self._cheader = []
self._cppheader = []
self._cpp_class_impl = []
self._match_stack = []
def is_prim_type(self, T):
prims = 'bool int float double long string str char byte i32 i64 f32 f64 std::string cstring'.split()
if T in prims:
return True
else:
return False
def indent(self): return '\t' * self._indent
def push(self): self._indent += 1
def pull(self):
if self._indent > 0: self._indent -= 1
def visit_ClassDef(self, node):
raise NotImplementedError(node)
def visit_Global(self, node):
return '/*globals: %s */' %','.join(node.names)
def visit_Assign(self, node):
		# XXX: not clear why targets is a list, since multiple targets arrive inside a Tuple
target = node.targets[0]
if isinstance(target, Tuple):
raise NotImplementedError('target tuple assignment should have been transformed to flat assignment by python_to_pythonjs.py')
else:
target = self.visit(target)
value = self.visit(node.value)
code = '%s = %s;' % (target, value)
if self._requirejs and target not in self._exports and self._indent == 0 and '.' not in target:
self._exports.add( target )
return code
def visit_AugAssign(self, node):
## n++ and n-- are slightly faster than n+=1 and n-=1
target = self.visit(node.target)
op = self.visit(node.op)
value = self.visit(node.value)
if op=='+' and isinstance(node.value, ast.Num) and node.value.n == 1:
a = '%s ++;' %target
		elif op=='-' and isinstance(node.value, ast.Num) and node.value.n == 1:
a = '%s --;' %target
else:
a = '%s %s= %s;' %(target, op, value)
return a
def visit_With(self, node):
'''
unified with-hacked syntax for all backends
'''
r = []
is_select = False
is_switch = False
is_match = False
is_case = False
is_extern = False
has_default = False
if isinstance( node.context_expr, ast.Name ) and node.context_expr.id == '__default__':
has_default = True
if self._rust and not self._cpp:
r.append(self.indent()+'}, _ => {')
else:
r.append(self.indent()+'default:')
elif isinstance( node.context_expr, ast.Name ) and node.context_expr.id == '__select__':
is_select = True
self._match_stack.append( list() )
self._in_select_hack = True
if self._rust:
r.append(self.indent()+'select! (')
elif self._cpp:
r.append(self.indent()+'cpp::select _select_;') ## TODO nested, _select_N
else:
assert self._go
r.append(self.indent()+'select {')
elif isinstance( node.context_expr, ast.Call ):
if not isinstance(node.context_expr.func, ast.Name):
raise SyntaxError( self.visit(node.context_expr))
if len(node.context_expr.args):
a = self.visit(node.context_expr.args[0])
else:
assert len(node.context_expr.keywords)
## need to catch if this is a new variable ##
name = node.context_expr.keywords[0].arg
if name not in self._known_vars:
a = 'let %s = %s' %(name, self.visit(node.context_expr.keywords[0].value))
else:
a = '%s = %s' %(name, self.visit(node.context_expr.keywords[0].value))
if node.context_expr.func.id == '__case__':
is_case = True
case_match = None
select_hack = None
if not len(node.context_expr.args):
assert len(node.context_expr.keywords)==1
kw = node.context_expr.keywords[0]
if self._go:
case_match = '%s := %s' %(kw.arg, self.visit(kw.value))
elif self._cpp and hasattr(self, '_in_select_hack') and self._in_select_hack:
select_hack = True
case_match = '_select_.recv(%s, %s);' %(self.visit(kw.value), kw.arg)
else:
case_match = '%s = %s' %(kw.arg, self.visit(kw.value))
else:
if isinstance(node.context_expr.args[0], ast.Compare):
raise SyntaxError('"case x==n:" is not allowed in a case statement, use "case n:" instead.')
case_match = self.visit(node.context_expr.args[0])
if self._cpp and select_hack:
r.append(self.indent()+case_match)
elif self._rust and not self._cpp:
if len(self._match_stack[-1])==0:
r.append(self.indent()+'%s => {' %case_match)
else:
r.append(self.indent()+'}, %s => { ' %case_match )
else:
assert self._cpp
r.append(self.indent()+'case %s: {' %case_match) ## extra scope
self._match_stack[-1].append(case_match)
elif node.context_expr.func.id == '__switch__':
is_switch = True
self._match_stack.append( list() )
if self._rust and not self._cpp:
r.append(self.indent()+'match (%s) {' %self.visit(node.context_expr.args[0]))
is_match = True
else:
r.append(self.indent()+'switch (%s) {' %self.visit(node.context_expr.args[0]))
elif node.context_expr.func.id == 'extern':
is_extern = True
link = None
for kw in node.context_expr.keywords:
if kw.arg=='link':
link = kw.value.s
if self._cpp:
r.append('extern "C" {') ## TODO other abi's
elif self._rust:
assert link
r.append('#[link(name = "%s")]' %link)
r.append('extern {')
else:
raise SyntaxError('with extern: not supported yet for backend')
## strip the bodies from function defs, that should be just defined as `def f(args):pass`
for b in node.body:
if isinstance(b, ast.FunctionDef):
b.body = []
b.declare_only = True
else:
raise SyntaxError( 'invalid use of with: %s' %node.context_expr)
elif isinstance(node.context_expr, ast.Str):
body = []
for b in node.body: body.append(self.visit(b))
return node.context_expr.s + ';'.join(body)
elif isinstance(node.context_expr, ast.Name):
if node.context_expr.id == 'pointers':
self._shared_pointers = False
r = []
for b in node.body:
a = self.visit(b)
if a: r.append(self.indent()+a)
self._shared_pointers = True
return '\n'.join(r)
elif isinstance(node.context_expr, ast.Tuple) or isinstance(node.context_expr, ast.List):
for elt in node.context_expr.elts:
if elt.id == 'pointers':
self._shared_pointers = False
elif elt.id == 'noexcept':
self._noexcept = True
r = []
for b in node.body:
a = self.visit(b)
if a: r.append(self.indent()+a)
for elt in node.context_expr.elts:
if elt.id == 'pointers':
self._shared_pointers = True
elif elt.id == 'noexcept':
self._noexcept = False
return '\n'.join(r)
else:
raise SyntaxError( 'invalid use of with', node.context_expr)
for b in node.body:
a = self.visit(b)
if a: r.append(self.indent()+a)
if is_case and not self._rust: ## always break after each case - do not fallthru to default: block
r.append(self.indent()+'} break;') ## } extra scope
###################################
if is_extern:
r.append(self.indent()+'}')
elif is_select:
if self._cpp:
r.append(self.indent()+'_select_.wait();')
elif self._rust:
r.append(self.indent()+'})') ## rust needs extra closing brace for the match-block
else:
r.append(self.indent()+'}')
elif is_switch:
if self._rust and not self._cpp:
r.append(self.indent()+'}}') ## rust needs extra closing brace for the match-block
else:
r.append(self.indent()+'}')
return '\n'.join(r)
def _new_module(self, name='main.js'):
header = []
if self._requirejs and not self._webworker:
header.extend([
'define( function(){',
'__module__ = {}'
])
return {
'name' : name,
'header' : header,
'lines' : []
}
def visit_Module(self, node):
modules = []
mod = self._new_module()
modules.append( mod )
lines = mod['lines']
header = mod['header']
if self._insert_runtime:
dirname = os.path.dirname(os.path.abspath(__file__))
runtime = open( os.path.join(dirname, 'pythonjs.js') ).read()
lines.append( runtime ) #.replace('\n', ';') )
for b in node.body:
if isinstance(b, ast.Expr) and isinstance(b.value, ast.Call) and isinstance(b.value.func, ast.Name) and b.value.func.id == '__new_module__':
mod = self._new_module( '%s.js' %b.value.args[0].id )
modules.append( mod )
lines = mod['lines']
header = mod['header']
else:
line = self.visit(b)
if line: lines.append( line )
if self._requirejs and not self._webworker:
for name in self._exports:
if name.startswith('__'): continue
lines.append( '__module__.%s = %s' %(name,name))
lines.append( 'return __module__')
lines.append('}) //end requirejs define')
if len(modules) == 1:
lines = header + lines
## fixed by Foxboron
return '\n'.join(l if isinstance(l,str) else l.encode("utf-8") for l in lines)
else:
d = {}
for mod in modules:
lines = mod['header'] + mod['lines']
d[ mod['name'] ] = '\n'.join(l if isinstance(l,str) else l.encode("utf-8") for l in lines)
return d
def visit_Expr(self, node):
# XXX: this is UGLY
s = self.visit(node.value)
if s.strip() and not s.endswith(';'):
s += ';'
if s==';': return ''
else: return s
def visit_In(self, node):
return ' in '
def visit_Tuple(self, node):
if self._rust:
return 'vec!(%s)' % ', '.join(map(self.visit, node.elts))
elif self._cpp:
## this hack was for `for i in range(x)`.
##return 'std::array<int, %s>{{%s}}' %(len(node.elts), ','.join(map(self.visit, node.elts)))
return '{%s}' %','.join(map(self.visit, node.elts))
else:
return '[%s]' % ', '.join(map(self.visit, node.elts))
def visit_List(self, node):
a = []
for elt in node.elts:
b = self.visit(elt)
if b is None: raise SyntaxError(elt)
a.append( b )
return '[%s]' % ', '.join(a)
def visit_TryExcept(self, node):
out = []
out.append( self.indent() + 'try {' )
self.push()
out.extend(
list( map(self.visit, node.body) )
)
self.pull()
out.append( self.indent() + '} catch(__exception__) {' )
self.push()
out.extend(
list( map(self.visit, node.handlers) )
)
self.pull()
out.append( '}' )
return '\n'.join( out )
def visit_Raise(self, node):
if self._rust:
return 'panic!("%s");' % self.visit(node.type)
elif self._cpp:
T = self.visit(node.type)
if T == 'RuntimeError()': T = 'std::exception'
return 'throw %s;' % T
else:
return 'throw new %s;' % self.visit(node.type)
def visit_Yield(self, node):
return 'yield %s' % self.visit(node.value)
def visit_ImportFrom(self, node):
# print node.module
# print node.names[0].name
# print node.level
if self._rust:
crate = self._crates[node.module]
for alias in node.names:
crate.add( alias.name )
return ''
def visit_Import(self, node):
r = [alias.name.replace('__SLASH__', '/') for alias in node.names]
res = []
if r:
for name in r:
if self._go:
self._imports.add('import("%s");' %name)
elif self._rust:
if name not in self._crates:
self._crates[name] = set()
elif self._lua:
res.append('require "%s"' %name)
else:
raise SyntaxError('import not yet support for this backend')
if res:
return '\n'.join(res)
else:
return ''
def visit_ExceptHandler(self, node):
out = ''
if node.type:
out = 'if (__exception__ == %s || __exception__ instanceof %s) {\n' % (self.visit(node.type), self.visit(node.type))
if node.name:
out += 'var %s = __exception__;\n' % self.visit(node.name)
out += '\n'.join(map(self.visit, node.body)) + '\n'
if node.type:
out += '}\n'
return out
def visit_Lambda(self, node):
args = [self.visit(a) for a in node.args.args]
if args and args[0]=='__INLINE_FUNCTION__':
self._inline_lambda = True
#return '<LambdaError>' ## skip node, the next function contains the real def
raise SwapLambda( node )
else:
return '(function (%s) {return %s;})' %(','.join(args), self.visit(node.body))
def function_has_getter_or_setter(self, node):
options = {'getter':False, 'setter':False}
for d in node.decorator_list:
self._visit_decorator(d, options=options)
return options['getter'] or options['setter']
def _visit_decorator(self, decor, node=None, options=None, args_typedefs=None, chan_args_typedefs=None, generics=None, args_generics=None, func_pointers=None, arrays=None ):
assert node
if options is None: options = dict()
if args_typedefs is None: args_typedefs = dict()
if chan_args_typedefs is None: chan_args_typedefs = dict()
if generics is None: generics = set()
if args_generics is None: args_generics = dict()
if func_pointers is None: func_pointers = set()
if arrays is None: arrays = dict()
if isinstance(decor, ast.Name) and decor.id == 'classmethod':
options['classmethod'] = True
elif isinstance(decor, ast.Name) and decor.id == 'property':
## a function is marked as a getter with `@property`
options['getter'] = True
elif isinstance(decor, ast.Attribute) and isinstance(decor.value, ast.Name) and decor.attr == 'setter':
## a function is marked as a setter with `@name.setter`
options['setter'] = True
elif isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == '__typedef__':
if len(decor.args) == 3:
vname = self.visit(decor.args[0])
vtype = self.visit(decor.args[1])
vptr = decor.args[2].s
args_typedefs[ vname ] = '%s %s' %(vptr, vtype) ## space is required because it could have the `mut` keyword
else:
for key in decor.keywords:
if isinstance( key.value, ast.Str):
args_typedefs[ key.arg ] = key.value.s
elif isinstance(key.value, ast.Name):
T = key.value.id
if self.is_prim_type(T):
args_typedefs[key.arg] = T
else:
if not self._shared_pointers:
args_typedefs[ key.arg ] = '%s*' %T
elif self._unique_ptr:
args_typedefs[ key.arg ] = 'std::unique_ptr<%s>' %T
else:
args_typedefs[ key.arg ] = 'std::shared_ptr<%s>' %T
else:
if isinstance(key.value, ast.Call) and isinstance(key.value.func, ast.Name) and key.value.func.id=='__arg_array__':
arrays[ key.arg ] = key.value.args[0].s
dims = arrays[ key.arg ].count('[')
arrtype = arrays[ key.arg ].split(']')[-1]
## non primitive types (objects and arrays) can be None, `[]MyClass( None, None)`
## use a pointer or smart pointer.
if not self.is_prim_type(arrtype):
if not self._shared_pointers:
arrtype += '*'
elif self._unique_ptr:
arrtype = 'std::unique_ptr<%s>' %arrtype
else:
arrtype = 'std::shared_ptr<%s>' %arrtype
if self._cpp:
T = []
for i in range(dims):
if not self._shared_pointers:
T.append('std::vector<')
elif self._unique_ptr:
T.append('std::unique_ptr<std::vector<')
else:
T.append('std::shared_ptr<std::vector<')
T.append( arrtype )
if self._shared_pointers:
for i in range(dims):
T.append('>>')
else:
for i in range(dims):
if i: T.append('*>')
else: T.append('>')
T.append('*')
args_typedefs[ key.arg ] = ''.join(T)
else:
raise SyntaxError('TODO')
else:
args_typedefs[ key.arg ] = self.visit(key.value)
if args_typedefs[key.arg].startswith('func(') or args_typedefs[key.arg].startswith('lambda('):
is_lambda_style = args_typedefs[key.arg].startswith('lambda(')
func_pointers.add( key.arg )
funcdef = args_typedefs[key.arg]
## TODO - better parser
hack = funcdef.replace(')', '(').split('(')
lambda_args = hack[1].strip()
lambda_return = hack[3].strip()
if self._cpp:
if is_lambda_style:
if lambda_return: ## c++11
args_typedefs[ key.arg ] = 'std::function<%s(%s)> %s' %(lambda_return, lambda_args, key.arg)
else:
args_typedefs[ key.arg ] = 'std::function<void(%s)> %s' %(lambda_args, key.arg)
else: ## old C style function pointers
if lambda_return:
args_typedefs[ key.arg ] = '%s(*%s)(%s)' %(lambda_args, key.arg, lambda_return)
else:
args_typedefs[ key.arg ] = 'void(*%s)(%s)' %(key.arg, lambda_args)
elif self._rust:
if lambda_return:
args_typedefs[ key.arg ] = '|%s|->%s' %(lambda_args, lambda_return)
else:
args_typedefs[ key.arg ] = '|%s|' %lambda_args
elif self._dart:
args_typedefs[ key.arg ] = 'var'
## check for super classes - generics ##
if args_typedefs[ key.arg ] in self._classes:
classname = args_typedefs[ key.arg ]
options['generic_base_class'] = classname
if self._cpp:
if not self._shared_pointers:
args_typedefs[ key.arg ] = '%s*' %classname
elif self._unique_ptr:
args_typedefs[ key.arg ] = 'std::unique_ptr<%s>' %classname
else:
args_typedefs[ key.arg ] = 'std::shared_ptr<%s>' %classname
args_generics[ key.arg ] = classname
for subclass in self._classes[classname]._subclasses:
generics.add( subclass )
elif self._rust:
args_typedefs[ key.arg ] = 'Rc<RefCell<%s>>' %classname
elif self._go: ## TODO test if this is still working in the Go backend
if node.name=='__init__':
## generics type switch is not possible in __init__ because
## it is used to generate the type struct, where types are static.
## as a workaround generics passed to init always become `interface{}`
args_typedefs[ key.arg ] = 'interface{}'
#self._class_stack[-1]._struct_def[ key.arg ] = 'interface{}'
else:
generics.add( classname ) # switch v.(type) for each
generics = generics.union( self._classes[classname]._subclasses ) ## TODO
args_typedefs[ key.arg ] = 'interface{}'
args_generics[ key.arg ] = classname
elif isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == '__typedef_chan__':
for key in decor.keywords:
if isinstance(key.value, ast.Str):
chan_args_typedefs[ key.arg ] = key.value.s.strip()
else:
chan_args_typedefs[ key.arg ] = self.visit(key.value)
elif isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == 'returns':
if decor.keywords:
raise SyntaxError('invalid go return type')
elif isinstance(decor.args[0], ast.Name):
options['returns'] = decor.args[0].id
else:
options['returns'] = decor.args[0].s
if options['returns'].startswith('[]'):
options['returns_array'] = True
options['returns_array_dim'] = options['returns'].count('[]')
options['returns_array_type'] = options['returns'].split(']')[-1]
if self._cpp:
if options['returns_array_type']=='string':
options['returns_array_type'] = 'std::string'
T = []
for i in range(options['returns_array_dim']):
if not self._shared_pointers:
T.append('std::vector<')
elif self._unique_ptr:
T.append('std::unique_ptr<std::vector<')
else:
T.append('std::shared_ptr<std::vector<')
T.append(options['returns_array_type'])
if self._shared_pointers:
for i in range(options['returns_array_dim']):
T.append('>>')
else:
for i in range(options['returns_array_dim']):
if i: T.append('*>')
else: T.append('>')
T.append('*')
options['returns'] = ''.join(T)
elif self._rust:
raise SyntaxError('TODO return 2d array rust backend')
else:
raise SyntaxError('TODO return 2d array some backend')
if options['returns'] == 'self':
options['returns_self'] = True
self.method_returns_multiple_subclasses[ self._class_stack[-1].name ].add(node.name)
if self._go:
options['returns'] = '*' + self._class_stack[-1].name ## go hacked generics
def visit_FunctionDef(self, node):
self._function_stack.append( node )
node._local_vars = set()
buffer = self._visit_function( node )
if node == self._function_stack[0]: ## could do something special here with global function
#buffer += 'pythonjs.%s = %s' %(node.name, node.name) ## this is no longer needed
self._global_functions[ node.name ] = node
self._function_stack.pop()
return buffer
def _visit_function(self, node):
comments = []
body = []
is_main = node.name == 'main'
is_annon = node.name == ''
is_pyfunc = False
is_prototype = False
protoname = None
func_expr = False ## function expressions `var a = function()` are not hoisted
func_expr_var = True
for decor in node.decorator_list:
if isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == 'expression':
assert len(decor.args)==1
func_expr = True
func_expr_var = isinstance(decor.args[0], ast.Name)
node.name = self.visit(decor.args[0])
elif isinstance(decor, ast.Name) and decor.id == '__pyfunction__':
is_pyfunc = True
elif isinstance(decor, ast.Call) and isinstance(decor.func, ast.Name) and decor.func.id == '__prototype__': ## TODO deprecated
assert len(decor.args)==1
is_prototype = True
protoname = decor.args[0].id
args = self.visit(node.args)
if is_prototype:
fdef = '%s.prototype.%s = function(%s)' % (protoname, node.name, ', '.join(args))
elif len(self._function_stack) == 1:
## this style will not make function global to the eval context in NodeJS ##
#buffer = self.indent() + 'function %s(%s) {\n' % (node.name, ', '.join(args))
## note if there is no var keyword and this function is at the global level,
## then it should be callable from eval in NodeJS - this is not correct.
			## in fact, var should always be used with function expressions.
if self._func_expressions or func_expr:
if func_expr_var:
fdef = 'var %s = function(%s)' % (node.name, ', '.join(args))
else:
fdef = '%s = function(%s)' % (node.name, ', '.join(args))
else:
fdef = 'function %s(%s)' % (node.name, ', '.join(args))
if self._requirejs and node.name not in self._exports:
self._exports.add( node.name )
else:
if self._func_expressions or func_expr:
if func_expr_var:
fdef = 'var %s = function(%s)' % (node.name, ', '.join(args))
else:
fdef = '%s = function(%s)' % (node.name, ', '.join(args))
else:
fdef = 'function %s(%s)' % (node.name, ', '.join(args))
body.append( fdef )
body.append( self.indent() + '{' )
self.push()
next = None
for i,child in enumerate(node.body):
if isinstance(child, Str) or hasattr(child, 'SKIP'):
continue
elif isinstance(child, ast.Expr) and isinstance(child.value, ast.Str):
comments.append('/* %s */' %child.value.s.strip() )
continue
#try:
# v = self.visit(child)
#except SwapLambda as error:
# error.node.__class__ = ast.FunctionDef
# next = node.body[i+1]
# if not isinstance(next, ast.FunctionDef):
# raise SyntaxError('inline def is only allowed in javascript mode')
# error.node.__dict__ = next.__dict__
# error.node.name = ''
# v = self.visit(child)
v = self.try_and_catch_swap_lambda(child, node.body)
if v is None:
msg = 'error in function: %s'%node.name
msg += '\n%s' %child
raise SyntaxError(msg)
else:
body.append( self.indent()+v)
#buffer += '\n'.join(body)
self.pull()
#buffer += '\n%s}' %self.indent()
body.append( self.indent() + '}' )
buffer = '\n'.join( comments + body )
#if self._inline_lambda:
# self._inline_lambda = False
if is_annon:
buffer = '__wrap_function__(' + buffer + ')'
elif is_pyfunc:
## TODO change .is_wrapper to .__pyfunc__
buffer += ';%s.is_wrapper = true;' %node.name
else:
buffer += '\n'
return self.indent() + buffer
def try_and_catch_swap_lambda(self, child, body):
try:
return self.visit(child)
except SwapLambda as e:
next = None
for i in range( body.index(child), len(body) ):
n = body[ i ]
if isinstance(n, ast.FunctionDef):
if hasattr(n, 'SKIP'):
continue
else:
next = n
break
assert next
next.SKIP = True
e.node.__class__ = ast.FunctionDef
e.node.__dict__ = next.__dict__
e.node.name = ''
return self.try_and_catch_swap_lambda( child, body )
def _visit_subscript_ellipsis(self, node):
name = self.visit(node.value)
return '%s["$wrapped"]' %name
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Ellipsis):
return self._visit_subscript_ellipsis( node )
else:
return '%s[%s]' % (self.visit(node.value), self.visit(node.slice))
def visit_Index(self, node):
return self.visit(node.value)
def visit_Slice(self, node):
raise SyntaxError('list slice') ## slicing not allowed here at js level
def visit_arguments(self, node):
out = []
for name in [self.visit(arg) for arg in node.args]:
out.append(name)
return out
def visit_Name(self, node):
if node.id == 'None':
return 'null'
elif node.id == 'True':
return 'true'
elif node.id == 'False':
return 'false'
elif node.id == 'null':
return 'null'
return node.id
def visit_Attribute(self, node):
name = self.visit(node.value)
attr = node.attr
return '%s.%s' % (name, attr)
def visit_Print(self, node):
args = [self.visit(e) for e in node.values]
s = 'console.log(%s);' % ', '.join(args)
return s
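	# e.g. the Python 2 statement `print a, b` translates to `console.log(a, b);`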
def visit_keyword(self, node):
if isinstance(node.arg, basestring):
return node.arg, self.visit(node.value)
return self.visit(node.arg), self.visit(node.value)
def _visit_call_helper_instanceof(self, node):
args = map(self.visit, node.args)
if len(args) == 2:
return '%s instanceof %s' %tuple(args)
else:
raise SyntaxError( args )
def _visit_call_helper_new(self, node):
args = map(self.visit, node.args)
if len(args) == 1:
return ' new %s' %args[0]
else:
raise SyntaxError( args )
def _visit_call_special( self, node ):
raise NotImplementedError('special call')
def parse_go_style_arg( self, s ):
if isinstance(s, ast.Str): s = s.s
return s.split(']')[-1]
def _visit_call_helper_go(self, node):
go_types = 'bool string int float64'.split()
name = self.visit(node.func)
if name == '__go__':
if self._cpp:
## simple auto threads
thread = '__thread%s__' %len(self._threads)
self._threads.append(thread)
closure_wrapper = '[&]{%s;}'%self.visit(node.args[0])
return 'std::thread %s( %s );' %(thread, closure_wrapper)
elif self._rust:
#return 'spawn( move || {%s;} );' % self.visit(node.args[0])
return 'Thread::spawn( move || {%s;} );' % self.visit(node.args[0])
elif self._dart:
return 'Isolate.spawn(%s);' %self.visit(node.args[0])
else:
return 'go %s' %self.visit(node.args[0])
elif name == '__go_make__':
if len(node.args)==2:
return 'make(%s, %s)' %(self.visit(node.args[0]), self.visit(node.args[1]))
elif len(node.args)==3:
			return 'make(%s, %s, %s)' %(self.visit(node.args[0]), self.visit(node.args[1]), self.visit(node.args[2]))
else:
raise SyntaxError('go make requires 2 or 3 arguments')
elif name == '__go_make_chan__':
## channel constructors
if self._cpp:
## cpp-channel API supports input/output
return 'cpp::channel<%s>{}'%self.visit(node.args[0])
elif self._rust:
## rust returns a tuple input/output that needs to be destructured by the caller
return 'channel::<%s>()' %self.visit(node.args[0])
else: ## Go
return 'make(chan %s)' %self.visit(node.args[0])
elif name == '__go__array__':
if isinstance(node.args[0], ast.BinOp):# and node.args[0].op == '<<': ## todo assert right is `typedef`
a = self.visit(node.args[0].left)
if a in go_types:
if self._go:
return '*[]%s' %a
elif self._rust:
return '&mut Vec<%s>' %a ## TODO test this
else:
raise RuntimeError('todo')
else:
return '*[]*%s' %a ## todo - self._catch_assignment_array_of_obs = true
else:
a = self.visit(node.args[0])
if a in go_types:
return '[]%s{}' %a
else:
return '[]*%s{}' %a
elif name == '__go__addr__':
return '&%s' %self.visit(node.args[0])
else:
raise SyntaxError(name)
def visit_Call(self, node):
name = self.visit(node.func)
if name in typedpython.GO_SPECIAL_CALLS.values():
return self._visit_call_helper_go( node )
elif name in self.catch_call:
return self._visit_call_special( node )
elif name == 'instanceof': ## this gets used by "with javascript:" blocks to test if an instance is a JavaScript type
return self._visit_call_helper_instanceof( node )
elif name == 'new':
return self._visit_call_helper_new( node )
elif name == '__ternary_operator__':
args = map(self.visit, node.args)
if len(args) == 2:
return '((%s) ? %s : %s)' %(args[0], args[0], args[1])
elif len(args) == 3:
return '((%s) ? %s : %s)' %(args[0], args[1], args[2])
else:
raise SyntaxError( args )
elif name == 'numpy.array':
return self._visit_call_helper_numpy_array(node)
elif name == 'JSObject':
return self._visit_call_helper_JSObject( node )
elif name == 'var':
return self._visit_call_helper_var( node )
elif name == 'JSArray':
return self._visit_call_helper_JSArray( node )
elif name == 'inline' or name == 'JS':
assert len(node.args)==1 and isinstance(node.args[0], ast.Str)
return self._inline_code_helper( node.args[0].s )
elif name == 'dart_import':
if len(node.args) == 1:
return 'import "%s";' %node.args[0].s
elif len(node.args) == 2:
return 'import "%s" as %s;' %(node.args[0].s, node.args[1].s)
else:
raise SyntaxError
elif name == 'list':
return self._visit_call_helper_list( node )
elif name == '__get__' and len(node.args)==2 and isinstance(node.args[1], ast.Str) and node.args[1].s=='__call__':
return self._visit_call_helper_get_call_special( node )
elif name.split('.')[-1] == '__go__receive__':
raise SyntaxError('this should not happen __go__receive__')
else:
return self._visit_call_helper(node)
def _visit_call_helper(self, node):
if node.args:
args = [self.visit(e) for e in node.args]
args = ', '.join([e for e in args if e])
else:
args = ''
fname = self.visit(node.func)
if fname=='__DOLLAR__': fname = '$'
return '%s(%s)' % (fname, args)
def inline_helper_remap_names(self, remap):
return "var %s;" %','.join(remap.values())
def inline_helper_return_id(self, return_id):
return "var __returns__%s = null;"%return_id
def _visit_call_helper_numpy_array(self, node):
return self.visit(node.args[0])
def _visit_call_helper_list(self, node):
name = self.visit(node.func)
if node.args:
args = [self.visit(e) for e in node.args]
args = ', '.join([e for e in args if e])
else:
args = ''
return '%s(%s)' % (name, args)
def _visit_call_helper_get_call_special(self, node):
name = self.visit(node.func)
if node.args:
args = [self.visit(e) for e in node.args]
args = ', '.join([e for e in args if e])
else:
args = ''
return '%s(%s)' % (name, args)
def _visit_call_helper_JSArray(self, node):
if node.args:
args = map(self.visit, node.args)
out = ', '.join(args)
#return '__create_array__(%s)' % out
return '[%s]' % out
else:
return '[]'
def _visit_call_helper_JSObject(self, node):
if node.keywords:
kwargs = map(self.visit, node.keywords)
f = lambda x: '"%s": %s' % (x[0], x[1])
out = ', '.join(map(f, kwargs))
return '{%s}' % out
else:
return '{}'
def _visit_call_helper_var(self, node):
args = [ self.visit(a) for a in node.args ]
if self._function_stack:
fnode = self._function_stack[-1]
rem = []
for arg in args:
if arg in fnode._local_vars:
rem.append( arg )
else:
fnode._local_vars.add( arg )
for arg in rem:
args.remove( arg )
out = []
if args:
out.append( 'var ' + ','.join(args) )
if node.keywords:
out.append( 'var ' + ','.join([key.arg for key in node.keywords]) )
return ';'.join(out)
def _inline_code_helper(self, s):
## TODO, should newline be changed here?
s = s.replace('\n', '\\n').replace('\0', '\\0') ## AttributeError: 'BinOp' object has no attribute 's' - this is caused by bad quotes
if s.strip().startswith('#'): s = '/*%s*/'%s
if '__new__>>' in s:
## hack that fixes inline `JS("new XXX")`,
## TODO improve typedpython to be aware of quoted strings
s = s.replace('__new__>>', ' new ')
elif '"' in s or "'" in s: ## can not trust direct-replace hacks
pass
else:
if ' or ' in s:
s = s.replace(' or ', ' || ')
if ' not ' in s:
s = s.replace(' not ', ' ! ')
if ' and ' in s:
s = s.replace(' and ', ' && ')
return s
def visit_While(self, node):
body = [ 'while (%s)' %self.visit(node.test), self.indent()+'{']
self.push()
for line in list( map(self.visit, node.body) ):
body.append( self.indent()+line )
self.pull()
body.append( self.indent() + '}' )
return '\n'.join( body )
def visit_Str(self, node):
s = node.s.replace("\\", "\\\\").replace('\n', '\\n').replace('\r', '\\r').replace('"', '\\"')
#if '"' in s:
# return "'%s'" % s
return '"%s"' % s
def visit_BinOp(self, node):
left = self.visit(node.left)
op = self.visit(node.op)
right = self.visit(node.right)
go_hacks = ('__go__array__', '__go__arrayfixed__', '__go__map__', '__go__func__', '__go__receive__', '__go__send__')
if op == '>>' and left == '__new__':
## this can happen because python_to_pythonjs.py will catch when a new class instance is created
## (when it knows that class name) and replace it with `new(MyClass())`; but this can cause a problem
## if later the user changes that part of their code into a module, and loads it as a javascript module,
## they may update their code to call `new MyClass`, and then later go back to the python library.
## the following hack prevents `new new`
if isinstance(node.right, ast.Call) and isinstance(node.right.func, ast.Name) and node.right.func.id=='new':
right = self.visit(node.right.args[0])
return ' new %s' %right
elif op == '<<':
if left in ('__go__receive__', '__go__send__'):
self._has_channels = True
return '%s.recv()' %right
if isinstance(node.left, ast.Call) and isinstance(node.left.func, ast.Name) and node.left.func.id in go_hacks:
if node.left.func.id == '__go__func__':
raise SyntaxError('TODO - go.func')
elif node.left.func.id == '__go__map__':
key_type = self.visit(node.left.args[0])
value_type = self.visit(node.left.args[1])
if value_type == 'interface': value_type = 'interface{}'
return '&map[%s]%s%s' %(key_type, value_type, right)
else:
if isinstance(node.right, ast.Name):
raise SyntaxError(node.right.id)
right = []
for elt in node.right.elts:
if isinstance(elt, ast.Num):
right.append( str(elt.n)+'i' )
else:
right.append( self.visit(elt) )
right = '(%s)' %','.join(right)
if node.left.func.id == '__go__array__':
T = self.visit(node.left.args[0])
if T in go_types:
#return '&mut vec!%s' %right
return 'Rc::new(RefCell::new(vec!%s))' %right
else:
self._catch_assignment = {'class':T} ## visit_Assign catches this
return '&[]*%s%s' %(T, right)
elif node.left.func.id == '__go__arrayfixed__':
asize = self.visit(node.left.args[0])
atype = self.visit(node.left.args[1])
return ' new Array(%s) /*array of: %s*/' %(asize, atype)
if left in self._typed_vars and self._typed_vars[left] == 'numpy.float32':
left += '[_id_]'
if right in self._typed_vars and self._typed_vars[right] == 'numpy.float32':
right += '[_id_]'
return '(%s %s %s)' % (left, op, right)
def visit_Mult(self, node):
return '*'
def visit_Add(self, node):
return '+'
def visit_Sub(self, node):
return '-'
def visit_Div(self, node):
return '/'
def visit_Mod(self, node):
return '%'
def visit_Lt(self, node):
return '<'
def visit_Gt(self, node):
return '>'
def visit_GtE(self, node):
return '>='
def visit_LtE(self, node):
return '<='
def visit_LShift(self, node):
return '<<'
def visit_RShift(self, node):
return '>>'
def visit_BitXor(self, node):
return '^'
def visit_BitOr(self, node):
return '|'
def visit_BitAnd(self, node):
return '&'
def visit_Return(self, node):
if isinstance(node.value, Tuple):
return 'return [%s];' % ', '.join(map(self.visit, node.value.elts))
if node.value:
return 'return %s;' % self.visit(node.value)
return 'return undefined;'
def visit_Pass(self, node):
return '/*pass*/'
def visit_Eq(self, node):
return '=='
def visit_NotEq(self, node):
return '!='
def visit_Num(self, node):
return str(node.n)
def visit_Is(self, node):
return '==='
def visit_Compare(self, node):
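# Python `==`/`!=` on lists means deep equality, so unless fast-js
# mode is enabled the generated comparison falls back to
# JSON.stringify whenever an operand turns out to be an Array.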
if isinstance(node.ops[0], ast.Eq):
left = self.visit(node.left)
right = self.visit(node.comparators[0])
if self._lua:
return '%s == %s' %(left, right)
elif self._fast_js:
return '(%s===%s)' %(left, right)
else:
return '(%s instanceof Array ? JSON.stringify(%s)==JSON.stringify(%s) : %s===%s)' %(left, left, right, left, right)
elif isinstance(node.ops[0], ast.NotEq):
left = self.visit(node.left)
right = self.visit(node.comparators[0])
if self._lua:
return '%s ~= %s' %(left, right)
elif self._fast_js:
return '(%s!==%s)' %(left, right)
else:
return '(!(%s instanceof Array ? JSON.stringify(%s)==JSON.stringify(%s) : %s===%s))' %(left, left, right, left, right)
else:
comp = [ '(']
comp.append( self.visit(node.left) )
comp.append( ')' )
for i in range( len(node.ops) ):
comp.append( self.visit(node.ops[i]) )
if isinstance(node.ops[i], ast.Eq):
raise SyntaxError('TODO')
elif isinstance(node.comparators[i], ast.BinOp):
comp.append('(')
comp.append( self.visit(node.comparators[i]) )
comp.append(')')
else:
comp.append( self.visit(node.comparators[i]) )
return ' '.join( comp )
def visit_Not(self, node):
return '!'
def visit_IsNot(self, node):
return '!=='
def visit_UnaryOp(self, node):
#return self.visit(node.op) + self.visit(node.operand)
return '%s (%s)' %(self.visit(node.op),self.visit(node.operand))
def visit_USub(self, node):
return '-'
def visit_And(self, node):
return ' && '
def visit_Or(self, node):
return ' || '
def visit_BoolOp(self, node):
op = self.visit(node.op)
return '('+ op.join( [self.visit(v) for v in node.values] ) +')'
def visit_If(self, node):
out = []
test = self.visit(node.test)
if test.startswith('(') and test.endswith(')'):
out.append( 'if %s' %test )
else:
out.append( 'if (%s)' %test )
out.append( self.indent() + '{' )
self.push()
for line in list(map(self.visit, node.body)):
if line is None: continue
out.append( self.indent() + line )
orelse = []
for line in list(map(self.visit, node.orelse)):
orelse.append( self.indent() + line )
self.pull()
if orelse:
out.append( self.indent() + '}')
out.append( self.indent() + 'else')
out.append( self.indent() + '{')
out.extend( orelse )
out.append( self.indent() + '}' )
return '\n'.join( out )
def visit_Dict(self, node):
a = []
for i in range( len(node.keys) ):
k = self.visit( node.keys[ i ] )
v = self.visit( node.values[i] )
a.append( '%s:%s'%(k,v) )
b = ', '.join( a )
return '{ %s }' %b
def _visit_for_prep_iter_helper(self, node, out, iter_name):
## support "for key in JSObject" ##
#out.append( self.indent() + 'if (! (iter instanceof Array) ) { iter = Object.keys(iter) }' )
## new style - Object.keys only works for normal JS-objects, not ones created with `Object.create(null)`
if not self._fast_loops:
out.append(
self.indent() + 'if (! (%s instanceof Array || typeof %s == "string" || __is_typed_array(%s) || __is_some_array(%s) )) { %s = __object_keys__(%s) }' %(iter_name, iter_name, iter_name, iter_name, iter_name, iter_name)
)
_iter_id = 0
def visit_For(self, node):
'''
for loops inside a `with javascript:` block will produce this faster for loop.
note that the rules are python-style, even though we are inside a `with javascript:` block:
. an Array is like a list, `for x in Array` gives you the value (not the index as you would get in pure javascript)
. an Object is like a dict, `for v in Object` gives you the key (not the value as you would get in pure javascript)
if you are trying to optimize looping over a PythonJS list, you can do this:
for v in mylist[...]:
print v
above works because [...] returns the internal Array of mylist
'''
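# Illustrative sketch of the emitted JavaScript (the __iter1/__i1 names
# depend on self._iter_id, and fast_loops skips the temporary copy):
#   var __iter1 = mylist;
#   for (var __i1=0; __i1 < __iter1.length; __i1++) {
#       var v = __iter1[ __i1 ];
#       ...
#   }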
target = node.target.id
iter = self.visit(node.iter) # iter is the python iterator
out = []
body = []
self._iter_id += 1
index = '__i%s' %self._iter_id
if not self._fast_loops:
iname = '__iter%s' %self._iter_id
out.append( self.indent() + 'var %s = %s;' % (iname, iter) )
else:
iname = iter
self._visit_for_prep_iter_helper(node, out, iname)
if self._fast_loops:
out.append( 'for (var %s=0; %s < %s.length; %s++)' % (index, index, iname, index) )
out.append( self.indent() + '{' )
else:
out.append( self.indent() + 'for (var %s=0; %s < %s.length; %s++) {' % (index, index, iname, index) )
self.push()
body.append( self.indent() + 'var %s = %s[ %s ];' %(target, iname, index) )
for line in list(map(self.visit, node.body)):
body.append( self.indent() + line )
self.pull()
out.extend( body )
out.append( self.indent() + '}' )
return '\n'.join( out )
def visit_Continue(self, node):
return 'continue'
def visit_Break(self, node):
return 'break;'
def generate_minimal_runtime():
from python_to_pythonjs import main as py2pyjs
a = py2pyjs(
open('runtime/builtins_core.py', 'rb').read(),
module_path = 'runtime',
fast_javascript = True
)
return main( a, requirejs=False, insert_runtime=False, function_expressions=True, fast_javascript=True )
def generate_runtime():
from python_to_pythonjs import main as py2pyjs
builtins = py2pyjs(
open('runtime/builtins.py', 'rb').read(),
module_path = 'runtime',
fast_javascript = True
)
lines = [
main( open('runtime/pythonpythonjs.py', 'rb').read(), requirejs=False, insert_runtime=False, function_expressions=True, fast_javascript=True ), ## lowlevel pythonjs
main( builtins, requirejs=False, insert_runtime=False, function_expressions=True, fast_javascript=True )
]
return '\n'.join( lines )
def main(source, requirejs=True, insert_runtime=True, webworker=False, function_expressions=True, fast_javascript=False, fast_loops=False):
head = []
tail = []
script = False
osource = source
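# When handed a full HTML document, only the bodies of
# `<script type="text/python">` tags are translated; the surrounding
# markup is buffered into `head`/`tail` and re-emitted around the
# generated JavaScript.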
if source.strip().startswith('<html'):
lines = source.splitlines()
for line in lines:
if line.strip().startswith('<script') and 'type="text/python"' in line:
head.append( '<script type="text/javascript">')
script = list()
elif line.strip() == '</script>':
if type(script) is list:
source = '\n'.join(script)
script = True
tail.append( '</script>')
elif script is True:
tail.append( '</script>')
else:
head.append( '</script>')
elif isinstance( script, list ):
script.append( line )
elif script is True:
tail.append( line )
else:
head.append( line )
try:
tree = ast.parse( source )
#raise SyntaxError(source)
except SyntaxError:
import traceback
err = traceback.format_exc()
sys.stderr.write( err )
sys.stderr.write( '\n--------------error in second stage translation--------------\n' )
lineno = 0
for line in err.splitlines():
if "<unknown>" in line:
lineno = int(line.split()[-1])
lines = source.splitlines()
if lineno > 10:
for i in range(lineno-5, lineno+5):
sys.stderr.write( 'line %s->'%i )
sys.stderr.write( lines[i] )
if i==lineno-1:
sys.stderr.write(' <<SyntaxError>>')
sys.stderr.write( '\n' )
else:
sys.stderr.write( lines[lineno] )
sys.stderr.write( '\n' )
if '--debug' in sys.argv:
sys.stderr.write( osource )
sys.stderr.write( '\n' )
sys.exit(1)
gen = JSGenerator(
source = source,
requirejs=requirejs,
insert_runtime=insert_runtime,
webworker=webworker,
function_expressions=function_expressions,
fast_javascript = fast_javascript,
fast_loops = fast_loops
)
output = gen.visit(tree)
if head and not isinstance(output, dict):
head.append( output )
head.extend( tail )
output = '\n'.join( head )
return output
def command():
scripts = []
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg.endswith('.py'):
scripts.append( arg )
if len(scripts):
a = []
for script in scripts:
a.append( open(script, 'rb').read() )
data = '\n'.join( a )
else:
data = sys.stdin.read()
js = main( data )
print( js )
if __name__ == '__main__':
if '--runtime' in sys.argv:
print('creating new runtime: pythonjs.js')
open('pythonjs.js', 'wb').write( generate_runtime() )
elif '--miniruntime' in sys.argv:
print('creating new runtime: pythonjs-minimal.js')
open('pythonjs-minimal.js', 'wb').write( generate_minimal_runtime() )
else:
command()
|
{
"content_hash": "1c4a403509d6db67bbbf9d79332da45d",
"timestamp": "",
"source": "github",
"line_count": 1568,
"max_line_length": 220,
"avg_line_length": 29.821428571428573,
"alnum_prop": 0.6116766467065868,
"repo_name": "kustomzone/Rusthon",
"id": "17844624e72bcda8789e45a3083ee150db942035",
"size": "46760",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythonjs/pythonjs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "23667"
},
{
"name": "HTML",
"bytes": "44433"
},
{
"name": "JavaScript",
"bytes": "70985"
},
{
"name": "Perl",
"bytes": "66040"
},
{
"name": "Python",
"bytes": "1007330"
}
],
"symlink_target": ""
}
|
import logging
from restlib2.http import codes
from restlib2.params import Parametizer, BoolParam, StrParam
from modeltree.tree import MODELTREE_DEFAULT_ALIAS, trees
from avocado.events import usage
from avocado.query import pipeline
from serrano.conf import settings
from .base import FieldBase
from ...links import reverse_tmpl
log = logging.getLogger(__name__)
class FieldStatsParametizer(Parametizer):
aware = BoolParam(False)
tree = StrParam(MODELTREE_DEFAULT_ALIAS, choices=trees)
processor = StrParam('default', choices=pipeline.query_processors)
class FieldStats(FieldBase):
"Field Stats Resource"
parametizer = FieldStatsParametizer
def get_link_templates(self, request):
uri = request.build_absolute_uri
return {
'self': reverse_tmpl(
uri, 'serrano:field-stats', {'pk': (int, 'id')}),
'parent': reverse_tmpl(
uri, 'serrano:field', {'pk': (int, 'parent_id')}),
}
def get(self, request, pk):
instance = self.get_object(request, pk=pk)
stats_capable = settings.STATS_CAPABLE
if stats_capable and not stats_capable(instance):
data = {
'message': 'This field does not support stats reporting.'
}
return self.render(
request, data, status=codes.unprocessable_entity)
params = self.get_params(request)
if params['aware']:
context = self.get_context(request)
else:
context = None
QueryProcessor = pipeline.query_processors[params['processor']]
processor = QueryProcessor(context=context, tree=instance.model)
queryset = processor.get_queryset(request=request)
if instance.simple_type == 'number':
resp = {
'max': instance.max(queryset=queryset),
'min': instance.min(queryset=queryset),
'avg': instance.avg(queryset=queryset)
}
elif (instance.simple_type == 'date' or
instance.simple_type == 'time' or
instance.simple_type == 'datetime'):
resp = {
'max': instance.max(queryset=queryset),
'min': instance.min(queryset=queryset)
}
else:
resp = {
'count': instance.count(queryset=queryset, distinct=True)
}
usage.log('stats', instance=instance, request=request)
return resp
|
{
"content_hash": "7b5a51b9aa5aceb1108b014392dd911e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 73,
"avg_line_length": 32.828947368421055,
"alnum_prop": 0.6032064128256514,
"repo_name": "chop-dbhi/serrano",
"id": "46abdd1e8654a1a05d6d7206521a20a1c61e7ba5",
"size": "2495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serrano/resources/field/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "351235"
},
{
"name": "Shell",
"bytes": "2355"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os.path
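# Resolves `path2` relative to the directory containing `path1`,
# e.g. join_file_paths('/a/b/c.txt', '../d.txt') -> '/a/d.txt'.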
def join_file_paths(path1, path2):
path1_abs = os.path.abspath(path1)
path1_dir = os.path.dirname(path1_abs)
path = os.path.join(path1_dir, path2)
path = os.path.normpath(path)
return path
|
{
"content_hash": "f34c6ca969f34359feeb5d82db0a2e11",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 42,
"avg_line_length": 18,
"alnum_prop": 0.674074074074074,
"repo_name": "AoiKuiyuyou/AoikTopDownParser",
"id": "55c148347a811e95f73e1f699c98021af5faf019",
"size": "286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/aoiktopdownparser/util/path_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "182752"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from tensorflow.python.layers.normalization import BatchNormalization as TFBatchNorm
from .template import BaseLayer
from tensorflow.python.ops import init_ops
class L2_Normalize(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, dim):
'''dim (int or list of ints): dimension to normalize'''
self.dim = dim
def _train_fprop(self, state_below):
return tf.nn.l2_normalize(state_below, dim=self.dim)
class BatchNormalization(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
renorm=False,
renorm_clipping=None,
renorm_momentum=0.99,
fused=None):
'''
Reference:
Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
http://arxiv.org/abs/1502.03167
Args:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
Note:
>>> # To use this normalization, apply update ops below to update the mean and variance
>>> from tensorflow.python.framework import ops
>>> optimizer = tf.train.AdamOptimizer(learning_rate)
>>> update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
>>> with ops.control_dependencies(update_ops):
>>> train_op = optimizer.minimize(train_cost_sb)
'''
self.axis=axis
self.momentum=momentum
self.epsilon=epsilon
self.center=center
self.scale=scale
self.beta_initializer=beta_initializer
self.gamma_initializer=gamma_initializer
self.moving_mean_initializer=moving_mean_initializer
self.moving_variance_initializer=moving_variance_initializer
self.beta_regularizer=beta_regularizer
self.gamma_regularizer=gamma_regularizer
self.beta_constraint=beta_constraint
self.gamma_constraint=gamma_constraint
self.renorm=renorm
self.renorm_clipping=renorm_clipping
self.renorm_momentum=renorm_momentum
self.fused=fused
@BaseLayer.init_name_scope
def __init_var__(self, state_below):
scope_ = tf.get_default_graph().get_name_scope()
self.bn = TFBatchNorm(axis=self.axis,
momentum=self.momentum,
epsilon=self.epsilon,
center=self.center,
scale=self.scale,
beta_initializer=self.beta_initializer,
gamma_initializer=self.gamma_initializer,
moving_mean_initializer=self.moving_mean_initializer,
moving_variance_initializer=self.moving_variance_initializer,
beta_regularizer=self.beta_regularizer,
gamma_regularizer=self.gamma_regularizer,
beta_constraint=self.beta_constraint,
gamma_constraint=self.gamma_constraint,
renorm=self.renorm,
renorm_clipping=self.renorm_clipping,
renorm_momentum=self.renorm_momentum,
fused=self.fused,
name=str(scope_))
input_shape = [int(dim) for dim in state_below.shape[1:]]
self.bn.build(input_shape=[None] + list(input_shape))
def _train_fprop(self, state_below):
return self.bn.apply(state_below, training=True)
def _test_fprop(self, state_below):
return self.bn.apply(state_below, training=False)
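# Minimal usage sketch (assumes a 2-D float placeholder; __init_var__
# must run first so the wrapped TF layer gets built):
#   x = tf.placeholder(tf.float32, [None, 64])
#   bn = BatchNormalization()
#   bn.__init_var__(x)
#   y_train = bn._train_fprop(x)  # updates the moving statistics
#   y_test = bn._test_fprop(x)    # consumes the moving statistics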
class LRN(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, depth_radius=None, bias=None, alpha=None, beta=None):
'''
Description:
Local Response Normalization.
The 4-D input tensor is treated as a 3-D array of 1-D vectors
(along the last dimension), and each vector is normalized independently.
Within a given vector, each component is divided by the weighted,
squared sum of inputs within depth_radius. In detail,
>>> sqr_sum[a, b, c, d] = sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
>>> output = input / (bias + alpha * sqr_sum) ** beta
Args:
depth_radius (optional int): defaults to 5. 0-D. Half-width of the 1-D normalization window.
bias (optional float): Defaults to 1. An offset (usually positive to avoid dividing by 0).
alpha (optional float): Defaults to 1. A scale factor, usually positive.
beta (optional float): Defaults to 0.5. An exponent.
'''
self.depth_radius = depth_radius
self.bias = bias
self.alpha = alpha
self.beta = beta
def _train_fprop(self, state_below):
return tf.nn.local_response_normalization(state_below, depth_radius=self.depth_radius,
bias=self.bias, alpha=self.alpha,
beta=self.beta, name=None)
|
{
"content_hash": "9e24537bc1e8e214931374c5f15244f5",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 105,
"avg_line_length": 50.4593023255814,
"alnum_prop": 0.5945385413066021,
"repo_name": "hycis/TensorGraph",
"id": "c0d3e14df3058399c93400f919b36527cc1ec2a6",
"size": "8679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorgraph/layers/normalization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "163332"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
}
|
import os
from unittest.mock import patch
import pytest
from elizabeth.core.providers import Path
from elizabeth.intd import (
PROGRAMMING_LANGS,
FOLDERS,
PROJECT_NAMES
)
@pytest.fixture
def path():
return Path()
def test_root(path):
platform_patcher = patch('sys.platform', 'win32')
platform_patcher.start()
assert 'С:\\' == path.root
platform_patcher.stop()
platform_patcher = patch('sys.platform', 'linux2')
platform_patcher.start()
assert '/' == path.root
platform_patcher.stop()
def test_home(path):
platform_patcher = patch('sys.platform', 'win32')
platform_patcher.start()
assert 'С:\\Users\\' == path.home
platform_patcher.stop()
platform_patcher = patch('sys.platform', 'linux2')
platform_patcher.start()
assert '/home/' == path.home
platform_patcher.stop()
def test_user(path):
user = path.user(gender='female')
result = user.split(os.sep)
assert len(result) == 3
def test_users_folder(path):
folder = path.users_folder(user_gender='female')
folder = folder.split(os.sep)
assert len(folder) == 4
assert folder[3] in FOLDERS
def test_dev_dir(path):
dev_dir = path.dev_dir(user_gender='female')
# /home/yajaira/Development/Lua
dev_dir = dev_dir.split(os.sep)
assert len(dev_dir) == 5
assert dev_dir[4] in PROGRAMMING_LANGS
def test_project_dir(path):
project_path = path.project_dir(user_gender='female')
project_path = project_path.split(os.sep)
assert len(project_path) == 6
# /home/sherika/Development/Falcon/mercenary
assert project_path[5] in PROJECT_NAMES
|
{
"content_hash": "aca3a545df0a723d7fae28854b40d6c4",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 57,
"avg_line_length": 23.71014492753623,
"alnum_prop": 0.6668704156479217,
"repo_name": "wikkiewikkie/elizabeth",
"id": "45d5c778d6886cf04f79ec11c05b9bbd07557fc1",
"size": "1662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_data/test_path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "286804"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from wagtail.wagtailsearch import index
from wagtail.wagtailsnippets.models import register_snippet
# AlphaSnippet and ZuluSnippet are for testing ordering of
# snippets when registering. They are named as such to ensure
their ordering is clear. They are registered during testing
# to ensure specific [in]correct register ordering
# AlphaSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class AlphaSnippet(models.Model):
text = models.CharField(max_length=255)
def __str__(self):
return self.text
# ZuluSnippet is registered during TestSnippetOrdering
@python_2_unicode_compatible
class ZuluSnippet(models.Model):
text = models.CharField(max_length=255)
def __str__(self):
return self.text
# Register model as snippet using register_snippet as both a function and a decorator
class RegisterFunction(models.Model):
pass
register_snippet(RegisterFunction)
@register_snippet
class RegisterDecorator(models.Model):
pass
# A snippet model that inherits from index.Indexed can be searched on
@register_snippet
class SearchableSnippet(models.Model, index.Indexed):
text = models.CharField(max_length=255)
search_fields = (
index.SearchField('text'),
)
def __str__(self):
return self.text
|
{
"content_hash": "e39f4dd71a8b3ff9d4d420b8b8ac341e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 25.4,
"alnum_prop": 0.7559055118110236,
"repo_name": "inonit/wagtail",
"id": "dbb8a274bd0064d213a3cb10b1bbfa72d87386fd",
"size": "1397",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wagtail/tests/snippets/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "152389"
},
{
"name": "HTML",
"bytes": "253521"
},
{
"name": "JavaScript",
"bytes": "96567"
},
{
"name": "Makefile",
"bytes": "548"
},
{
"name": "Python",
"bytes": "1843138"
},
{
"name": "Shell",
"bytes": "7388"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
import api
from common import database_connector, verify_parameters
import storage
log = logging.getLogger(__name__)
def ensure_data(sessions):
try:
sessions().query(storage.Monitoring).first()
sessions.remove()
except Exception as ex:
log.critical('Could not connect to DB: %s', ex)
sys.exit(1)
def initialize_app():
verify_parameters()
db = storage.prepare_storage(database_connector())
ensure_data(db)
session_manager = storage.DBSessionFactory(db)
return api.create_api([session_manager])
application = initialize_app()
|
{
"content_hash": "3ad4048bee4c435d5ef841b7d973bbcd",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 67,
"avg_line_length": 21.82758620689655,
"alnum_prop": 0.6966824644549763,
"repo_name": "eve-basil/watches",
"id": "c71994e665fe6e649fb285d5397148239dd65227",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basil/watches/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7071"
},
{
"name": "Shell",
"bytes": "1144"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name='streampy',
version='1.2',
description='Java like stream pipelines, supports parallel operations',
url='https://github.com/tolsac/streampy.git',
author='Camille Tolsa',
author_email='camille.tolsa@gmail.com',
license='MIT',
packages=['.'],
zip_safe=False)
|
{
"content_hash": "bd714c745ff567c99c21d833ea45d0a9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 31.363636363636363,
"alnum_prop": 0.6492753623188405,
"repo_name": "tolsac/streampy",
"id": "ad094fadd5314a7afd43166523b70e23010d4958",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20245"
}
],
"symlink_target": ""
}
|
"""
Module defining user interface for the application.
"""
import cmd
import fnmatch
from io import StringIO
import shlex
from maxify.metrics import ParsingError, Duration
from termcolor import colored
from maxify.config import (
import_config,
ImportStrategy,
ProjectConflictError,
ConfigError
)
from maxify.repo import Projects, Tasks
from maxify.stopwatch import StopWatch
from maxify.utils import ArgumentParser, cbreak
help_texts = {
"projects": """Prints a list of all available projects.
""",
"import": """Imports projects defined in a specified configuration file.
Configuration files can either by a YAML file or a Python module.
Example:
> import projects.yaml
""",
"switch": """Switches to the project with the specified name.
Examples:
> switch sample1
> switch scopetastic/maxify
""",
"metrics": """Displays available metrics for tasks contained in the current\
project.
Example:
> metrics
""",
"tasks": """List all tasks stored in the current project, and optionally
print details about each one.
Usage:
> tasks [--details] [PATTERN]
The tasks command accepts the following arguments:
--details - Flag used to print out details on each task.
PATTERN - Optional name pattern to use for only displaying a subset of tasks.
The name pattern is a glob pattern.
Examples:
> tasks
> tasks --details
> tasks --details maxify-1*
""",
"task": """Create or update a task associated with the current project.
Tasks can be created or updated either in interactive or non-interactive mode.
Examples (non-interactive):
> task maxify-1 compile_time 2hrs research_time 1hr
> task maxify-1 "debug time" 20mins
Example (interactive):
> task maxify-1
Compile Time: 2 hrs
Research Time: 1 hr
Debug Time: 20 mins
...
"""
}
class MaxifyCmd(cmd.Cmd):
"""Command interpreter used for accepting commands from the user to
manage a Maxify project.
"""
_stopwatch_status_colors = {
StopWatch.STATUS_RUNNING: "green",
StopWatch.STATUS_PAUSED: "yellow",
StopWatch.STATUS_RESET: "magenta",
StopWatch.STATUS_STOPPED: "white"
}
def __init__(self, stdin=None, stdout=None, use_color=True):
cmd.Cmd.__init__(self, stdin=stdin, stdout=stdout)
self.intro = "Maxify programmer time tracker client"
self.prompt = "> "
self.current_project = None
self.use_color = use_color
self.projects = Projects()
self._generate_help_funcs()
def _generate_help_funcs(self):
for command in help_texts:
self._generate_help_func(command)
def _generate_help_func(self, command):
func_name = "help_" + command
def help_func():
self._title("Help - " + command)
self._print(help_texts[command])
setattr(self, func_name, help_func)
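# e.g. the "tasks" entry in help_texts becomes a `help_tasks` attribute,
# which cmd.Cmd dispatches when the user types `help tasks`.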
def cmdloop(self, args=None):
if args and args.project:
self._set_current_project(args.project)
if self.current_project:
self.intro = self.intro + \
"\n\n" + \
"Switched to project '{0}'\n".format(args.project)
else:
self.intro = self.intro + \
"\n\nNo project found named '{0}'\n".format(args.project)
if args and args.command and len(args.command) > 0:
stdin = StringIO()
self.stdin = stdin
self.prompt = ""
self.use_rawinput = False
stdin.write(" ".join(args.command) + "\nexit\n")
stdin.seek(0)
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
self._print("\nExiting\n")
return
def _set_current_project(self, project_name):
self.current_project = self.projects.get(project_name)
def emptyline(self):
"""Handles an empty line (does nothing)."""
pass
########################################
# Command - quit/exit
########################################
def do_exit(self, line):
"""Exit the application."""
return True
def do_quit(self, line):
"""Exit the application."""
return True
########################################
# Command - switch
########################################
def do_switch(self, line):
"""Switch to a project with the provided name."""
self._set_current_project(line.strip())
if not self.current_project:
self._error("No project found named '{0}'".format(line))
else:
self._success("Switched to project '{0}'".format(
self.current_project.name))
def complete_switch(self, text, line, beginx, endidx):
start_index = len("switch ")
if beginx == start_index:
return self.projects.matching_name(text)
# If beginning index not immediately after command keyword, then
# readline found an organization delimiter, so handle it correctly
# in search for matches, then strip it out of returned results for
# readline tokenized completion logic.
organization = line[start_index:beginx]
partial_name = organization + text
matches = self.projects.matching_name(partial_name)
return [m.replace(organization, "") for m in matches]
########################################
# Command - projects
########################################
def do_projects(self, line):
"""Lists all projects current defined in the user's data file.
"""
projects = self.projects.all()
if not len(projects):
self._print("\nNo projects found\n")
return
orgs = {project.organization for project in projects}
by_org = {org: [p for p in projects if p.organization == org]
for org in orgs}
for org in sorted(orgs, key=lambda o: o if o is not None else ""):
if org:
self._title(org)
else:
self._title("default")
for project in by_org[org]:
self._print_project_summary(project)
self._print()
def _print_project_summary(self, project):
project_str = " * {name} - {desc}".format(
name=project.qualified_name,
desc=project.desc if project.desc else "No description provided")
self._print(project_str)
########################################
# Command - import
########################################
def do_import(self, line):
"""Import projects from a configuration file.
"""
# First, attempt an import and abort if a conflict happens
file_path = line.strip()
try:
projects = import_config(file_path, ImportStrategy.abort)
conflict = False
except ProjectConflictError:
conflict = True
except ConfigError as e:
self._error(str(e))
return
if conflict:
self._warning("Conflicts found between current projects and "
"projects defined in '{}'.".format(file_path))
self._print("\nYou can select one of the following options for "
"continuing with the import:\n"
" - (A)bort - Stops the import and makes no changes.\n"
" - (M)erge - Merges current projects with those "
"being imported.\n"
" - (R)eplace - Replaces current projects with the "
"ones being imported. Any existing conflicting "
"projects will be deleted along with their data.\n\n")
response = input("What would you like to do?: ")
if response.upper() == "M":
self._print("Merging projects...\n")
projects = import_config(file_path, ImportStrategy.merge)
elif response.upper() == "R":
self._print("Replacing projects...\n")
projects = import_config(file_path, ImportStrategy.overwrite)
else:
self._print("Import aborted.\n")
projects = None
if not projects:
return
self._print("\nThe following projects were imported:")
for project in projects:
self._print_project_summary(project)
self._print("\n")
########################################
# Command - metrics
########################################
def do_metrics(self, line):
"""Print out metrics available for the current project."""
if not self.current_project:
self._error("Please select a project first using the 'switch' "
"command")
return
self._title(self.current_project.name + " Metrics:")
for metric in sorted(self.current_project.metrics,
key=lambda m: m.name):
self._print(" * {0} ({1})".format(metric.name,
metric.metric_type.display_name()))
if metric.desc:
self._print(" - Description: " + metric.desc)
if metric.value_range:
self._print(" - Possible Values: "
+ ", ".join(map(str, metric.value_range)))
if metric.default_value:
self._print(" - Default Value: " + str(metric.default_value))
self._print()
########################################
# Command - tasks
########################################
def do_tasks(self, line):
"""Print out a list of tasks for the current project and accumulated
metrics for each task.
"""
parser = ArgumentParser(stdout=self.stdout,
prog="tasks",
add_help=False)
parser.add_argument("--details", action="store_true")
parser.add_argument("pattern", metavar="PATTERN", nargs="?")
args = parser.parse_args(line.split())
if not args:
self._error("Invalid arguments")
return
details = args.details
pattern = args.pattern if args.pattern else "*"
self._title("Tasks")
# align printed values for details by finding longest metric name
metric_names = [m.name for m in self.current_project.metrics]
metric_names.append("Created"),
metric_names.append("Last Updated")
max_name_len = len(max(metric_names, key=len))
detail_fmt = " {0:" + str(max_name_len) + "} | {1}"
for task in sorted(
filter(lambda t: fnmatch.fnmatch(t.name, pattern),
self.current_project.tasks),
key=lambda t: t.name):
self._info(" * " + task.name, extra_newline=False)
if details:
self._print(" " + "-" * 51)
for metric in self.current_project.metrics:
metric_value = task.value(metric)
value = metric.metric_type.to_str(metric_value) \
if metric_value else "----"
self._print(detail_fmt.format(metric.name, value))
self._print()
self._print(detail_fmt.format("Created", task.created))
self._print(detail_fmt.format("Last Updated",
task.last_updated))
self._print()
self._print()
########################################
# Command - task
########################################
def do_task(self, line):
"""Create a task or edit an existing task."""
line = line.strip()
if not line:
self._error("You must specify a task to create or update.\n"
"Usage: task [TASK_NAME]")
return
tokens = shlex.split(line)
task_name = tokens[0]
args = tokens[1:]
if len(args):
success = self._update_task(task_name, args)
else:
success = self._update_task_interactive(task_name)
if success:
self._success("Task updated")
def _update_task_interactive(self, task_name):
self._error("Interactive task input is not implemented yet!")
return False
def _update_task(self, task_name, args):
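# `args` alternates metric-name / value tokens as produced by shlex,
# e.g. ['compile_time', '2hrs', 'research_time', '1hr'].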
metrics = []
args_len = len(args)
for i in range(0, args_len, 2):
metric_name = args[i]
# Determine value string
val_idx = i + 1
if val_idx >= args_len:
self._error("Invalid expression. Missing value for: "
+ metric_name)
return
value_str = args[val_idx]
# Determine metric
metric = self.current_project.metric(metric_name)
if not metric:
self._error("Invalid metric: " + metric_name)
return
try:
value = metric.metric_type.parse(value_str)
except ParsingError as e:
self._error(str(e))
return
metrics.append((metric, value))
task = self.current_project.task(task_name)
for metric, value in metrics:
try:
task.record(metric, value)
except ValueError as e:
self._error(str(e))
self.projects.revert()
return False
self.projects.save(self.current_project)
return True
def _print_task(self, task):
output = [
"Created: " + str(task.created),
"Last Updated: " + str(task.last_updated),
'\n'
]
for data_point in task.data_points:
output.append(" {0} -> {1}".format(data_point.metric,
data_point.value))
self._print("\n".join(output) + "\n")
########################################
# Command - stopwatch
########################################
def do_stopwatch(self, line):
"""Creates a new stopwatch for recording time for a particular task.
"""
parser = ArgumentParser(stdout=self.stdout,
prog="stopwatch",
add_help=False)
parser.add_argument("task", metavar="TASK")
parser.add_argument("metric", metavar="METRIC", nargs="?")
args = parser.parse_args(line.split())
if not args.task:
self._print()
self._error("Invalid arguments")
return
# Get task with specified name and optional metric
task = self.current_project.task(args.task, create=False)
if not task:
self._error("Task {} does not exist.".format(args.task))
return
if args.metric:
metric = self.current_project.metric(args.metric)
if not metric:
self._error("Metric {} does not exist.".format(args.metric))
return
else:
metric = None
# Create a stop watch and UI
self._print("\n (R)eset | (S)tart | (P)ause | S(t)op\n")
self.stdout.write(" Stopped\t--:--:--\r")
self.stdout.flush()
stopwatch_active = True
stopwatch = StopWatch()
with cbreak():
while stopwatch_active:
user_input_int = ord(self.stdin.read(1))
if 0 <= user_input_int <= 256:
user_input = chr(user_input_int).upper()
if user_input == "S":
stopwatch.start(tick_callback=self._update_printout)
elif user_input == "P":
stopwatch.pause()
elif user_input == 'R':
stopwatch.reset()
elif user_input == "T":
stopwatch.stop()
stopwatch_active = False
# At this point, stopwatch has been stopped, so now attempt to assign
# its total duration to the task.
if metric:
self._assign_time(task, metric, stopwatch.total)
else:
self._assign_time_interactive(task, stopwatch.total)
self.projects.save(self.current_project)
def _assign_time(self, task, metric, total):
task.record(metric, total)
self._print(' \n\n Added {} to "{}"\n'.format(total, metric.name))
def _assign_time_interactive(self, task, total):
self._title("\n\nAssign Time")
self._print("The stop watch recorded {}. Assign that time to the "
"metrics in this task:\n\n".format(total))
remainder = total
duration_metrics = filter(lambda m: m.metric_type is Duration,
self.current_project.metrics)
for metric in sorted(duration_metrics, key=lambda m: m.name):
parsed_val = None
while parsed_val is None:
value = input(" {} ({} remaining): ".format(metric.name,
remainder))
if value.lower() == "rest":
parsed_val = remainder
else:
try:
parsed_val = Duration.parse(value)
except ParsingError as e:
self._error(str(e))
except Exception:
self._error("Invalid duration")
if parsed_val > remainder:
self._error("{} is greater than remaining time from "
"stopwatch ({})".format(parsed_val, remainder))
parsed_val = None
task.record(metric, parsed_val)
remainder -= parsed_val
if remainder.total_seconds() <= 0:
break
self._print()
def _update_printout(self, total, status):
"""Update printout of current stopwatch value to screen.
:param total: The total as a :class:`datetime.timedelta`.
:param status: The current stopwatch status as a string.
"""
self.stdout.write(" " * 80 + "\r")
self.stdout.write(colored(" {:7}\t{}\r".format(status, total),
self._stopwatch_status_colors[status]))
self.stdout.flush()
def complete_stopwatch(self, text, line, beginx, endidx):
"""Provides support for auto-complete of task name in stopwatch command.
"""
tasks = Tasks(self.current_project)
start_index = len("stopwatch ")
if beginx == start_index:
return [t.name for t in tasks.starts_with(text)]
beginning = line[start_index:beginx]
partial_name = beginning + text
matches = tasks.starts_with(partial_name)
return [t.name.replace(beginning, "") for t in matches]
########################################
# Utility methods
########################################
def _title(self, line):
self._print("\n" + line)
self._print("-" * min(len(line), 80) + "\n")
def _success(self, msg, extra_newline=True):
self._print(msg, 'green', extra_newline)
def _info(self, msg, extra_newline=True):
self._print(msg, 'cyan', extra_newline)
def _warning(self, msg, extra_newline=True, indent=0):
self._print(" " * indent + "Warning: " + msg, "yellow", extra_newline)
def _error(self, msg, extra_newline=True, indent=0):
self._print(" " * indent + "Error: " + msg, "red", extra_newline)
def _print(self, msg=None, color=None, extra_newline=False):
if msg and color and self.use_color:
msg = colored(msg, color)
if not msg:
msg = ""
print(msg, file=self.stdout)
if extra_newline:
print(file=self.stdout)
|
{
"content_hash": "b43e612afdd33147862089e7e11f4e20",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 86,
"avg_line_length": 33.160919540229884,
"alnum_prop": 0.5215152265412231,
"repo_name": "rossbayer/maxify",
"id": "fae281817e7a5dc193e5c19001169630e6da766b",
"size": "20195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maxify/ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85936"
}
],
"symlink_target": ""
}
|
"""
Django settings for fauxra project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dpdk$k#-cku4hc&4u+sdxgr36p8psr4vz00rx=njr20@c%ixr$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'questions',
'images',
'accounts',
'taggit',
'cloudinary',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fauxra.urls'
WSGI_APPLICATION = 'fauxra.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
|
{
"content_hash": "71d8b09a53f8af838025252945ade567",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 71,
"avg_line_length": 23.61320754716981,
"alnum_prop": 0.7267279264882142,
"repo_name": "mpgarate/OST-fauxra",
"id": "ce626fc0ee39e5b0cbbe298dc1fcea3ea0fd510f",
"size": "2503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fauxra/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162"
},
{
"name": "Python",
"bytes": "28471"
}
],
"symlink_target": ""
}
|
from gwt.ui.HorizontalSplitPanel import (
Factory,
HorizontalSplitPanel,
SplitPanel,
)
|
{
"content_hash": "7adac3278aab73ca396941b440fbb0cd",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 41,
"avg_line_length": 19.8,
"alnum_prop": 0.7272727272727273,
"repo_name": "anandology/pyjamas",
"id": "f4911516878dd7fd2e5571479be39089c8485c0f",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/pyjamas/ui/HorizontalSplitPanel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "325172"
},
{
"name": "PHP",
"bytes": "121841"
},
{
"name": "Python",
"bytes": "6383764"
},
{
"name": "Shell",
"bytes": "19448"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from oslo import messaging
from oslo.utils import excutils
from oslo.utils import importutils
from oslo_concurrency import processutils
import six
from nova import context
from nova.db import base
from nova import exception
from nova.i18n import _LE, _LI, _LW
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
floating_opts = [
cfg.StrOpt('default_floating_pool',
default='nova',
help='Default pool for floating IPs'),
cfg.BoolOpt('auto_assign_floating_ip',
default=False,
help='Autoassigning floating IP to VM'),
cfg.StrOpt('floating_ip_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for floating IPs'),
cfg.StrOpt('instance_dns_manager',
default='nova.network.noop_dns_driver.NoopDNSDriver',
help='Full class name for the DNS Manager for instance IPs'),
cfg.StrOpt('instance_dns_domain',
default='',
help='DNS domain used for instance IPs'),
]
CONF = cfg.CONF
CONF.register_opts(floating_opts)
CONF.import_opt('public_interface', 'nova.network.linux_net')
CONF.import_opt('network_topic', 'nova.network.rpcapi')
class FloatingIP(object):
"""Mixin class for adding floating IP functionality to a manager."""
servicegroup_api = None
def init_host_floating_ips(self):
"""Configures floating ips owned by host."""
admin_context = context.get_admin_context()
try:
floating_ips = objects.FloatingIPList.get_by_host(admin_context,
self.host)
except exception.NotFound:
return
for floating_ip in floating_ips:
if floating_ip.fixed_ip_id:
try:
fixed_ip = floating_ip.fixed_ip
except exception.FixedIpNotFound:
LOG.debug('Fixed ip %s not found', floating_ip.fixed_ip_id)
continue
interface = CONF.public_interface or floating_ip.interface
try:
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
except processutils.ProcessExecutionError:
LOG.debug('Interface %s not found', interface)
raise exception.NoFloatingIpInterface(interface=interface)
def allocate_for_instance(self, context, **kwargs):
"""Handles allocating the floating IP resources for an instance.
calls super class allocate_for_instance() as well
rpc.called by network_api
"""
instance_uuid = kwargs.get('instance_id')
if not uuidutils.is_uuid_like(instance_uuid):
instance_uuid = kwargs.get('instance_uuid')
project_id = kwargs.get('project_id')
# call the next inherited class's allocate_for_instance()
# which is currently the NetworkManager version
# do this first so fixed ip is already allocated
nw_info = super(FloatingIP, self).allocate_for_instance(context,
**kwargs)
if CONF.auto_assign_floating_ip:
context = context.elevated()
# allocate a floating ip
floating_address = self.allocate_floating_ip(context, project_id,
True)
LOG.debug("floating IP allocation for instance "
"|%s|", floating_address,
instance_uuid=instance_uuid, context=context)
# get the first fixed address belonging to the instance
fixed_ips = nw_info.fixed_ips()
fixed_address = fixed_ips[0]['address']
# associate the floating ip to fixed_ip
self.associate_floating_ip(context,
floating_address,
fixed_address,
affect_auto_assigned=True)
# create a fresh set of network info that contains the floating ip
nw_info = self.get_instance_nw_info(context, **kwargs)
return nw_info
def deallocate_for_instance(self, context, **kwargs):
"""Handles deallocating floating IP resources for an instance.
calls super class deallocate_for_instance() as well.
rpc.called by network_api
"""
if 'instance' in kwargs:
instance_uuid = kwargs['instance'].uuid
else:
instance_uuid = kwargs['instance_id']
if not uuidutils.is_uuid_like(instance_uuid):
# NOTE(francois.charlier): in some cases the instance might be
# deleted before the IPs are released, so we need to get
# deleted instances too
instance = objects.Instance.get_by_id(
context.elevated(read_deleted='yes'), instance_uuid)
instance_uuid = instance.uuid
try:
fixed_ips = objects.FixedIPList.get_by_instance_uuid(
context, instance_uuid)
except exception.FixedIpNotFoundForInstance:
fixed_ips = []
# add to kwargs so we can pass to super to save a db lookup there
kwargs['fixed_ips'] = fixed_ips
for fixed_ip in fixed_ips:
fixed_id = fixed_ip.id
floating_ips = objects.FloatingIPList.get_by_fixed_ip_id(context,
fixed_id)
# disassociate floating ips related to fixed_ip
for floating_ip in floating_ips:
address = str(floating_ip.address)
try:
self.disassociate_floating_ip(context,
address,
affect_auto_assigned=True)
except exception.FloatingIpNotAssociated:
LOG.info(_LI("Floating IP %s is not associated. Ignore."),
address)
# deallocate if auto_assigned
if floating_ip.auto_assigned:
self.deallocate_floating_ip(context, address,
affect_auto_assigned=True)
# call the next inherited class's deallocate_for_instance()
# which is currently the NetworkManager version
# call this after so floating IPs are handled first
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
"""Raises if floating ip does not belong to project."""
if context.is_admin:
return
if floating_ip.project_id != context.project_id:
if floating_ip.project_id is None:
LOG.warning(_LW('Address |%(address)s| is not allocated'),
{'address': floating_ip.address})
raise exception.Forbidden()
else:
LOG.warning(_LW('Address |%(address)s| is not allocated '
'to your project |%(project)s|'),
{'address': floating_ip.address,
'project': context.project_id})
raise exception.Forbidden()
def _floating_ip_pool_exists(self, context, name):
"""Returns true if the specified floating ip pool exists. Otherwise,
returns false.
"""
pools = [pool.get('name') for pool in
self.get_floating_ip_pools(context)]
if name in pools:
return True
return False
def allocate_floating_ip(self, context, project_id, auto_assigned=False,
pool=None):
"""Gets a floating ip from the pool."""
# NOTE(tr3buchet): all network hosts in zone now use the same pool
pool = pool or CONF.default_floating_pool
use_quota = not auto_assigned
if not self._floating_ip_pool_exists(context, pool):
raise exception.FloatingIpPoolNotFound()
# Check the quota; can't put this in the API because we get
# called into from other places
try:
if use_quota:
reservations = QUOTAS.reserve(context, floating_ips=1,
project_id=project_id)
except exception.OverQuota:
LOG.warning(_LW("Quota exceeded for %s, tried to allocate "
"floating IP"), context.project_id)
raise exception.FloatingIpLimitExceeded()
try:
floating_ip = objects.FloatingIP.allocate_address(
context, project_id, pool, auto_assigned=auto_assigned)
payload = dict(project_id=project_id, floating_ip=floating_ip)
self.notifier.info(context,
'network.floating_ip.allocate', payload)
# Commit the reservations
if use_quota:
QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
if use_quota:
QUOTAS.rollback(context, reservations,
project_id=project_id)
return floating_ip
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def deallocate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Returns a floating ip to the pool."""
floating_ip = objects.FloatingIP.get_by_address(context, address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
use_quota = not floating_ip.auto_assigned
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is not associated
if floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpAssociated(address=floating_address)
# clean up any associated DNS entries
self._delete_all_entries_for_ip(context,
floating_ip.address)
payload = dict(project_id=floating_ip.project_id,
floating_ip=str(floating_ip.address))
self.notifier.info(context, 'network.floating_ip.deallocate', payload)
project_id = floating_ip.project_id
# Get reservations...
try:
if use_quota:
reservations = QUOTAS.reserve(context,
project_id=project_id,
floating_ips=-1)
else:
reservations = None
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deallocating "
"floating IP"))
rows_updated = objects.FloatingIP.deallocate(context, address)
# number of updated rows will be 0 if concurrently another
# API call has also deallocated the same floating ip
if not rows_updated:
if reservations:
QUOTAS.rollback(context, reservations, project_id=project_id)
else:
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def associate_floating_ip(self, context, floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
Makes sure everything makes sense then calls _associate_floating_ip,
rpc'ing to correct host if i'm not it.
Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that the calling
side has already verified that the fixed_address is legal by
checking access to the instance.
"""
floating_ip = objects.FloatingIP.get_by_address(context,
floating_address)
# handle auto_assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
return
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# disassociate any already associated
orig_instance_uuid = None
if floating_ip.fixed_ip_id:
# find previously associated instance
fixed_ip = floating_ip.fixed_ip
if str(fixed_ip.address) == fixed_address:
# NOTE(vish): already associated to this address
return
orig_instance_uuid = fixed_ip.instance_uuid
self.disassociate_floating_ip(context, floating_address)
fixed_ip = objects.FixedIP.get_by_address(context, fixed_address)
# send to correct host, unless i'm the correct host
network = objects.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
if network.multi_host:
instance = objects.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
host = instance.host
else:
host = network.host
interface = floating_ip.interface
if host == self.host:
# i'm the correct host
self._associate_floating_ip(context, floating_address,
fixed_address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._associate_floating_ip(context,
floating_address, fixed_address, interface, host,
fixed_ip.instance_uuid)
return orig_instance_uuid
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
"""Performs db and driver calls to associate floating ip & fixed ip."""
interface = CONF.public_interface or interface
@utils.synchronized(unicode(floating_address))
def do_associate():
# associate floating ip
floating = objects.FloatingIP.associate(context, floating_address,
fixed_address, self.host)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already associated
return
try:
# gogo driver time
self.l3driver.add_floating_ip(floating_address, fixed_address,
interface, fixed['network'])
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
try:
objects.FloatingIP.disassociate(context,
floating_address)
except Exception:
                        LOG.warning(_LW('Failed to disassociate floating '
                                        'address: %s'), floating_address)
if "Cannot find device" in six.text_type(e):
try:
LOG.error(_LE('Interface %s not found'), interface)
except Exception:
pass
raise exception.NoFloatingIpInterface(
interface=interface)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=floating_address)
self.notifier.info(context,
'network.floating_ip.associate', payload)
do_associate()
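        # @utils.synchronized above gives a per-address critical section, so
        # concurrent associate/disassociate calls for the same floating
        # address on this host cannot interleave. A minimal sketch of the
        # pattern (illustrative only; the real helper lives in nova.utils and
        # also supports external file locks):
        #
        #     _locks = collections.defaultdict(threading.Lock)
        #
        #     def synchronized(name):
        #         def decorator(func):
        #             @functools.wraps(func)
        #             def inner(*args, **kwargs):
        #                 with _locks[name]:
        #                     return func(*args, **kwargs)
        #             return inner
        #         return decorator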
@messaging.expected_exceptions(exception.FloatingIpNotFoundForAddress)
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from its fixed ip.
Makes sure everything makes sense then calls _disassociate_floating_ip,
rpc'ing to correct host if i'm not it.
"""
floating_ip = objects.FloatingIP.get_by_address(context, address)
# handle auto assigned
if not affect_auto_assigned and floating_ip.auto_assigned:
raise exception.CannotDisassociateAutoAssignedFloatingIP()
# make sure project owns this floating ip (allocated)
self._floating_ip_owned_by_project(context, floating_ip)
# make sure floating ip is associated
if not floating_ip.fixed_ip_id:
floating_address = floating_ip.address
raise exception.FloatingIpNotAssociated(address=floating_address)
fixed_ip = objects.FixedIP.get_by_id(context, floating_ip.fixed_ip_id)
# send to correct host, unless i'm the correct host
network = objects.Network.get_by_id(context.elevated(),
fixed_ip.network_id)
interface = floating_ip.interface
if network.multi_host:
instance = objects.Instance.get_by_uuid(
context, fixed_ip.instance_uuid)
service = objects.Service.get_by_host_and_topic(
context.elevated(), instance.host, CONF.network_topic)
if service and self.servicegroup_api.service_is_up(service):
host = instance.host
else:
# NOTE(vish): if the service is down just deallocate the data
# locally. Set the host to local so the call will
# not go over rpc and set interface to None so the
# teardown in the driver does not happen.
host = self.host
interface = None
else:
host = network.host
if host == self.host:
# i'm the correct host
self._disassociate_floating_ip(context, address, interface,
fixed_ip.instance_uuid)
else:
# send to correct host
self.network_rpcapi._disassociate_floating_ip(context, address,
interface, host, fixed_ip.instance_uuid)
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
"""Performs db and driver calls to disassociate floating ip."""
interface = CONF.public_interface or interface
@utils.synchronized(unicode(address))
def do_disassociate():
# NOTE(vish): Note that we are disassociating in the db before we
# actually remove the ip address on the host. We are
# safe from races on this host due to the decorator,
# but another host might grab the ip right away. We
# don't worry about this case because the minuscule
# window where the ip is on both hosts shouldn't cause
# any problems.
floating = objects.FloatingIP.disassociate(context, address)
fixed = floating.fixed_ip
if not fixed:
# NOTE(vish): ip was already disassociated
return
if interface:
# go go driver time
self.l3driver.remove_floating_ip(address, fixed.address,
interface, fixed.network)
payload = dict(project_id=context.project_id,
instance_id=instance_uuid,
floating_ip=address)
self.notifier.info(context,
'network.floating_ip.disassociate', payload)
do_disassociate()
@messaging.expected_exceptions(exception.FloatingIpNotFound)
def get_floating_ip(self, context, id):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return dict(objects.FloatingIP.get_by_id(context, id).iteritems())
def get_floating_pools(self, context):
"""Returns list of floating pools."""
# NOTE(maurosr) This method should be removed in future, replaced by
# get_floating_ip_pools. See bug #1091668
return self.get_floating_ip_pools(context)
def get_floating_ip_pools(self, context):
"""Returns list of floating ip pools."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
pools = objects.FloatingIP.get_pool_names(context)
return [dict(name=name) for name in pools]
def get_floating_ip_by_address(self, context, address):
"""Returns a floating IP as a dict."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return objects.FloatingIP.get_by_address(context, address)
def get_floating_ips_by_project(self, context):
"""Returns the floating IPs allocated to a project."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
return objects.FloatingIPList.get_by_project(context,
context.project_id)
def get_floating_ips_by_fixed_address(self, context, fixed_address):
"""Returns the floating IPs associated with a fixed_address."""
# NOTE(vish): This is no longer used but can't be removed until
# we major version the network_rpcapi.
floating_ips = objects.FloatingIPList.get_by_fixed_address(
context, fixed_address)
return [str(floating_ip.address) for floating_ip in floating_ips]
def _is_stale_floating_ip_address(self, context, floating_ip):
try:
self._floating_ip_owned_by_project(context, floating_ip)
except exception.Forbidden:
return True
        return not floating_ip.get('fixed_ip_id')
def migrate_instance_start(self, context, instance_uuid,
floating_addresses,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if not floating_addresses or (source and source == dest):
return
LOG.info(_LI("Starting migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = objects.FloatingIP.get_by_address(context, address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warning(_LW("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. "
"Will not migrate it "),
{'address': address,
'instance_uuid': instance_uuid})
continue
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.remove_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
            # NOTE(wenjianhn): Make sure this address will not be bound to the
            # public interface when nova-network restarts on the dest node
floating_ip.host = None
floating_ip.save()
def migrate_instance_finish(self, context, instance_uuid,
floating_addresses, host=None,
rxtx_factor=None, project_id=None,
source=None, dest=None):
# We only care if floating_addresses are provided and we're
# switching hosts
if host and not dest:
dest = host
if not floating_addresses or (source and source == dest):
return
LOG.info(_LI("Finishing migration network for instance %s"),
instance_uuid)
for address in floating_addresses:
floating_ip = objects.FloatingIP.get_by_address(context, address)
if self._is_stale_floating_ip_address(context, floating_ip):
LOG.warning(_LW("Floating ip address |%(address)s| no longer "
"belongs to instance %(instance_uuid)s. "
"Will not setup it."),
{'address': address,
'instance_uuid': instance_uuid})
continue
floating_ip.host = dest
floating_ip.save()
interface = CONF.public_interface or floating_ip.interface
fixed_ip = floating_ip.fixed_ip
self.l3driver.add_floating_ip(floating_ip.address,
fixed_ip.address,
interface,
fixed_ip.network)
def _prepare_domain_entry(self, context, domainref):
scope = domainref.scope
if scope == 'private':
this_domain = {'domain': domainref.domain,
'scope': scope,
'availability_zone': domainref.availability_zone}
else:
this_domain = {'domain': domainref.domain,
'scope': scope,
'project': domainref.project_id}
return this_domain
def get_dns_domains(self, context):
domains = []
domain_list = objects.DNSDomainList.get_all(context)
floating_driver_domain_list = self.floating_dns_manager.get_domains()
instance_driver_domain_list = self.instance_dns_manager.get_domains()
for dns_domain in domain_list:
if (dns_domain.domain in floating_driver_domain_list or
dns_domain.domain in instance_driver_domain_list):
domain_entry = self._prepare_domain_entry(context,
dns_domain)
if domain_entry:
domains.append(domain_entry)
else:
LOG.warning(_LW('Database inconsistency: DNS domain |%s| is '
'registered in the Nova db but not visible to '
'either the floating or instance DNS driver. '
'It will be ignored.'), dns_domain.domain)
return domains
def add_dns_entry(self, context, address, name, dns_type, domain):
self.floating_dns_manager.create_entry(name, address,
dns_type, domain)
def modify_dns_entry(self, context, address, name, domain):
self.floating_dns_manager.modify_address(name, address,
domain)
def delete_dns_entry(self, context, name, domain):
self.floating_dns_manager.delete_entry(name, domain)
def _delete_all_entries_for_ip(self, context, address):
domain_list = self.get_dns_domains(context)
for domain in domain_list:
names = self.get_dns_entries_by_address(context,
address,
domain['domain'])
for name in names:
self.delete_dns_entry(context, name, domain['domain'])
def get_dns_entries_by_address(self, context, address, domain):
return self.floating_dns_manager.get_entries_by_address(address,
domain)
def get_dns_entries_by_name(self, context, name, domain):
return self.floating_dns_manager.get_entries_by_name(name,
domain)
def create_private_dns_domain(self, context, domain, av_zone):
objects.DNSDomain.register_for_zone(context, domain, av_zone)
try:
self.instance_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warning(_LW('Domain |%(domain)s| already exists, '
'changing zone to |%(av_zone)s|.'),
{'domain': domain, 'av_zone': av_zone})
def create_public_dns_domain(self, context, domain, project):
objects.DNSDomain.register_for_project(context, domain, project)
try:
self.floating_dns_manager.create_domain(domain)
except exception.FloatingIpDNSExists:
LOG.warning(_LW('Domain |%(domain)s| already exists, '
'changing project to |%(project)s|.'),
{'domain': domain, 'project': project})
def delete_dns_domain(self, context, domain):
objects.DNSDomain.delete_by_domain(context, domain)
self.floating_dns_manager.delete_domain(domain)
class LocalManager(base.Base, FloatingIP):
def __init__(self):
super(LocalManager, self).__init__()
# NOTE(vish): setting the host to none ensures that the actual
# l3driver commands for l3 are done via rpc.
self.host = None
self.servicegroup_api = servicegroup.API()
self.network_rpcapi = network_rpcapi.NetworkAPI()
self.floating_dns_manager = importutils.import_object(
CONF.floating_ip_dns_manager)
self.instance_dns_manager = importutils.import_object(
CONF.instance_dns_manager)
self.notifier = rpc.get_notifier('network', CONF.host)
|
{
"content_hash": "faf27520e557c87727d205ddc888a855",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 79,
"avg_line_length": 45,
"alnum_prop": 0.5580067731011127,
"repo_name": "mgagne/nova",
"id": "bf9e16a61cbc60e1312bef7910ba68a6a92803ca",
"size": "31799",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/network/floating_ips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15421976"
},
{
"name": "Shell",
"bytes": "21612"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('versions', '0018_alter_license_some_rights'),
]
operations = [
migrations.CreateModel(
name='DeniedInstallOrigin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hostname_pattern', models.CharField(help_text='Hostname unix-style pattern to deny.', max_length=255, unique=True)),
('include_subdomains', models.BooleanField(default=False, help_text='Automatically check for subdomains of hostname pattern (Additional check with `*.` prepended to the original pattern).')),
],
),
]
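
# Illustrative sketch (not part of the migration): the help_text above implies
# fnmatch-style hostname matching, with the subdomain check re-running the
# match with '*.' prepended to the original pattern. The helper below is an
# assumption for illustration only.
#
#     from fnmatch import fnmatch
#
#     def is_denied(hostname, pattern, include_subdomains):
#         if fnmatch(hostname, pattern):
#             return True
#         return include_subdomains and fnmatch(hostname, '*.' + pattern)
#
#     is_denied('cdn.example.com', 'example.com', True)  # True via '*.example.com'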
|
{
"content_hash": "f01c17bc875a7affedb8430e188da708",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 207,
"avg_line_length": 40.473684210526315,
"alnum_prop": 0.6332899869960988,
"repo_name": "wagnerand/addons-server",
"id": "76e36d3ef4adc9c698e20d14c950436724403532",
"size": "818",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/olympia/versions/migrations/0019_auto_20211123_1229.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245987"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290334"
},
{
"name": "JavaScript",
"bytes": "749163"
},
{
"name": "Less",
"bytes": "211386"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6780019"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
"""
Pubsub envelope publisher
Author: Guillaume Aubert (gaubert) <guillaume(dot)aubert(at)gmail(dot)com>
"""
import time
import zmq
def main():
"""main method"""
# Prepare our context and publisher
context = zmq.Context()
publisher = context.socket(zmq.PUB)
publisher.bind("tcp://*:5563")
while True:
# Write two messages, each with an envelope and content
publisher.send_multipart([b"A", b"We don't want to see this"])
publisher.send_multipart([b"B", b"We would like to see this"])
time.sleep(1)
# We never get here but clean up anyhow
publisher.close()
context.term()
if __name__ == "__main__":
main()
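
# Companion subscriber, a minimal sketch assuming the publisher above: it
# connects to the same endpoint and subscribes only to the b"B" envelope, so
# zmq drops the b"A" messages before delivery.
def subscriber_sketch():
    """Receive only messages published under the b"B" envelope."""
    context = zmq.Context()
    subscriber = context.socket(zmq.SUB)
    subscriber.connect("tcp://localhost:5563")
    subscriber.setsockopt(zmq.SUBSCRIBE, b"B")  # envelope filter
    while True:
        envelope, content = subscriber.recv_multipart()
        print("[%s] %s" % (envelope, content))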
|
{
"content_hash": "76938ff30174ccab477b7b5fd8df2c3f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 22.35483870967742,
"alnum_prop": 0.6248196248196248,
"repo_name": "soscpd/bee",
"id": "005bd8ce2f4cb0f732810220ad4eb2848567d715",
"size": "693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "root/tests/zguide/examples/Python/psenvpub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "5312"
},
{
"name": "Assembly",
"bytes": "140668"
},
{
"name": "Batchfile",
"bytes": "15927"
},
{
"name": "C",
"bytes": "4031317"
},
{
"name": "C#",
"bytes": "132162"
},
{
"name": "C++",
"bytes": "1118150"
},
{
"name": "CMake",
"bytes": "38365"
},
{
"name": "CSS",
"bytes": "8089"
},
{
"name": "Clojure",
"bytes": "40755"
},
{
"name": "Common Lisp",
"bytes": "72688"
},
{
"name": "Erlang",
"bytes": "39731"
},
{
"name": "F#",
"bytes": "50827"
},
{
"name": "Go",
"bytes": "89098"
},
{
"name": "Groff",
"bytes": "239493"
},
{
"name": "HTML",
"bytes": "1219626"
},
{
"name": "Haskell",
"bytes": "103178"
},
{
"name": "Haxe",
"bytes": "178560"
},
{
"name": "Java",
"bytes": "292976"
},
{
"name": "JavaScript",
"bytes": "22783"
},
{
"name": "Julia",
"bytes": "6156"
},
{
"name": "Lex",
"bytes": "1567"
},
{
"name": "Lua",
"bytes": "466952"
},
{
"name": "Makefile",
"bytes": "236470"
},
{
"name": "OCaml",
"bytes": "9192"
},
{
"name": "Objective-C",
"bytes": "19325"
},
{
"name": "PHP",
"bytes": "373357"
},
{
"name": "Pascal",
"bytes": "74361"
},
{
"name": "Perl",
"bytes": "81419"
},
{
"name": "Python",
"bytes": "360488"
},
{
"name": "QML",
"bytes": "150"
},
{
"name": "QMake",
"bytes": "3132"
},
{
"name": "Racket",
"bytes": "6508"
},
{
"name": "Ruby",
"bytes": "207081"
},
{
"name": "Scala",
"bytes": "60650"
},
{
"name": "Shell",
"bytes": "982085"
},
{
"name": "Tcl",
"bytes": "155117"
},
{
"name": "Yacc",
"bytes": "4700"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import binascii
from twisted.python import log
from twisted.internet.protocol import Factory
from twisted.protocols.basic import Int32StringReceiver
from twisted.internet.error import ConnectionDone
from autobahn.twisted.util import peer2str
from autobahn.wamp.exception import ProtocolError, SerializationError, TransportLost
__all__ = (
'WampRawSocketServerProtocol',
'WampRawSocketClientProtocol',
'WampRawSocketServerFactory',
'WampRawSocketClientFactory'
)
class WampRawSocketProtocol(Int32StringReceiver):
"""
Base class for Twisted-based WAMP-over-RawSocket protocols.
"""
def connectionMade(self):
if self.factory.debug:
log.msg("WampRawSocketProtocol: connection made")
# the peer we are connected to
#
try:
peer = self.transport.getPeer()
except AttributeError:
# ProcessProtocols lack getPeer()
self.peer = "?"
else:
self.peer = peer2str(peer)
# this will hold an ApplicationSession object
# once the RawSocket opening handshake has been
# completed
#
self._session = None
# Will hold the negotiated serializer once the opening handshake is complete
#
self._serializer = None
# Will be set to True once the opening handshake is complete
#
self._handshake_complete = False
# Buffer for opening handshake received bytes.
#
self._handshake_bytes = b''
        # Client-requested maximum length of serialized messages.
#
self._max_len_send = None
def _on_handshake_complete(self):
try:
self._session = self.factory._factory()
self._session.onOpen(self)
except Exception as e:
# Exceptions raised in onOpen are fatal ..
if self.factory.debug:
log.msg("WampRawSocketProtocol: ApplicationSession constructor / onOpen raised ({0})".format(e))
self.abort()
else:
if self.factory.debug:
log.msg("ApplicationSession started.")
def connectionLost(self, reason):
if self.factory.debug:
log.msg("WampRawSocketProtocol: connection lost: reason = '{0}'".format(reason))
try:
wasClean = isinstance(reason.value, ConnectionDone)
self._session.onClose(wasClean)
except Exception as e:
# silently ignore exceptions raised here ..
if self.factory.debug:
log.msg("WampRawSocketProtocol: ApplicationSession.onClose raised ({0})".format(e))
self._session = None
def stringReceived(self, payload):
if self.factory.debug:
log.msg("WampRawSocketProtocol: RX octets: {0}".format(binascii.hexlify(payload)))
try:
for msg in self._serializer.unserialize(payload):
if self.factory.debug:
log.msg("WampRawSocketProtocol: RX WAMP message: {0}".format(msg))
self._session.onMessage(msg)
except ProtocolError as e:
log.msg(str(e))
if self.factory.debug:
log.msg("WampRawSocketProtocol: WAMP Protocol Error ({0}) - aborting connection".format(e))
self.abort()
except Exception as e:
if self.factory.debug:
log.msg("WampRawSocketProtocol: WAMP Internal Error ({0}) - aborting connection".format(e))
self.abort()
def send(self, msg):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.send`
"""
if self.isOpen():
if self.factory.debug:
log.msg("WampRawSocketProtocol: TX WAMP message: {0}".format(msg))
try:
payload, _ = self._serializer.serialize(msg)
except Exception as e:
# all exceptions raised from above should be serialization errors ..
raise SerializationError("WampRawSocketProtocol: unable to serialize WAMP application payload ({0})".format(e))
else:
self.sendString(payload)
if self.factory.debug:
log.msg("WampRawSocketProtocol: TX octets: {0}".format(binascii.hexlify(payload)))
else:
raise TransportLost()
def isOpen(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.isOpen`
"""
return self._session is not None
def close(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.close`
"""
if self.isOpen():
self.transport.loseConnection()
else:
raise TransportLost()
def abort(self):
"""
Implements :func:`autobahn.wamp.interfaces.ITransport.abort`
"""
if self.isOpen():
if hasattr(self.transport, 'abortConnection'):
# ProcessProtocol lacks abortConnection()
self.transport.abortConnection()
else:
self.transport.loseConnection()
else:
raise TransportLost()
class WampRawSocketServerProtocol(WampRawSocketProtocol):
"""
Base class for Twisted-based WAMP-over-RawSocket server protocols.
"""
def dataReceived(self, data):
if self._handshake_complete:
WampRawSocketProtocol.dataReceived(self, data)
else:
remaining = 4 - len(self._handshake_bytes)
self._handshake_bytes += data[:remaining]
if len(self._handshake_bytes) == 4:
if self.factory.debug:
log.msg("WampRawSocketProtocol: opening handshake received - {0}".format(binascii.b2a_hex(self._handshake_bytes)))
                if ord(self._handshake_bytes[0]) != 0x7f:
                    if self.factory.debug:
                        log.msg("WampRawSocketProtocol: invalid magic byte (octet 1) in opening handshake: was 0x{0}, but expected 0x7f".format(binascii.b2a_hex(self._handshake_bytes[0])))
                    self.abort()
                    return
# peer requests us to send messages of maximum length 2**max_len_exp
#
self._max_len_send = 2 ** (9 + (ord(self._handshake_bytes[1]) >> 4))
if self.factory.debug:
log.msg("WampRawSocketProtocol: client requests us to send out most {} bytes per message".format(self._max_len_send))
# client wants to speak this serialization format
#
ser_id = ord(self._handshake_bytes[1]) & 0x0F
if ser_id in self.factory._serializers:
self._serializer = self.factory._serializers[ser_id]
if self.factory.debug:
log.msg("WampRawSocketProtocol: client wants to use serializer {}".format(ser_id))
                else:
                    if self.factory.debug:
                        log.msg("WampRawSocketProtocol: opening handshake - no suitable serializer found (client requested {0}, and we have {1})".format(ser_id, self.factory._serializers.keys()))
                    self.abort()
                    return
                # we request the peer to send messages of maximum length 2**reply_max_len_exp
#
reply_max_len_exp = 24
# send out handshake reply
#
reply_octet2 = chr(((reply_max_len_exp - 9) << 4) | self._serializer.RAWSOCKET_SERIALIZER_ID)
self.transport.write(b'\x7F') # magic byte
self.transport.write(reply_octet2) # max length / serializer
self.transport.write(b'\x00\x00') # reserved octets
self._handshake_complete = True
self._on_handshake_complete()
if self.factory.debug:
log.msg("WampRawSocketProtocol: opening handshake completed", self._serializer)
# consume any remaining data received already ..
#
data = data[remaining:]
if data:
self.dataReceived(data)
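    # Worked example of the handshake arithmetic above (a sketch; the
    # serializer-id meanings follow the WAMP RawSocket spec, not this file):
    #
    #     octet2 = 0xF2                          # received from the client
    #     max_len = 2 ** (9 + (octet2 >> 4))     # == 2 ** 24, i.e. 16 MB
    #     ser_id = octet2 & 0x0F                 # == 2 (MsgPack in the spec)
    #     reply = chr(((24 - 9) << 4) | ser_id)  # == '\xf2', echoed back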
class WampRawSocketClientProtocol(WampRawSocketProtocol):
"""
Base class for Twisted-based WAMP-over-RawSocket client protocols.
"""
def connectionMade(self):
WampRawSocketProtocol.connectionMade(self)
self._serializer = self.factory._serializer
        # we request the peer to send messages of maximum length 2**request_max_len_exp
#
request_max_len_exp = 24
        # send out handshake request
#
request_octet2 = chr(((request_max_len_exp - 9) << 4) | self._serializer.RAWSOCKET_SERIALIZER_ID)
self.transport.write(b'\x7F') # magic byte
self.transport.write(request_octet2) # max length / serializer
self.transport.write(b'\x00\x00') # reserved octets
def dataReceived(self, data):
if self._handshake_complete:
WampRawSocketProtocol.dataReceived(self, data)
else:
remaining = 4 - len(self._handshake_bytes)
self._handshake_bytes += data[:remaining]
if len(self._handshake_bytes) == 4:
if self.factory.debug:
log.msg("WampRawSocketProtocol: opening handshake received - {0}".format(binascii.b2a_hex(self._handshake_bytes)))
                if ord(self._handshake_bytes[0]) != 0x7f:
                    if self.factory.debug:
                        log.msg("WampRawSocketProtocol: invalid magic byte (octet 1) in opening handshake: was 0x{0}, but expected 0x7f".format(binascii.b2a_hex(self._handshake_bytes[0])))
                    self.abort()
                    return
# peer requests us to send messages of maximum length 2**max_len_exp
#
self._max_len_send = 2 ** (9 + (ord(self._handshake_bytes[1]) >> 4))
if self.factory.debug:
log.msg("WampRawSocketProtocol: server requests us to send out most {} bytes per message".format(self._max_len_send))
                # verify the server replied with the serialization format we requested
                #
                ser_id = ord(self._handshake_bytes[1]) & 0x0F
                if ser_id != self._serializer.RAWSOCKET_SERIALIZER_ID:
                    if self.factory.debug:
                        log.msg("WampRawSocketProtocol: opening handshake - no suitable serializer found (server replied {0}, and we requested {1})".format(ser_id, self._serializer.RAWSOCKET_SERIALIZER_ID))
                    self.abort()
                    return
self._handshake_complete = True
self._on_handshake_complete()
if self.factory.debug:
log.msg("WampRawSocketProtocol: opening handshake completed", self._serializer)
# consume any remaining data received already ..
#
data = data[remaining:]
if data:
self.dataReceived(data)
class WampRawSocketFactory(Factory):
"""
Base class for Twisted-based WAMP-over-RawSocket factories.
"""
class WampRawSocketServerFactory(WampRawSocketFactory):
"""
Base class for Twisted-based WAMP-over-RawSocket server factories.
"""
protocol = WampRawSocketServerProtocol
def __init__(self, factory, serializers=None, debug=False):
"""
:param factory: A callable that produces instances that implement
:class:`autobahn.wamp.interfaces.ITransportHandler`
:type factory: callable
:param serializers: A list of WAMP serializers to use (or None for default
serializers). Serializers must implement
:class:`autobahn.wamp.interfaces.ISerializer`.
:type serializers: list
"""
assert(callable(factory))
self._factory = factory
self.debug = debug
if serializers is None:
serializers = []
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
serializers.append(MsgPackSerializer(batched=True))
serializers.append(MsgPackSerializer())
except ImportError:
pass
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
serializers.append(JsonSerializer(batched=True))
serializers.append(JsonSerializer())
except ImportError:
pass
if not serializers:
raise Exception("could not import any WAMP serializers")
self._serializers = {}
for ser in serializers:
self._serializers[ser.RAWSOCKET_SERIALIZER_ID] = ser
class WampRawSocketClientFactory(WampRawSocketFactory):
"""
Base class for Twisted-based WAMP-over-RawSocket client factories.
"""
protocol = WampRawSocketClientProtocol
def __init__(self, factory, serializer=None, debug=False):
"""
:param factory: A callable that produces instances that implement
:class:`autobahn.wamp.interfaces.ITransportHandler`
:type factory: callable
:param serializer: The WAMP serializer to use (or None for default
serializer). Serializers must implement
:class:`autobahn.wamp.interfaces.ISerializer`.
:type serializer: obj
"""
assert(callable(factory))
self._factory = factory
self.debug = debug
if serializer is None:
# try MsgPack WAMP serializer
try:
from autobahn.wamp.serializer import MsgPackSerializer
serializer = MsgPackSerializer()
except ImportError:
pass
if serializer is None:
# try JSON WAMP serializer
try:
from autobahn.wamp.serializer import JsonSerializer
serializer = JsonSerializer()
except ImportError:
pass
if serializer is None:
raise Exception("could not import any WAMP serializer")
self._serializer = serializer
|
{
"content_hash": "e4dd38bc24f59625168d17055850b576",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 206,
"avg_line_length": 37.046875,
"alnum_prop": 0.5901166877548151,
"repo_name": "iffy/AutobahnPython",
"id": "56458d087bdbfb2e93800da757b224228f63b01e",
"size": "15503",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "autobahn/twisted/rawsocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2711"
},
{
"name": "HTML",
"bytes": "86275"
},
{
"name": "JavaScript",
"bytes": "104724"
},
{
"name": "Makefile",
"bytes": "4809"
},
{
"name": "Python",
"bytes": "1304463"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
"""Permission database operations."""
from compass.db.api import database
from compass.db.api import utils
from compass.db import exception
from compass.db import models
SUPPORTED_FIELDS = ['name', 'alias', 'description']
RESP_FIELDS = ['id', 'name', 'alias', 'description']
class PermissionWrapper(object):
def __init__(self, name, alias, description):
self.name = name
self.alias = alias
self.description = description
def to_dict(self):
return {
'name': self.name,
'alias': self.alias,
'description': self.description
}
PERMISSION_LIST_PERMISSIONS = PermissionWrapper(
'list_permissions', 'list permissions', 'list all permissions')
PERMISSION_LIST_SWITCHES = PermissionWrapper(
'list_switches', 'list switches', 'list all switches')
PERMISSION_ADD_SWITCH = PermissionWrapper(
'add_switch', 'add switch', 'add switch')
PERMISSION_DEL_SWITCH = PermissionWrapper(
'delete_switch', 'delete switch', 'delete switch')
PERMISSION_LIST_MACHINES = PermissionWrapper(
'list_machines', 'list machines', 'list machines')
PERMISSION_ADD_MACHINE = PermissionWrapper(
'add_machine', 'add machine', 'add machine')
PERMISSION_DEL_MACHINE = PermissionWrapper(
'delete_machine', 'delete machine', 'delete machine')
PERMISSION_LIST_ADAPTERS = PermissionWrapper(
'list_adapters', 'list adapters', 'list adapters')
PERMISSION_LIST_METADATAS = PermissionWrapper(
'list_metadatas', 'list metadatas', 'list metadatas')
PERMISSIONS = [
PERMISSION_LIST_PERMISSIONS,
PERMISSION_LIST_SWITCHES,
PERMISSION_ADD_SWITCH,
PERMISSION_DEL_SWITCH,
PERMISSION_LIST_MACHINES,
PERMISSION_ADD_MACHINE,
PERMISSION_DEL_MACHINE,
PERMISSION_LIST_ADAPTERS,
PERMISSION_LIST_METADATAS
]
def list_permissions_internal(session, **filters):
"""internal functions used only by other db.api modules."""
return utils.list_db_objects(session, models.Permission, **filters)
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters(optional_support_keys=SUPPORTED_FIELDS)
def list_permissions(lister, **filters):
"""list permissions."""
from compass.db.api import user as user_api
with database.session() as session:
user_api.check_user_permission_internal(
session, lister, PERMISSION_LIST_PERMISSIONS
)
return [
permission.to_dict()
for permission in utils.list_db_objects(
session, models.Permission, **filters
)
]
@utils.wrap_to_dict(RESP_FIELDS)
@utils.supported_filters()
def get_permission(getter, permission_id, **kwargs):
"""get permissions."""
from compass.db.api import user as user_api
with database.session() as session:
user_api.check_user_permission_internal(
session, getter, PERMISSION_LIST_PERMISSIONS
)
permission = utils.get_db_object(
session, models.Permission, id=permission_id
)
return permission.to_dict()
def add_permissions_internal(session):
"""internal functions used by other db.api modules only."""
permissions = []
with session.begin(subtransactions=True):
for permission in PERMISSIONS:
permissions.append(
utils.add_db_object(
session, models.Permission,
True,
permission.name,
alias=permission.alias,
description=permission.description
)
)
return permissions
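
# Usage sketch (illustrative; `current_user` is an assumption): filter keys
# map to SUPPORTED_FIELDS above, and wrap_to_dict trims each result to
# RESP_FIELDS.
#
#     perms = list_permissions(current_user, name='list_switches')
#     # -> [{'id': ..., 'name': 'list_switches',
#     #      'alias': 'list switches', 'description': 'list all switches'}]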
|
{
"content_hash": "f0292fb2231c401789786ecf4447d0d2",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 71,
"avg_line_length": 32.93577981651376,
"alnum_prop": 0.6573816155988857,
"repo_name": "kidchang/compassv2-api",
"id": "3e3166263964b9c08aa64620e797566033ffbea8",
"size": "4178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compass/db/api/permission.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "684186"
},
{
"name": "Ruby",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "65645"
}
],
"symlink_target": ""
}
|
import json
import os
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
from flask import url_for
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
credentials = GoogleCredentials.get_application_default()
api = discovery.build('ml', 'v1',
credentials=credentials, cache_discovery=False)
project = os.environ['GOOGLE_CLOUD_PROJECT']
model_name = os.getenv('MODEL_NAME', 'babyweight')
app = Flask(__name__)
def get_prediction(features):
input_data = {'instances': [features]}
parent = 'projects/%s/models/%s' % (project, model_name)
prediction = api.projects().predict(body=input_data, name=parent).execute()
return prediction['predictions'][0]['weight'][0]
@app.route('/')
def index():
return render_template('index.html')
@app.route('/form')
def input_form():
return render_template('form.html')
@app.route('/api/predict', methods=['POST'])
def predict():
def gender2str(val):
genders = {'unknown': 'Unknown', 'male': 'True', 'female': 'False'}
return genders[val]
def plurality2str(val):
pluralities = {'1': 'Single(1)', '2': 'Twins(2)', '3': 'Triplets(3)'}
if features['is_male'] == 'Unknown' and int(val) > 1:
return 'Multiple(2+)'
return pluralities[val]
data = json.loads(request.data.decode())
mandatory_items = ['baby_gender', 'mother_age',
'plurality', 'gestation_weeks']
for item in mandatory_items:
if item not in data.keys():
return jsonify({'result': 'Set all items.'})
features = {}
features['is_male'] = gender2str(data['baby_gender'])
features['mother_age'] = float(data['mother_age'])
features['plurality'] = plurality2str(data['plurality'])
features['gestation_weeks'] = float(data['gestation_weeks'])
prediction = get_prediction(features)
return jsonify({'result': '{:.2f} lbs.'.format(prediction)})
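
# Request sketch for the endpoint above (host, port and values are
# illustrative assumptions):
#
#     import requests
#     payload = {'baby_gender': 'male', 'mother_age': 28,
#                'plurality': '1', 'gestation_weeks': 39}
#     requests.post('http://localhost:8080/api/predict', json=payload).json()
#     # -> {'result': '7.25 lbs.'} (value depends on the deployed model)
#
# which get_prediction() turns into the ML Engine body
# {'instances': [{'is_male': 'True', 'mother_age': 28.0,
#                 'plurality': 'Single(1)', 'gestation_weeks': 39.0}]}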
|
{
"content_hash": "b8d8b32cb24923563eb1a74194e6c461",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 29.318181818181817,
"alnum_prop": 0.6790697674418604,
"repo_name": "GoogleCloudPlatform/training-data-analyst",
"id": "c23497fc60a47b1686ca9ccb5292101ed24124cc",
"size": "2535",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "courses/machine_learning/deepdive2/production_ml/babyweight/application/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39536"
},
{
"name": "C#",
"bytes": "23445"
},
{
"name": "C++",
"bytes": "30926"
},
{
"name": "CSS",
"bytes": "53087"
},
{
"name": "Dockerfile",
"bytes": "90856"
},
{
"name": "Go",
"bytes": "93755"
},
{
"name": "HCL",
"bytes": "73891"
},
{
"name": "HTML",
"bytes": "2342167"
},
{
"name": "Java",
"bytes": "2441030"
},
{
"name": "JavaScript",
"bytes": "3957504"
},
{
"name": "Jinja",
"bytes": "257585"
},
{
"name": "Jsonnet",
"bytes": "5696"
},
{
"name": "Jupyter Notebook",
"bytes": "242016061"
},
{
"name": "Makefile",
"bytes": "12642"
},
{
"name": "PigLatin",
"bytes": "11558"
},
{
"name": "Pug",
"bytes": "457977"
},
{
"name": "Python",
"bytes": "18543833"
},
{
"name": "R",
"bytes": "68"
},
{
"name": "Scala",
"bytes": "27161"
},
{
"name": "Shell",
"bytes": "763259"
},
{
"name": "TypeScript",
"bytes": "66858"
}
],
"symlink_target": ""
}
|
from django.forms import widgets
from rest_framework import serializers
from inventory.models import InventoryItem
from server.models import *
class InventoryItemSerializer(serializers.ModelSerializer):
class Meta:
model = InventoryItem
exclude = ('machine',)
class BusinessUnitSerializer(serializers.ModelSerializer):
class Meta:
model = BusinessUnit
class MachineGroupSerializer(serializers.ModelSerializer):
#business_unit = BusinessUnitSerializer()
class Meta:
model = MachineGroup
class FactSerializer(serializers.ModelSerializer):
class Meta:
model = Fact
exclude = ('machine',)
class ConditionSerializer(serializers.ModelSerializer):
class Meta:
model = Condition
exclude = ('machine',)
class PendingAppleUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = PendingAppleUpdate
exclude = ('machine',)
class PendingUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = PendingUpdate
exclude = ('machine',)
class MachineSerializer(serializers.ModelSerializer):
facts = FactSerializer(many=True, required=False)
conditions = ConditionSerializer(many=True, required=False)
pending_apple_updates = PendingAppleUpdateSerializer(many=True, required=False)
pending_updates = PendingUpdateSerializer(many=True, required=False)
class Meta:
model = Machine
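
# Usage sketch (illustrative): because the related serializers above exclude
# their 'machine' foreign key, a single MachineSerializer call inlines a
# machine's facts, conditions and pending updates without repeating the
# parent id.
#
#     data = MachineSerializer(Machine.objects.first()).data
#     # -> {'id': ..., 'facts': [...], 'conditions': [...],
#     #     'pending_updates': [...], 'pending_apple_updates': [...], ...}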
|
{
"content_hash": "d76c8760b63c45cd4f1f2a63afce04f6",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 83,
"avg_line_length": 30.1875,
"alnum_prop": 0.7301587301587301,
"repo_name": "chasetb/sal",
"id": "48321da7f3a72c42298c668b65a7f09cf44c2411",
"size": "1449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "192288"
},
{
"name": "HTML",
"bytes": "119776"
},
{
"name": "JavaScript",
"bytes": "683793"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "346909"
},
{
"name": "Shell",
"bytes": "1964"
}
],
"symlink_target": ""
}
|
from django.views.generic import ListView, DetailView, View
from django.views.generic.detail import SingleObjectMixin
from core.views import ObjectApiMixin
from puzzles.models import Puzzle, Source
# Mixins
# Classes
class PuzzleListView(ListView):
model = Puzzle
class PuzzleDetailView(DetailView):
context_object_name = "puzzle"
model = Puzzle
def get_context_data(self, **kwargs):
context = super(PuzzleDetailView, self).get_context_data(**kwargs)
# Get all the clues and number them
puzzle = self.get_object()
context['clues'] = puzzle.get_clues()
return context
# APIs
class PuzzleObjectApiView(ObjectApiMixin, View):
model = Puzzle
|
{
"content_hash": "715b5a58288b7e9f9791ee4d3128775a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 74,
"avg_line_length": 23.6,
"alnum_prop": 0.7146892655367232,
"repo_name": "kpeatt/wordmist",
"id": "e152a81adac482b5b01c6ece9ccb77c8107c8745",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossword/puzzles/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "45478"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import itertools
import json
import os
import re
import shutil
import tempfile
import time
import unittest
import urllib
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from bs4 import BeautifulSoup
import flask
import mock
import PIL.Image
from urlparse import urlparse
from digits.config import config_value
import digits.dataset.images.classification.test_views
import digits.test_views
import digits.webapp
# Must import after importing digit.config
import caffe_pb2
# May be too short on a slow system
TIMEOUT_DATASET = 45
TIMEOUT_MODEL = 60
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(digits.test_views.BaseViewsTest):
"""
Provides some functions
"""
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
-- adjust to number of classes
local nclasses = p.nclasses or 1
-- model should adjust to any 3D input
local nDim = 1
if p.inputShape then p.inputShape:apply(function(x) nDim=nDim*x end) end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- c*h*w -> chw (flattened)
-- set all weights and biases to zero as this speeds learning up
-- for the type of problem we're trying to solve in this test
local linearLayer = nn.Linear(nDim, nclasses)
linearLayer.weight:fill(0)
linearLayer.bias:fill(0)
model:add(linearLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model
}
end
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTest, cls).setUpClass()
if cls.FRAMEWORK=='torch' and not config_value('torch_root'):
raise unittest.SkipTest('Torch not found')
@classmethod
def model_exists(cls, job_id):
return cls.job_exists(job_id, 'models')
@classmethod
def model_status(cls, job_id):
return cls.job_status(job_id, 'models')
@classmethod
def abort_model(cls, job_id):
return cls.abort_job(job_id, job_type='models')
@classmethod
def model_wait_completion(cls, job_id, **kwargs):
kwargs['job_type'] = 'models'
if 'timeout' not in kwargs:
kwargs['timeout'] = TIMEOUT_MODEL
return cls.job_wait_completion(job_id, **kwargs)
@classmethod
def delete_model(cls, job_id):
return cls.delete_job(job_id, job_type='models')
@classmethod
def network(cls):
return cls.TORCH_NETWORK if cls.FRAMEWORK=='torch' else cls.CAFFE_NETWORK
class BaseViewsTestWithDataset(BaseViewsTest,
digits.dataset.images.classification.test_views.BaseViewsTestWithDataset):
"""
Provides a dataset
"""
# Inherited classes may want to override these attributes
CROP_SIZE = None
TRAIN_EPOCHS = 1
SHUFFLE = False
LR_POLICY = None
LR_MULTISTEP_VALUES = None
LEARNING_RATE = None
AUG_FLIP = None
AUG_QUAD_ROT = None
AUG_ROT = None
AUG_SCALE = None
AUG_NOISE = None
AUG_HSV_USE = None
AUG_HSV_H = None
AUG_HSV_S = None
AUG_HSV_V = None
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithDataset, cls).setUpClass()
cls.created_models = []
@classmethod
def tearDownClass(cls):
# delete any created datasets
for job_id in cls.created_models:
cls.delete_model(job_id)
super(BaseViewsTestWithDataset, cls).tearDownClass()
@classmethod
def create_model(cls, network=None, **kwargs):
"""
Create a model
Returns the job_id
Raise RuntimeError if job fails to create
Keyword arguments:
**kwargs -- data to be sent with POST request
"""
if network is None:
network = cls.network()
data = {
'model_name': 'test_model',
'dataset': cls.dataset_id,
'method': 'custom',
'custom_network': network,
'batch_size': 10,
'train_epochs': cls.TRAIN_EPOCHS,
'framework' : cls.FRAMEWORK,
'random_seed': 0xCAFEBABE,
'shuffle': 'true' if cls.SHUFFLE else 'false'
}
if cls.CROP_SIZE is not None:
data['crop_size'] = cls.CROP_SIZE
if cls.LR_POLICY is not None:
data['lr_policy'] = cls.LR_POLICY
if cls.LEARNING_RATE is not None:
data['learning_rate'] = cls.LEARNING_RATE
if cls.LR_MULTISTEP_VALUES is not None:
data['lr_multistep_values'] = cls.LR_MULTISTEP_VALUES
if cls.AUG_FLIP is not None:
data['aug_flip'] = cls.AUG_FLIP
if cls.AUG_QUAD_ROT is not None:
data['aug_quad_rot'] = cls.AUG_QUAD_ROT
if cls.AUG_ROT is not None:
data['aug_rot'] = cls.AUG_ROT
if cls.AUG_SCALE is not None:
data['aug_scale'] = cls.AUG_SCALE
if cls.AUG_NOISE is not None:
data['aug_noise'] = cls.AUG_NOISE
if cls.AUG_HSV_USE is not None:
data['aug_hsv_use'] = cls.AUG_HSV_USE
if cls.AUG_HSV_H is not None:
data['aug_hsv_h'] = cls.AUG_HSV_H
if cls.AUG_HSV_S is not None:
data['aug_hsv_s'] = cls.AUG_HSV_S
if cls.AUG_HSV_V is not None:
data['aug_hsv_v'] = cls.AUG_HSV_V
data.update(kwargs)
request_json = data.pop('json', False)
url = '/models/images/classification'
if request_json:
url += '.json'
rv = cls.app.post(url, data=data)
if request_json:
if rv.status_code != 200:
print json.loads(rv.data)
raise RuntimeError('Model creation failed with %s' % rv.status_code)
data = json.loads(rv.data)
if 'jobs' in data.keys():
return [j['id'] for j in data['jobs']]
else:
return data['id']
# expect a redirect
if not 300 <= rv.status_code <= 310:
print 'Status code:', rv.status_code
s = BeautifulSoup(rv.data, 'html.parser')
div = s.select('div.alert-danger')
if div:
print div[0]
else:
print rv.data
raise RuntimeError('Failed to create dataset - status %s' % rv.status_code)
job_id = cls.job_id_from_response(rv)
assert cls.model_exists(job_id), 'model not found after successful creation'
cls.created_models.append(job_id)
return job_id
class BaseViewsTestWithModel(BaseViewsTestWithDataset):
"""
Provides a model
"""
@classmethod
def setUpClass(cls):
super(BaseViewsTestWithModel, cls).setUpClass()
cls.model_id = cls.create_model(json=True)
assert cls.model_wait_completion(cls.model_id) == 'Done', 'create failed'
class BaseTestViews(BaseViewsTest):
"""
Tests which don't require a dataset or a model
"""
def test_page_model_new(self):
rv = self.app.get('/models/images/classification/new')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
assert 'New Image Classification Model' in rv.data, 'unexpected page format'
def test_nonexistent_model(self):
assert not self.model_exists('foo'), "model shouldn't exist"
def test_visualize_network(self):
rv = self.app.post('/models/visualize-network?framework='+self.FRAMEWORK,
data = {'custom_network': self.network()}
)
s = BeautifulSoup(rv.data, 'html.parser')
if rv.status_code != 200:
body = s.select('body')[0]
if 'InvocationException' in str(body):
raise unittest.SkipTest('GraphViz not installed')
raise AssertionError('POST failed with %s\n\n%s' % (rv.status_code, body))
image = s.select('img')
assert image is not None, "didn't return an image"
def test_customize(self):
rv = self.app.post('/models/customize?network=lenet&framework='+self.FRAMEWORK)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
class BaseTestCreation(BaseViewsTestWithDataset):
"""
Model creation tests
"""
def test_create_json(self):
job_id = self.create_model(json=True)
self.abort_model(job_id)
def test_create_delete(self):
job_id = self.create_model()
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_wait_delete(self):
job_id = self.create_model()
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
def test_create_abort_delete(self):
job_id = self.create_model()
assert self.abort_model(job_id) == 200, 'abort failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
    def test_snapshot_interval_0_5(self):
        job_id = self.create_model(snapshot_interval=0.5)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) > 1, 'should take >1 snapshot'
    def test_snapshot_interval_2(self):
        job_id = self.create_model(train_epochs=4, snapshot_interval=2)
        assert self.model_wait_completion(job_id) == 'Done', 'create failed'
        rv = self.app.get('/models/%s.json' % job_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        assert len(content['snapshots']) == 2, 'should take 2 snapshots'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
config_value('caffe_root')['multi_gpu'],
'multi-GPU enabled')
def test_select_gpu(self):
for index in config_value('gpu_list').split(','):
yield self.check_select_gpu, index
def check_select_gpu(self, gpu_index):
job_id = self.create_model(select_gpu=gpu_index)
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
@unittest.skipIf(
not config_value('gpu_list'),
'no GPUs selected')
@unittest.skipIf(
not config_value('caffe_root')['cuda_enabled'],
'CUDA disabled')
@unittest.skipIf(
not config_value('caffe_root')['multi_gpu'],
'multi-GPU disabled')
def test_select_gpus(self):
# test all possible combinations
gpu_list = config_value('gpu_list').split(',')
for i in xrange(len(gpu_list)):
for combination in itertools.combinations(gpu_list, i+1):
yield self.check_select_gpus, combination
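        # e.g. with gpu_list == ['0', '1'] this yields check_select_gpus for
        # ('0',), ('1',) and ('0', '1'): every non-empty subset of the
        # configured GPUs gets trained once.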
def check_select_gpus(self, gpu_list):
job_id = self.create_model(select_gpus_list=','.join(gpu_list), batch_size=len(gpu_list))
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
def classify_one_for_job(self, job_id, test_misclassification = True):
# carry out one inference test per category in dataset
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % job_id,
data = {
'image_file': image_upload,
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
if test_misclassification:
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_mean_image(self):
# test the creation
job_id = self.create_model(use_mean = 'image')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_pixel(self):
# test the creation
job_id = self.create_model(use_mean = 'pixel')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id)
def test_classify_one_mean_none(self):
# test the creation
job_id = self.create_model(use_mean = 'none')
assert self.model_wait_completion(job_id) == 'Done', 'job failed'
self.classify_one_for_job(job_id, False)
def test_retrain(self):
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options = {
'method': 'previous',
'previous_networks': job1_id,
}
options['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
def test_retrain_twice(self):
# retrain from a job which already had a pretrained model
job1_id = self.create_model()
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
options_2 = {
'method': 'previous',
'previous_networks': job1_id,
}
options_2['%s-snapshot' % job1_id] = content['snapshots'][-1]
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
options_3 = {
'method': 'previous',
'previous_networks': job2_id,
}
options_3['%s-snapshot' % job2_id] = -1
job3_id = self.create_model(**options_3)
assert self.model_wait_completion(job3_id) == 'Done', 'third job failed'
def test_bad_network_definition(self):
if self.FRAMEWORK == 'caffe':
bogus_net = """
layer {
name: "hidden"
type: 'BogusCode'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
elif self.FRAMEWORK == 'torch':
bogus_net = """
local model = BogusCode(0)
return function(params)
return {
model = model
}
end
"""
job_id = self.create_model(json=True, network=bogus_net)
assert self.model_wait_completion(job_id) == 'Error', 'job should have failed'
job_info = self.job_info_html(job_id=job_id, job_type='models')
assert 'BogusCode' in job_info, "job_info: \n%s" % str(job_info)
def test_clone(self):
options_1 = {
'shuffle': True,
'snapshot_interval': 2.0,
'lr_step_size': 33.0,
'lr_inv_power': 0.5,
'lr_inv_gamma': 0.1,
'lr_poly_power': 3.0,
'lr_exp_gamma': 0.9,
'use_mean': 'image',
'lr_multistep_gamma': 0.5,
'lr_policy': 'exp',
'val_interval': 3.0,
'random_seed': 123,
'learning_rate': 0.0125,
'lr_step_gamma': 0.1,
'lr_sigmoid_step': 50.0,
'lr_sigmoid_gamma': 0.1,
'lr_multistep_values': '50,85',
}
job1_id = self.create_model(**options_1)
assert self.model_wait_completion(job1_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job1_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content1 = json.loads(rv.data)
## Clone job1 as job2
options_2 = {
'clone': job1_id,
}
job2_id = self.create_model(**options_2)
assert self.model_wait_completion(job2_id) == 'Done', 'second job failed'
rv = self.app.get('/models/%s.json' % job2_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content2 = json.loads(rv.data)
## These will be different
content1.pop('id')
content2.pop('id')
content1.pop('directory')
content2.pop('directory')
content1.pop('creation time')
content2.pop('creation time')
content1.pop('job id')
content2.pop('job id')
assert (content1 == content2), 'job content does not match'
job1 = digits.webapp.scheduler.get_job(job1_id)
job2 = digits.webapp.scheduler.get_job(job2_id)
assert (job1.form_data == job2.form_data), 'form content does not match'
class BaseTestCreated(BaseViewsTestWithModel):
"""
Tests on a model that has already been created
"""
def test_save(self):
job = digits.webapp.scheduler.get_job(self.model_id)
assert job.save(), 'Job failed to save'
def test_get_snapshot(self):
job = digits.webapp.scheduler.get_job(self.model_id)
task = job.train_task()
f = task.get_snapshot(-1)
assert f, "Failed to load snapshot"
filename = task.get_snapshot_filename(-1)
assert filename, "Failed to get filename"
def test_download(self):
for extension in ['tar', 'zip', 'tar.gz', 'tar.bz2']:
yield self.check_download, extension
def check_download(self, extension):
url = '/models/%s/download.%s' % (self.model_id, extension)
rv = self.app.get(url)
assert rv.status_code == 200, 'download "%s" failed with %s' % (url, rv.status_code)
def test_index_json(self):
rv = self.app.get('/index.json')
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
found = False
for m in content['models']:
if m['id'] == self.model_id:
found = True
break
assert found, 'model not found in list'
def test_model_json(self):
rv = self.app.get('/models/%s.json' % self.model_id)
assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
content = json.loads(rv.data)
assert content['id'] == self.model_id, 'id %s != %s' % (content['id'], self.model_id)
assert content['dataset_id'] == self.dataset_id, 'dataset_id %s != %s' % (content['dataset_id'], self.dataset_id)
assert len(content['snapshots']) > 0, 'no snapshots in list'
def test_edit_name(self):
status = self.edit_job(
self.dataset_id,
name='new name'
)
assert status == 200, 'failed with %s' % status
def test_edit_notes(self):
status = self.edit_job(
self.dataset_id,
notes='new notes'
)
assert status == 200, 'failed with %s' % status
def test_classify_one(self):
# test first image in first category
category = self.imageset_paths.keys()[0]
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
# gets an array of arrays [[confidence, label],...]
predictions = [p.get_text().split() for p in s.select('ul.list-group li')]
assert predictions[0][1] == category, 'image misclassified'
def test_classify_one_json(self):
# test last image in last category
category = self.imageset_paths.keys()[-1]
image_path = self.imageset_paths[category][-1]
image_path = os.path.join(self.imageset_folder, image_path)
with open(image_path,'rb') as infile:
# StringIO wrapping is needed to simulate POST file upload.
image_upload = (StringIO(infile.read()), 'image.png')
rv = self.app.post(
'/models/images/classification/classify_one.json?job_id=%s' % self.model_id,
data = {
'image_file': image_upload,
'show_visualizations': 'y',
}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert data['predictions'][0][0] == category, 'image misclassified'
def test_classify_many(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_invalid_ground_truth(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
# test label_id with -1 and >len(labels)
textfile_images += '%s %s\n' % (image_path, 3*label_id-1)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
def test_classify_many_json(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/classify_many.json?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
assert rv.status_code == 200, 'POST failed with %s' % rv.status_code
data = json.loads(rv.data)
assert 'classifications' in data, 'invalid response'
# verify classification of first image in each category
for category in self.imageset_paths.keys():
image_path = self.imageset_paths[category][0]
image_path = os.path.join(self.imageset_folder, image_path)
prediction = data['classifications'][image_path][0][0]
assert prediction == category, 'image misclassified- predicted %s - expected %s' % (prediction, category)
def test_top_n(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
image_path = os.path.join(self.imageset_folder, image_path)
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data = {'image_list': file_upload}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
            assert key in rv.data, '"%s" not found in the response' % key
def test_top_n_from_folder(self):
textfile_images = ''
label_id = 0
for label, images in self.imageset_paths.iteritems():
for image in images:
image_path = image
textfile_images += '%s %d\n' % (image_path, label_id)
label_id += 1
# StringIO wrapping is needed to simulate POST file upload.
file_upload = (StringIO(textfile_images), 'images.txt')
rv = self.app.post(
'/models/images/classification/top_n?job_id=%s' % self.model_id,
data = {'image_list': file_upload, 'image_folder': self.imageset_folder}
)
s = BeautifulSoup(rv.data, 'html.parser')
body = s.select('body')
assert rv.status_code == 200, 'POST failed with %s\n\n%s' % (rv.status_code, body)
keys = self.imageset_paths.keys()
for key in keys:
            assert key in rv.data, '"%s" not found in the response' % key
class BaseTestDatasetModelInteractions(BaseViewsTestWithDataset):
"""
Test the interactions between datasets and models
"""
# If you try to create a model using a deleted dataset, it should fail
def test_create_model_deleted_dataset(self):
dataset_id = self.create_dataset()
assert self.delete_dataset(dataset_id) == 200, 'delete failed'
assert not self.dataset_exists(dataset_id), 'dataset exists after delete'
try:
model_id = self.create_model(dataset=dataset_id)
except RuntimeError:
return
assert False, 'Should have failed'
# If you try to create a model using a running dataset,
# it should wait to start until the dataset is completed
def test_create_model_running_dataset(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
# Model should be in WAIT status while dataset is running
# Copying functionality from job_wait_completion ...
start_time = time.time()
timeout = TIMEOUT_DATASET
dataset_status = self.dataset_status(dataset_id)
while dataset_status != 'Done':
model_status = self.model_status(model_id)
if model_status == 'Initialized':
# give it some time ...
pass
elif model_status == 'Waiting':
# That's what we were waiting for
break
else:
raise Exception('Model not waiting - "%s"' % model_status)
assert (time.time() - start_time) < timeout, 'Job took more than %s seconds' % timeout
time.sleep(0.5)
dataset_status = self.dataset_status(dataset_id)
# Model should switch to RUN status after dataset is DONE
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
time.sleep(1)
assert self.model_status(model_id) in ['Running', 'Done'], "model didn't start"
self.abort_model(model_id)
# If you try to delete a completed dataset with a dependent model, it should fail
def test_delete_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.dataset_wait_completion(dataset_id) == 'Done', 'dataset creation failed'
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_model(model_id)
# If you try to delete a running dataset with a dependent model, it should fail
def test_delete_running_dataset_dependent_model(self):
dataset_id = self.create_dataset()
model_id = self.create_model(dataset=dataset_id)
assert self.delete_dataset(dataset_id) == 403, 'dataset deletion should not have succeeded'
self.abort_dataset(dataset_id)
self.abort_model(model_id)
class BaseTestCreatedWide(BaseTestCreated):
IMAGE_WIDTH = 20
class BaseTestCreatedTall(BaseTestCreated):
IMAGE_HEIGHT = 20
class BaseTestCreatedCropInForm(BaseTestCreated):
CROP_SIZE = 8
class BaseTestCreatedDataAug(BaseTestCreatedTall):
AUG_FLIP = 'fliplrud'
AUG_QUAD_ROT = 'rotall'
AUG_ROT = 45
AUG_SCALE = 0.07
AUG_NOISE = 0.03
AUG_HSV_USE = True
AUG_HSV_H = 0.02
AUG_HSV_S = 0.04
AUG_HSV_V = 0.06
class BaseTestCreatedCropInNetwork(BaseTestCreated):
CAFFE_NETWORK = \
"""
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
crop_size: 8
}
}
layer {
name: "data"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
crop_size: 8
}
}
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
TORCH_NETWORK = \
"""
return function(p)
local nclasses = p.nclasses or 1
    local croplen = 8
    local channels
if p.inputShape then channels=p.inputShape[1] else channels=1 end
local model = nn.Sequential()
model:add(nn.View(-1):setNumInputDims(3)) -- flatten
local linLayer = nn.Linear(channels*croplen*croplen, nclasses)
linLayer.weight:fill(0)
linLayer.bias:fill(0)
model:add(linLayer) -- chw -> nclasses
model:add(nn.LogSoftMax())
return {
model = model,
croplen = croplen
}
end
"""
################################################################################
# Test classes
################################################################################
class TestCaffeViews(BaseTestViews):
FRAMEWORK = 'caffe'
class TestCaffeCreation(BaseTestCreation):
FRAMEWORK = 'caffe'
class TestCaffeCreatedWideMoreNumOutput(BaseTestCreatedWide):
FRAMEWORK = 'caffe'
CAFFE_NETWORK = \
"""
layer {
name: "hidden"
type: 'InnerProduct'
bottom: "data"
top: "output"
inner_product_param {
num_output: 1000
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
class TestCaffeDatasetModelInteractions(BaseTestDatasetModelInteractions):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInForm(BaseTestCreatedCropInForm):
FRAMEWORK = 'caffe'
class TestCaffeCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
FRAMEWORK = 'caffe'
class TestCaffeCreatedTallMultiStepLR(BaseTestCreatedTall):
FRAMEWORK = 'caffe'
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchViews(BaseTestViews):
FRAMEWORK = 'torch'
class TestTorchCreation(BaseTestCreation):
FRAMEWORK = 'torch'
class TestTorchCreatedUnencodedShuffle(BaseTestCreated):
FRAMEWORK = 'torch'
ENCODING = 'none'
SHUFFLE = True
class TestTorchCreatedHdf5(BaseTestCreated):
FRAMEWORK = 'torch'
BACKEND = 'hdf5'
class TestTorchCreatedTallHdf5Shuffle(BaseTestCreatedTall):
FRAMEWORK = 'torch'
BACKEND = 'hdf5'
SHUFFLE = True
class TestTorchDatasetModelInteractions(BaseTestDatasetModelInteractions):
FRAMEWORK = 'torch'
class TestCaffeLeNet(BaseTestCreated):
FRAMEWORK = 'caffe'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
CAFFE_NETWORK=open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'caffe', 'lenet.prototxt')
).read()
class TestTorchCreatedCropInForm(BaseTestCreatedCropInForm):
FRAMEWORK = 'torch'
class TestTorchCreatedDataAug(BaseTestCreatedDataAug):
FRAMEWORK = 'torch'
class TestTorchCreatedCropInNetwork(BaseTestCreatedCropInNetwork):
FRAMEWORK = 'torch'
class TestTorchCreatedWideMultiStepLR(BaseTestCreatedWide):
FRAMEWORK = 'torch'
LR_POLICY = 'multistep'
LR_MULTISTEP_VALUES = '50,75,90'
class TestTorchLeNet(BaseTestCreated):
FRAMEWORK = 'torch'
IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
TRAIN_EPOCHS = 20
# need more aggressive learning rate
# on such a small dataset
LR_POLICY = 'fixed'
LEARNING_RATE = 0.1
# standard lenet model will adjust to color
# or grayscale images
TORCH_NETWORK=open(
os.path.join(
os.path.dirname(digits.__file__),
'standard-networks', 'torch', 'lenet.lua')
).read()
class TestTorchLeNetHdf5Shuffle(TestTorchLeNet):
BACKEND = 'hdf5'
SHUFFLE = True
class TestPythonLayer(BaseViewsTestWithDataset):
FRAMEWORK = 'caffe'
CAFFE_NETWORK = """\
layer {
name: "hidden"
type: 'InnerProduct'
inner_product_param {
num_output: 500
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
bottom: "data"
top: "output"
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output"
bottom: "label"
top: "loss"
exclude { stage: "deploy" }
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "output"
bottom: "label"
top: "accuracy"
include { stage: "val" }
}
layer {
name: "py_test"
type: "Python"
bottom: "output"
top: "py_test"
python_param {
module: "digits_python_layers"
layer: "PythonLayer"
}
}
layer {
name: "softmax"
type: "Softmax"
bottom: "output"
top: "softmax"
include { stage: "deploy" }
}
"""
def write_python_layer_script(self, filename):
with open(filename, 'w') as f:
f.write("""\
import caffe
import numpy as np
class PythonLayer(caffe.Layer):
def setup(self, bottom, top):
print 'PythonLayer::setup'
if len(bottom) != 1:
raise Exception("Need one input.")
def reshape(self, bottom, top):
print 'PythonLayer::reshape'
top[0].reshape(1)
def forward(self, bottom, top):
print 'PythonLayer::forward'
top[0].data[...] = np.sum(bottom[0].data) / 2. / bottom[0].num
""")
## This test makes a temporary python layer file whose path is set
## as py_layer_server_file. The job creation process copies that
## file to the job_dir. The CAFFE_NETWORK above, requires that
## python script to be in the correct spot. If there is an error
## in the script or if the script is named incorrectly, or does
## not exist in the job_dir, then the test will fail.
def test_python_layer(self):
tmpdir = tempfile.mkdtemp()
py_file = tmpdir + '/py_test.py'
self.write_python_layer_script(py_file)
job_id = self.create_model(python_layer_server_file=py_file)
# remove the temporary python script.
shutil.rmtree(tmpdir)
assert self.model_wait_completion(job_id) == 'Done', 'first job failed'
rv = self.app.get('/models/%s.json' % job_id)
assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
content = json.loads(rv.data)
        assert len(content['snapshots']), 'should have at least one snapshot'
class TestSweepCreation(BaseViewsTestWithDataset):
FRAMEWORK = 'caffe'
"""
Model creation tests
"""
def test_sweep(self):
job_ids = self.create_model(json=True, learning_rate='[0.01, 0.02]', batch_size='[8, 10]')
for job_id in job_ids:
assert self.model_wait_completion(job_id) == 'Done', 'create failed'
assert self.delete_model(job_id) == 200, 'delete failed'
assert not self.model_exists(job_id), 'model exists after delete'
|
{
"content_hash": "446c69940678eb11d68499d78e724354",
"timestamp": "",
"source": "github",
"line_count": 1192,
"max_line_length": 121,
"avg_line_length": 34.27768456375839,
"alnum_prop": 0.5811449129934654,
"repo_name": "dongjoon-hyun/DIGITS",
"id": "5ab2f427575d42f32eaac25e5497ea7995ab094e",
"size": "40928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digits/model/images/classification/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2709"
},
{
"name": "HTML",
"bytes": "230775"
},
{
"name": "JavaScript",
"bytes": "133292"
},
{
"name": "Lua",
"bytes": "12103"
},
{
"name": "Python",
"bytes": "747365"
},
{
"name": "Shell",
"bytes": "4903"
}
],
"symlink_target": ""
}
|
from reportlab.tools.docco.rl_doc_utils import *
heading1("Exposing PDF Special Capabilities")
disc("""PDF provides a number of features to make electronic
document viewing more efficient and comfortable, and
our library exposes a number of these.""")
heading2("Forms")
disc("""The Form feature lets you create a block of graphics and text
once near the start of a PDF file, and then simply refer to it on
subsequent pages. If you are dealing with a run of 5000 repetitive
business forms - for example, one-page invoices or payslips - you
only need to store the backdrop once and simply draw the changing
text on each page. Used correctly, forms can dramatically cut
file size and production time, and apparently even speed things
up on the printer.
""")
disc("""Forms do not need to refer to a whole page; anything which
might be repeated often should be placed in a form.""")
disc("""The example below shows the basic sequence used. A real
program would probably define the forms up front and refer to
them from another location.""")
eg(examples.testforms)
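disc("""In outline, the calls involved look like this (a hedged sketch;
the form name $letterhead$ and the canvas variable $c$ are illustrative
only, not taken from the standard example):""")
eg("""
c.beginForm("letterhead")
c.setFont("Helvetica", 20)
c.drawString(inch, 10.5*inch, "ACME Corporation")
c.endForm()
# ...then, while drawing each page...
c.doForm("letterhead")
c.showPage()
""")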
heading2("Links and Destinations")
disc("""PDF supports internal hyperlinks. There is a very wide
range of link types, destination types and events which
can be triggered by a click. At the moment we just
support the basic ability to jump from one part of a document
to another, and to control the zoom level of the window after
the jump. The bookmarkPage method defines a destination that
is the endpoint of a jump.""")
#todo("code example here...")
eg("""
canvas.bookmarkPage(name,
fit="Fit",
left=None,
top=None,
bottom=None,
right=None,
zoom=None
)
""")
disc("""
By default the $bookmarkPage$ method defines the page itself as the
destination. After jumping to an endpoint defined by bookmarkPage,
the PDF browser will display the whole page, scaling it to fit the
screen:""")
eg("""canvas.bookmarkPage(name)""")
disc("""The $bookmarkPage$ method can be instructed to display the
page in a number of different ways by providing a $fit$
parameter.""")
eg("")
t = Table([
['fit','Parameters Required','Meaning'],
['Fit',None,'Entire page fits in window (the default)'],
['FitH','top','Top coord at top of window, width scaled to fit'],
['FitV','left','Left coord at left of window, height scaled to fit'],
['FitR','left bottom right top','Scale window to fit the specified rectangle'],
['XYZ','left top zoom','Fine grained control. If you omit a parameter\nthe PDF browser interprets it as "leave as is"']
])
t.setStyle(TableStyle([
('FONT',(0,0),(-1,1),'Times-Bold',10,12),
('VALIGN',(0,0),(-1,-1),'MIDDLE'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
]))
getStory().append(t)
caption("""Table <seq template="%(Chapter)s-%(Table+)s"/> - Required attributes for different fit types""")
disc("""
Note : $fit$ settings are case-sensitive so $fit="FIT"$ is invalid
""")
disc("""
Sometimes you want the destination of a jump to be some part of a page.
The $FitR$ fit allows you to identify a particular rectangle, scaling
the area to fit the entire window.
""")
disc("""
To set the display to a particular x and y coordinate of the page and to
control the zoom directly use fit="XYZ".
""")
eg("""
canvas.bookmarkPage('my_bookmark',fit="XYZ",left=0,top=200)
""")
disc("""
This destination is at the leftmost of the page with the top of the screen
at position 200. Because $zoom$ was not set the zoom remains at whatever the
user had it set to.
""")
eg("""
canvas.bookmarkPage('my_bookmark',fit="XYZ",left=0,top=200,zoom=2)
""")
disc("""This time zoom is set to expand the page 2X its normal size.""")
disc("""
Note : Both $XYZ$ and $FitR$ fit types require that their positional parameters
($top, bottom, left, right$) be specified in terms of the default user space.
They ignore any geometric transform in effect in the canvas graphic state.
""")
pencilnote()
disc("""
<i>Note:</i> Two previous bookmark methods are supported but deprecated now
that bookmarkPage is so general. These are $bookmarkHorizontalAbsolute$
and $bookmarkHorizontal$.
""")
heading3("Defining internal links")
eg("""
canvas.linkAbsolute(contents, destinationname, Rect=None, addtopage=1, name=None,
thickness=0, color=None, dashArray=None, **kw)
""")
disc("""
The $linkAbsolute$ method defines a starting point for a jump. When the user
is browsing the generated document using a dynamic viewer (such as Acrobat Reader)
and the mouse is clicked while the pointer is within the rectangle specified
by $Rect$, the viewer will jump to the endpoint associated with $destinationname$.
As in the case with $bookmarkHorizontalAbsolute$ the rectangle $Rect$ must be
specified in terms of the default user space. The $contents$ parameter specifies
a chunk of text which displays in the viewer if the user left-clicks on the region.
""")
disc("""
The rectangle $Rect$ must be specified in terms of a tuple ^(x1,y1,x2,y2)^ identifying
the lower left and upper right points of the rectangle in default user space.
""")
disc("""
For example the code
""")
eg("""
canvas.bookmarkPage("Meaning_of_life")
""")
disc("""
defines a location as the whole of the current page with the identifier
$Meaning_of_life$. To create a rectangular link to it while drawing a possibly
different page, we would use this code:
""")
eg("""
canvas.linkAbsolute("Find the Meaning of Life", "Meaning_of_life",
(inch, inch, 6*inch, 2*inch))
""")
disc("""
By default during interactive viewing a rectangle appears around the
link. Use the keyword argument $Border='[0 0 0]'$ to
suppress the visible rectangle around the link during viewing.
For example
""")
eg("""
canvas.linkAbsolute("Meaning of Life", "Meaning_of_life",
(inch, inch, 6*inch, 2*inch), Border='[0 0 0]')
""")
disc("""The $thickness$, $color$ and $dashArray$ arguments may be used alternately
to specify a border if no Border argument is specified.
If Border is specified it must be either a string representation of a PDF
array or a $PDFArray$ (see the pdfdoc module). The $color$ argument (which should be a $Color$ instance) is equivalent to a keyword argument $C$ which should resolve to a PDF color definition (Normally a three entry PDF array).
""")
disc("""The $canvas.linkRect$ method is similar in intent to the $linkAbsolute$ method, but has an extra argument $relative=1$ so is intended to obey the local userspace transformation.""")
heading2("Outline Trees")
disc("""Acrobat Reader has a navigation page which can hold a
document outline; it should normally be visible when you
open this guide. We provide some simple methods to add
outline entries. Typically, a program to make a document
(such as this user guide) will call the method
$canvas.addOutlineEntry(^self, title, key, level=0,
closed=None^)$ as it reaches each heading in the document.
""")
disc("""^title^ is the caption which will be displayed in
the left pane. The ^key^ must be a string which is
unique within the document and which names a bookmark,
as with the hyperlinks. The ^level^ is zero - the
uppermost level - unless otherwise specified, and
it is an error to go down more than one level at a time
(for example to follow a level 0 heading by a level 2
heading). Finally, the ^closed^ argument specifies
whether the node in the outline pane is closed
or opened by default.""")
disc("""The snippet below is taken from the document template
that formats this user guide. A central processor looks
at each paragraph in turn, and makes a new outline entry
when a new chapter occurs, taking the chapter heading text
as the caption text. The key is obtained from the
chapter number (not shown here), so Chapter 2 has the
key 'ch2'. The bookmark to which the
outline entry points aims at the whole page, but it could
as easily have been an individual paragraph.
""")
eg("""
#abridged code from our document template
if paragraph.style == 'Heading1':
self.chapter = paragraph.getPlainText()
key = 'ch%d' % self.chapterNo
self.canv.bookmarkPage(key)
self.canv.addOutlineEntry(paragraph.getPlainText(),
key, 0, 0)
""")
heading2("Page Transition Effects")
eg("""
canvas.setPageTransition(self, effectname=None, duration=1,
direction=0,dimension='H',motion='I')
""")
disc("""
The $setPageTransition$ method specifies how one page will be replaced with
the next. By setting the page transition effect to "dissolve" for example
the current page will appear to melt away when it is replaced by the next
page during interactive viewing. These effects are useful in spicing up
slide presentations, among other places.
Please see the reference manual for more detail on how to use this method.
""")
heading2("Internal File Annotations")
eg("""
canvas.setAuthor(name)
canvas.setTitle(title)
canvas.setSubject(subj)
""")
disc("""
These methods have no visible effect on the document's appearance.
They add internal annotations to the document. These annotations can be
viewed using the "Document Info" menu item of the browser and they also can
be used as a simple standard way of providing basic information about the
document to archiving software which need not parse the entire
file. To find the annotations view the $*.pdf$ output file using a standard
text editor (such as $notepad$ on MS/Windows or $vi$ or $emacs$ on unix) and look
for the string $/Author$ in the file contents.
""")
eg(examples.testannotations)
disc("""
If you want the subject, title, and author to automatically display
in the document when viewed and printed you must paint them onto the
document like any other text.
""")
illust(examples.annotations, "Setting document internal annotations")
|
{
"content_hash": "936c24d2d07a6247d0706fe99b9db3b1",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 227,
"avg_line_length": 37.483636363636364,
"alnum_prop": 0.6961583236321304,
"repo_name": "alexissmirnov/donomo",
"id": "074683d590bb4a3fb5f8ad348028296e22c5eea2",
"size": "10511",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "donomo_archive/lib/reportlab/docs/userguide/ch3_pdffeatures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "360712"
},
{
"name": "Python",
"bytes": "7155992"
},
{
"name": "Shell",
"bytes": "391"
}
],
"symlink_target": ""
}
|
from .resource import Resource
class LocalNetworkGateway(Resource):
"""A common class for general resource information.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param local_network_address_space: Local network site address space.
:type local_network_address_space:
~azure.mgmt.network.v2018_01_01.models.AddressSpace
:param gateway_ip_address: IP address of local network gateway.
:type gateway_ip_address: str
:param bgp_settings: Local network gateway's BGP speaker settings.
:type bgp_settings: ~azure.mgmt.network.v2018_01_01.models.BgpSettings
:param resource_guid: The resource GUID property of the
LocalNetworkGateway resource.
:type resource_guid: str
:ivar provisioning_state: The provisioning state of the
LocalNetworkGateway resource. Possible values are: 'Updating', 'Deleting',
and 'Failed'.
:vartype provisioning_state: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'local_network_address_space': {'key': 'properties.localNetworkAddressSpace', 'type': 'AddressSpace'},
'gateway_ip_address': {'key': 'properties.gatewayIpAddress', 'type': 'str'},
'bgp_settings': {'key': 'properties.bgpSettings', 'type': 'BgpSettings'},
'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, location: str=None, tags=None, local_network_address_space=None, gateway_ip_address: str=None, bgp_settings=None, resource_guid: str=None, etag: str=None, **kwargs) -> None:
super(LocalNetworkGateway, self).__init__(id=id, location=location, tags=tags, **kwargs)
self.local_network_address_space = local_network_address_space
self.gateway_ip_address = gateway_ip_address
self.bgp_settings = bgp_settings
self.resource_guid = resource_guid
self.provisioning_state = None
self.etag = etag
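# A minimal construction sketch (illustrative values only, not part of the
# generated file; AddressSpace is assumed to be importable from the same
# models package):
#
#     gateway = LocalNetworkGateway(
#         location='westus',
#         gateway_ip_address='203.0.113.10',
#         local_network_address_space=AddressSpace(
#             address_prefixes=['10.0.0.0/24']),
#     )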
|
{
"content_hash": "01e0cccadc38a43e199217989210b525",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 213,
"avg_line_length": 43.378787878787875,
"alnum_prop": 0.6423332169053441,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "5ca47b12cb69e97fe1b59f17db2cf48304c01663",
"size": "3337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/local_network_gateway_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import abc
import datetime
from enum import Enum
from collections.abc import Awaitable
from asyncio_mongo import bson
from dateutil.parser import parse
from wdim.orm import query
class Field(metaclass=abc.ABCMeta):
def __init__(self, index=False, unique=False, required=True):
if unique:
index = True
self._name = None
self.index = index
self.unique = unique
# TODO implement
self.required = required
@abc.abstractmethod
def parse(self, value):
        raise NotImplementedError
def to_document(self, value):
return value
def get_value(self, inst):
return inst._data.get(self._name)
def set_value(self, instance, value):
instance._data[self._name] = value
def __get__(self, inst, owner):
assert self._name is not None, 'Field must be attached to a class'
# inst being None indicates that we're being accessed
# by the class we're attached to, return self for ease of use
if inst is None:
return self
return self.get_value(inst)
def __set__(self, instance, value, override=False):
assert self._name is not None, 'Field must be attached to a class'
if not override:
raise ValueError('Fields are read-only')
return self.set_value(instance, value)
def __eq__(self, value):
return query.Equals(self, value)
def __ne__(self, value):
pass
def __lt__(self, value):
pass
def __gt__(self, value):
pass
def __ge__(self, value):
pass
def __le__(self, value):
pass
class ObjectIdField(Field):
def parse(self, value):
if value is None:
return bson.ObjectId()
if isinstance(value, bson.ObjectId):
return value
return bson.ObjectId(value)
class StringField(Field):
def parse(self, value):
if value is None:
return ''
return str(value)
class BoolField(Field):
def parse(self, value):
return bool(value)
class DictField(Field):
def parse(self, value):
if value is None:
return {}
return dict(value)
class DatetimeField(Field):
def parse(self, value):
if value is None and not self.required:
return None
if isinstance(value, str):
return parse(value)
if isinstance(value, datetime.datetime):
return value
raise Exception('{} must be str or datetime, got {}'.format(self._name, value))
class EnumField(Field):
def __init__(self, enum, **kwargs):
assert issubclass(enum, Enum), 'enum must be of type Enum, got {!r}'.format(enum)
self._enum = enum
super().__init__(**kwargs)
def parse(self, value):
self._enum(value)
return value
def to_document(self, value):
return value.value
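# AwaitableLoaderFactory wraps a raw stored value (typically a bson.ObjectId)
# in a dynamically created subclass that is also Awaitable, so that awaiting a
# ForeignField's value lazily loads the referenced document via klass.load().
# The _original_class guard prevents an already-wrapped value from being
# wrapped a second time.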
def AwaitableLoaderFactory(klass, type_, *args, **kwargs):
assert isinstance(klass, type), 'klass must be a type, got {!r}'.format(klass)
if getattr(type_, '_original_class', None):
return args[0]
class _Awaitable(type_, Awaitable):
_is_coroutine = True
_original_class = type_
def __await__(self):
return klass.load(self).__await__()
__iter__ = __await__
return _Awaitable(*args, **kwargs)
class ForeignField(Field):
@property
def foreign_class(self):
return self.class_getter()
def __init__(self, class_getter, **kwargs):
self.class_getter = class_getter
super().__init__(**kwargs)
def parse(self, value):
if value is None and not self.required:
return None
if isinstance(value, self.foreign_class):
return value._id
if isinstance(value, dict):
value = value['_id']
return self.foreign_class._id.parse(value)
def get_value(self, inst):
_id = super().get_value(inst)
if _id is None:
return None
return AwaitableLoaderFactory(self.foreign_class, type(_id), _id)
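# A minimal usage sketch (the model class below is hypothetical; attaching
# fields, i.e. setting each Field's _name, is handled elsewhere in the ORM):
#
#     class Document:
#         _id = ObjectIdField(index=True, unique=True)
#         title = StringField()
#         created = DatetimeField(required=False)
#
#     StringField().parse(42)              # -> '42'
#     DatetimeField().parse('2015-01-01')  # -> datetime.datetime(2015, 1, 1, 0, 0)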
|
{
"content_hash": "6b165851d28d1d72eac90820fa0e3c6d",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 89,
"avg_line_length": 23.426136363636363,
"alnum_prop": 0.5891341256366723,
"repo_name": "chrisseto/Still",
"id": "e5af186dde20832c71e2295e309a3dfe77277999",
"size": "4123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wdim/orm/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "57236"
}
],
"symlink_target": ""
}
|
"""Unique operator"""
import tvm
from tvm import te, tir
from ...te import hybrid
from .scan import cumsum
from .sort import sort, argsort
from ..utils import ceil_div
def _get_max_threads(batch_size):
target = tvm.target.Target.current()
max_threads = tvm.target.Target.current(allow_none=False).max_num_threads
if "vulkan" in str(target) and not isinstance(batch_size, tvm.tir.IntImm):
# SPIR-V does not support dynamic thread group size
return max_threads
return tir.min(batch_size, max_threads)
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
"""Low level IR to calculate adjacent difference in an 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
output: Buffer
A buffer to store adjacent difference, of the same shape as data. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
binop: function, optional
A binary associative op to use for calculating adjacent difference. The function takes two
TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
output_ptr = ib.buffer_ptr(output)
batch_size = data.shape[0]
max_threads = _get_max_threads(batch_size)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
output_ptr[tid] = 0
with ib.else_scope():
output_ptr[tid] = tir.Cast(output.dtype, binop(data_ptr[tid], data_ptr[tid - 1]))
return ib.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
"""Function calculate adjacent difference in an 1-D array.
Parameters
----------
data : tvm.te.Tensor
Input 1-D tensor.
output_dtype : str
The output tensor data type.
binop: function, optional
A binary associative op to use for calculating difference. The function takes two
TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
Returns
-------
output : tvm.te.Tensor
1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
"""
data_buf = tir.decl_buffer(data.shape, data.dtype, "sorted_data_buf", data_alignment=8)
output_buf = tir.decl_buffer(data.shape, out_dtype, "output_buf", data_alignment=8)
return te.extern(
[data.shape],
[data],
lambda ins, outs: _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop),
dtype=[out_dtype],
in_buffers=[data_buf],
out_buffers=[output_buf],
name="_calc_adjacent_diff",
tag="_calc_adjacent_diff_gpu",
)
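# Worked example: for sorted data [1, 1, 2, 3, 3] and binop=tir.NE the adjacent
# difference is [0, 0, 1, 1, 0]; its inclusive scan [0, 0, 1, 2, 2] labels each
# element with the index of its unique group, and the last entry plus one
# (here 3) is the number of unique elements.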
@hybrid.script
def _calc_num_unique(inc_scan):
"""Helper function to get the number of unique elements fron inc_scan tensor"""
output = output_tensor((1,), "int32")
for i in bind("threadIdx.x", 1):
output[i] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
return output
def _calc_unique_ir(
data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
"""Low level IR to calculate unique elements, inverse indices, and counts (optional) of
    unique elements of a 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
index_converter (optional) : Buffer
An optional index converter that transforms the unique element index
such that new_idx = index_converter[old_idx].
unique_elements : Buffer
A buffer that stores the unique elements.
inverse_indices : Buffer
        A buffer that stores the index of each input data element in the unique element array.
counts (optional) : Buffer
A buffer that stores the count of each unique element.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
unique_elements_ptr = ib.buffer_ptr(unique_elements)
inverse_indices_ptr = ib.buffer_ptr(inverse_indices)
index_converter_ptr = None
if isinstance(index_converter, tir.Buffer):
index_converter_ptr = ib.buffer_ptr(index_converter)
if isinstance(counts, tir.Buffer):
counts_ptr = ib.buffer_ptr(counts)
# use indices_ptr as a tmp buffer to store tids with inc_scan[tid] != inc_scan[tid-1]
unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)
batch_size = data.shape[0]
max_threads = _get_max_threads(batch_size)
# if need to return counts
if isinstance(counts, tir.Buffer):
num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
num_elements = data.shape[0]
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
unique_seq_indices_ptr[num_unique - 1] = num_elements
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
unique_seq_indices_ptr[inc_scan_ptr[tid] - 1] = tid
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < num_unique):
unique_idx = tid if not index_converter_ptr else index_converter_ptr[tid]
with ib.if_scope(tid == 0):
counts_ptr[unique_idx] = unique_seq_indices_ptr[tid]
with ib.else_scope():
counts_ptr[unique_idx] = (
unique_seq_indices_ptr[tid] - unique_seq_indices_ptr[tid - 1]
)
# calculate unique elements and inverse indices
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
data_idx = argsorted_indices_ptr[tid]
unique_idx = (
inc_scan_ptr[tid]
if not index_converter_ptr
else index_converter_ptr[inc_scan_ptr[tid]]
)
inverse_indices_ptr[data_idx] = unique_idx
with ib.if_scope(tid == 0):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
return ib.get()
def _calc_first_occurence_ir(argsorted_indices, inc_scan, first_occurence):
"""Low level IR to calculate the first occurence of each unique element in the input data.
Parameters
----------
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
first_occurence : Buffer
        A buffer that stores the first occurrence of each unique element in the input data.
"""
ib = tir.ir_builder.create()
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
first_occurence_ptr = ib.buffer_ptr(first_occurence)
batch_size = argsorted_indices.shape[0]
max_threads = _get_max_threads(batch_size)
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
first_occurence_ptr[tid] = batch_size
with ib.new_scope():
nthread_tx = max_threads
nthread_bx = ceil_div(batch_size, max_threads)
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
ib.scope_attr(tx, "thread_extent", nthread_tx)
ib.scope_attr(bx, "thread_extent", nthread_bx)
tid = bx * max_threads + tx
with ib.if_scope(tid < batch_size):
with ib.if_scope(tid == 0):
first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[tid] != inc_scan_ptr[tid - 1]):
first_occurence_ptr[inc_scan_ptr[tid]] = argsorted_indices_ptr[tid]
return ib.get()
def unique(data, is_sorted=True, return_counts=False):
"""
    Find the unique elements of a 1-D tensor. Please note that `output` and `counts` are padded to
    the same length as `data`, and any element with index >= num_unique[0] has an undefined value.
Parameters
----------
data : tvm.te.Tensor
A 1-D tensor of integers.
    is_sorted : bool
Whether to sort the unique elements in ascending order before returning as output.
return_counts : bool
Whether to return the count of each unique element.
Returns
-------
unique : tvm.te.Tensor
A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are fewer unique elements than input elements, the end of the tensor
is padded with zeros.
indices : tvm.te.Tensor
A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurrence in the input data. The end of the tensor is padded
with the length of the input data.
inverse_indices : tvm.te.Tensor
A 1-D tensor. For each entry in data, it contains the index of that data element in the
unique array. (Note that inverse_indices is very similar to indices if output is not
sorted)
num_unique : tvm.te.Tensor
A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
counts (optional) : tvm.te.Tensor
A 1-D tensor containing the count of each unique element in the output.
Examples
--------
.. code-block:: python
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
output = [4, 5, 1, 2, 3, ?, ?, ?]
indices = [0, 1, 2, 3, 4, ?, ?, ?]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
        [output, indices, inverse_indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
output = [4, 5, 1, 2, 3, ?, ?, ?]
indices = [0, 1, 2, 3, 4, ?, ?, ?]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
counts = [2, 2, 1, 1, 2, ?, ?, ?]
        [output, indices, inverse_indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
output = [1, 2, 3, 4, 5, ?, ?, ?]
indices = [2, 3, 4, 0, 1, ?, ?, ?]
inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
num_unique = [5]
"""
sorted_data = sort(data)
argsorted_indices = argsort(data, dtype="int32")
# adjacent difference
adjacent_diff = _calc_adjacent_diff(sorted_data, out_dtype="int32", binop=tir.NE)
# inclusive scan
inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
# total number of unique elements
num_unique_elements = _calc_num_unique(inc_scan)
# buffers
data_buf = tir.decl_buffer(data.shape, data.dtype, "data_buf", data_alignment=8)
argsorted_indices_buf = tir.decl_buffer(
data.shape, "int32", "argsorted_indices_buf", data_alignment=8
)
    inc_scan_buf = tir.decl_buffer(data.shape, "int32", "inc_scan_buf", data_alignment=8)
unique_elements_buf = tir.decl_buffer(
data.shape, data.dtype, "unique_elements_buf", data_alignment=8
)
    inverse_indices_buf = tir.decl_buffer(
data.shape, "int32", "inverse_indices_buf", data_alignment=8
)
# prepare outputs
if return_counts:
counts_buf = tir.decl_buffer(data.shape, "int32", "counts_buf", data_alignment=8)
out_data_shape = [data.shape] * 3
out_buffers = [unique_elements_buf, inverse_indices_buf, counts_buf]
out_dtypes = [data.dtype, "int32", "int32"]
else:
out_data_shape = [data.shape] * 2
out_buffers = [unique_elements_buf, inverse_indices_buf]
out_dtypes = [data.dtype, "int32"]
# prepare inputs and fcompute
# calculate first occurence
first_occurence_buf = tir.decl_buffer(
data.shape, "int32", "first_occurence_buf", data_alignment=8
)
first_occurence = te.extern(
[data.shape],
[argsorted_indices, inc_scan],
lambda ins, outs: _calc_first_occurence_ir(ins[0], ins[1], outs[0]),
dtype=["int32"],
in_buffers=[argsorted_indices_buf, inc_scan_buf],
out_buffers=[first_occurence_buf],
name="_calc_first_occurence",
tag="_calc_first_occurence_gpu",
)
if is_sorted:
in_data = [data, argsorted_indices, inc_scan]
in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
indices = first_occurence
else:
        # calculate index converter by sorting unique elements by their first occurrence
argsorted_first_occurence = argsort(first_occurence, dtype="int32")
index_converter = argsort(argsorted_first_occurence, dtype="int32")
index_converter_buf = tir.decl_buffer(
data.shape, "int32", "index_converter_buf", data_alignment=8
)
in_data = [data, argsorted_indices, inc_scan, index_converter]
in_buffers = [data_buf, argsorted_indices_buf, inc_scan_buf, index_converter_buf]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
indices = sort(first_occurence)
outs = te.extern(
out_data_shape,
in_data,
fcompute,
dtype=out_dtypes,
in_buffers=in_buffers,
out_buffers=out_buffers,
name="_calc_unique",
tag="_calc_unique_gpu",
)
if return_counts:
return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
return [outs[0], indices, outs[1], num_unique_elements]
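# A rough usage sketch (assumes a CUDA-enabled TVM build; illustrative, not
# part of the original file):
#
#     data = te.placeholder((8,), dtype="int32", name="data")
#     outs = unique(data, is_sorted=True, return_counts=True)
#     # outs = [unique_elements, indices, inverse_indices, num_unique, counts]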
|
{
"content_hash": "f5021b641f2a205156c78082575ff2d3",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 99,
"avg_line_length": 40.32835820895522,
"alnum_prop": 0.6042437700468789,
"repo_name": "dmlc/tvm",
"id": "8f78cc5fb9246ca815d0c2e93db4bf204b5b7e66",
"size": "17028",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/tvm/topi/cuda/unique.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
import ctypes
import json
import os
from binascii import hexlify, unhexlify
import pytest
from pyasn1.codec.ber.decoder import decode as ber_decode
from pyasn1.codec.der.decoder import decode as der_decode
from pyasn1.codec.der.encoder import encode as der_encode
from pyasn1.type import namedtype, univ
class EcSignature(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("r", univ.Integer()),
namedtype.NamedType("s", univ.Integer()),
)
class EcKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier()),
namedtype.NamedType("curve_name", univ.ObjectIdentifier()),
)
class EcPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EcKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class EdKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_type", univ.ObjectIdentifier())
)
class EdPublicKey(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType("key_info", EdKeyInfo()),
namedtype.NamedType("public_key", univ.BitString()),
)
class ParseError(Exception):
pass
class NotSupported(Exception):
pass
class DataError(Exception):
pass
class curve_info(ctypes.Structure):
_fields_ = [("bip32_name", ctypes.c_char_p), ("params", ctypes.c_void_p)]
def keys_in_dict(dictionary, keys):
return keys <= set(dictionary.keys())
def parse_eddsa_signature(signature):
if len(signature) != 64:
raise ParseError("Not a valid EdDSA signature")
return signature
def parse_ecdh256_privkey(private_key):
if private_key < 0 or private_key.bit_length() > 256:
raise ParseError("Not a valid 256 bit ECDH private key")
return private_key.to_bytes(32, byteorder="big")
def parse_signed_hex(string):
if len(string) % 2 == 1:
string = "0" + string
number = int(string, 16)
if int(string[0], 16) & 8:
return -number
else:
return number
def parse_result(result):
if result == "valid":
return True
elif result == "invalid":
return False
elif result == "acceptable":
return None
else:
raise DataError()
def is_valid_der(data):
try:
structure, _ = der_decode(data)
return data == der_encode(structure)
except Exception:
return False
def parse_ed_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EdPublicKey())
except Exception:
raise ParseError("Not a BER encoded Edwards curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier("1.3.101.112"):
raise ParseError("Not a BER encoded Edwards curve public key")
public_key = bytes(public_key["public_key"].asOctets())
return public_key
def parse_ec_pubkey(public_key):
try:
public_key, _ = ber_decode(public_key, asn1Spec=EcPublicKey())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
if not public_key["key_info"]["key_type"] == univ.ObjectIdentifier(
"1.2.840.10045.2.1"
):
raise ParseError("Not a BER encoded named elliptic curve public key")
curve_identifier = public_key["key_info"]["curve_name"]
curve_name = get_curve_name_by_identifier(curve_identifier)
if curve_name is None:
raise NotSupported(
"Unsupported named elliptic curve: {}".format(curve_identifier)
)
try:
public_key = bytes(public_key["public_key"].asOctets())
except Exception:
raise ParseError("Not a BER encoded named elliptic curve public key")
return curve_name, public_key
def parse_ecdsa256_signature(signature):
if not is_valid_der(signature):
raise ParseError("Not a valid DER")
try:
signature, _ = der_decode(signature, asn1Spec=EcSignature())
except Exception:
raise ParseError("Not a valid DER encoded ECDSA signature")
try:
r = int(signature["r"]).to_bytes(32, byteorder="big")
s = int(signature["s"]).to_bytes(32, byteorder="big")
signature = r + s
except Exception:
raise ParseError("Not a valid DER encoded 256 bit ECDSA signature")
return signature
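# parse_ecdsa256_signature returns r and s serialized as 32-byte big-endian
# integers concatenated into a fixed 64-byte value.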
def parse_digest(name):
if name == "SHA-256":
return 0
else:
raise NotSupported("Unsupported hash function: {}".format(name))
def get_curve_by_name(name):
lib.get_curve_by_name.restype = ctypes.c_void_p
curve = lib.get_curve_by_name(bytes(name, "ascii"))
if curve is None:
return None
curve = ctypes.cast(curve, ctypes.POINTER(curve_info))
return ctypes.c_void_p(curve.contents.params)
def parse_curve_name(name):
if name == "secp256r1":
return "nist256p1"
elif name == "secp256k1":
return "secp256k1"
elif name == "curve25519":
return "curve25519"
else:
return None
def get_curve_name_by_identifier(identifier):
if identifier == univ.ObjectIdentifier("1.3.132.0.10"):
return "secp256k1"
elif identifier == univ.ObjectIdentifier("1.2.840.10045.3.1.7"):
return "nist256p1"
else:
return None
def chacha_poly_encrypt(key, iv, associated_data, plaintext):
context = bytes(context_structure_length)
tag = bytes(16)
ciphertext = bytes(len(plaintext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_encrypt(context, plaintext, ciphertext, len(plaintext))
lib.rfc7539_finish(context, len(associated_data), len(plaintext), tag)
return ciphertext, tag
def chacha_poly_decrypt(key, iv, associated_data, ciphertext, tag):
context = bytes(context_structure_length)
computed_tag = bytes(16)
plaintext = bytes(len(ciphertext))
lib.rfc7539_init(context, key, iv)
lib.rfc7539_auth(context, associated_data, len(associated_data))
lib.chacha20poly1305_decrypt(context, ciphertext, plaintext, len(ciphertext))
lib.rfc7539_finish(context, len(associated_data), len(ciphertext), computed_tag)
return plaintext if tag == computed_tag else False
def add_pkcs_padding(data):
padding_length = 16 - len(data) % 16
return data + bytes([padding_length] * padding_length)
def remove_pkcs_padding(data):
padding_length = data[-1]
if not (
0 < padding_length <= 16
and data[-padding_length:] == bytes([padding_length] * padding_length)
):
return False
else:
return data[:-padding_length]
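# For example, add_pkcs_padding(b"abc") appends thirteen 0x0d bytes, and
# remove_pkcs_padding() reverses it (returning False on malformed padding).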
def aes_encrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_encrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_encrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_encrypt_key256(key, context)
else:
raise NotSupported("Unsupported key length: {}".format(len(key) * 8))
def aes_cbc_encrypt(key, iv, plaintext):
plaintext = add_pkcs_padding(plaintext)
context = bytes(context_structure_length)
ciphertext = bytes(len(plaintext))
aes_encrypt_initialise(key, context)
lib.aes_cbc_encrypt(
plaintext, ciphertext, len(plaintext), bytes(bytearray(iv)), context
)
return ciphertext
def aes_decrypt_initialise(key, context):
if len(key) == (128 / 8):
lib.aes_decrypt_key128(key, context)
elif len(key) == (192 / 8):
lib.aes_decrypt_key192(key, context)
elif len(key) == (256 / 8):
lib.aes_decrypt_key256(key, context)
else:
raise NotSupported("Unsupported AES key length: {}".format(len(key) * 8))
def aes_cbc_decrypt(key, iv, ciphertext):
context = bytes(context_structure_length)
plaintext = bytes(len(ciphertext))
aes_decrypt_initialise(key, context)
lib.aes_cbc_decrypt(ciphertext, plaintext, len(ciphertext), iv, context)
return remove_pkcs_padding(plaintext)
def load_json_testvectors(filename):
    try:
        with open(os.path.join(testvectors_directory, filename)) as f:
            result = json.load(f)
except Exception:
raise DataError()
return result
def generate_aes(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "AES-CBC-PKCS5":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(test, {"key", "iv", "msg", "ct", "result"}):
raise DataError()
try:
key = unhexlify(test["key"])
iv = unhexlify(test["iv"])
plaintext = unhexlify(test["msg"])
ciphertext = unhexlify(test["ct"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if len(key) not in [128 / 8, 192 / 8, 256 / 8]:
continue
if result is None:
continue
vectors.append(
(
hexlify(key),
hexlify(iv),
hexlify(plaintext),
hexlify(ciphertext),
result,
)
)
return vectors
def generate_chacha_poly(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "CHACHA20-POLY1305":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"key", "iv", "aad", "msg", "ct", "tag", "result"}
):
raise DataError()
try:
key = unhexlify(test["key"])
iv = unhexlify(test["iv"])
associated_data = unhexlify(test["aad"])
plaintext = unhexlify(test["msg"])
ciphertext = unhexlify(test["ct"])
tag = unhexlify(test["tag"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
vectors.append(
(
hexlify(key),
hexlify(iv),
hexlify(associated_data),
hexlify(plaintext),
hexlify(ciphertext),
hexlify(tag),
result,
)
)
return vectors
def generate_curve25519_dh(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "X25519":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"public", "private", "shared", "result", "curve"}
):
raise DataError()
try:
public_key = unhexlify(test["public"])
curve_name = parse_curve_name(test["curve"])
private_key = unhexlify(test["private"])
shared = unhexlify(test["shared"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if curve_name != "curve25519":
continue
if result is None:
continue
vectors.append(
(hexlify(public_key), hexlify(private_key), hexlify(shared), result)
)
return vectors
def generate_ecdh(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "ECDH":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests"}):
raise DataError()
for test in test_group["tests"]:
if not keys_in_dict(
test, {"public", "private", "shared", "result", "curve"}
):
raise DataError()
try:
public_key = unhexlify(test["public"])
curve_name = parse_curve_name(test["curve"])
private_key = parse_signed_hex(test["private"])
shared = unhexlify(test["shared"])
result = parse_result(test["result"])
except Exception:
raise DataError()
try:
private_key = parse_ecdh256_privkey(private_key)
except ParseError:
continue
try:
key_curve_name, public_key = parse_ec_pubkey(public_key)
except NotSupported:
continue
except ParseError:
continue
if key_curve_name != curve_name:
continue
if result is None:
continue
vectors.append(
(
curve_name,
hexlify(public_key),
hexlify(private_key),
hexlify(shared),
result,
)
)
return vectors
def generate_ecdsa(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "ECDSA":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests", "keyDer", "sha"}):
raise DataError()
try:
public_key = unhexlify(test_group["keyDer"])
except Exception:
raise DataError()
try:
curve_name, public_key = parse_ec_pubkey(public_key)
except NotSupported:
continue
except ParseError:
continue
try:
hasher = parse_digest(test_group["sha"])
except NotSupported:
continue
for test in test_group["tests"]:
if not keys_in_dict(test, {"sig", "msg", "result"}):
raise DataError()
try:
signature = unhexlify(test["sig"])
message = unhexlify(test["msg"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
try:
signature = parse_ecdsa256_signature(signature)
except ParseError:
continue
vectors.append(
(
curve_name,
hexlify(public_key),
hasher,
hexlify(message),
hexlify(signature),
result,
)
)
return vectors
def generate_eddsa(filename):
vectors = []
data = load_json_testvectors(filename)
if not keys_in_dict(data, {"algorithm", "testGroups"}):
raise DataError()
if data["algorithm"] != "EDDSA":
raise DataError()
for test_group in data["testGroups"]:
if not keys_in_dict(test_group, {"tests", "keyDer"}):
raise DataError()
try:
public_key = unhexlify(test_group["keyDer"])
except Exception:
raise DataError()
try:
public_key = parse_ed_pubkey(public_key)
except ParseError:
continue
for test in test_group["tests"]:
if not keys_in_dict(test, {"sig", "msg", "result"}):
raise DataError()
try:
signature = unhexlify(test["sig"])
message = unhexlify(test["msg"])
result = parse_result(test["result"])
except Exception:
raise DataError()
if result is None:
continue
try:
signature = parse_eddsa_signature(signature)
except ParseError:
continue
vectors.append(
(hexlify(public_key), hexlify(message), hexlify(signature), result)
)
return vectors
dir = os.path.abspath(os.path.dirname(__file__))
lib = ctypes.cdll.LoadLibrary(os.path.join(dir, "libtrezor-crypto.so"))
testvectors_directory = os.path.join(dir, "wycheproof/testvectors")
context_structure_length = 1024
ecdh_vectors = generate_ecdh("ecdh_test.json")
curve25519_dh_vectors = generate_curve25519_dh("x25519_test.json")
eddsa_vectors = generate_eddsa("eddsa_test.json")
ecdsa_vectors = (
generate_ecdsa("ecdsa_test.json")
+ generate_ecdsa("ecdsa_secp256k1_sha256_test.json")
+ generate_ecdsa("ecdsa_secp256r1_sha256_test.json")
)
ecdh_vectors = (
generate_ecdh("ecdh_test.json")
+ generate_ecdh("ecdh_secp256k1_test.json")
+ generate_ecdh("ecdh_secp256r1_test.json")
)
chacha_poly_vectors = generate_chacha_poly("chacha20_poly1305_test.json")
aes_vectors = generate_aes("aes_cbc_pkcs5_test.json")
@pytest.mark.parametrize("public_key, message, signature, result", eddsa_vectors)
def test_eddsa(public_key, message, signature, result):
public_key = unhexlify(public_key)
signature = unhexlify(signature)
message = unhexlify(message)
computed_result = (
lib.ed25519_sign_open(message, len(message), public_key, signature) == 0
)
assert result == computed_result
@pytest.mark.parametrize(
"curve_name, public_key, hasher, message, signature, result", ecdsa_vectors
)
def test_ecdsa(curve_name, public_key, hasher, message, signature, result):
curve = get_curve_by_name(curve_name)
if curve is None:
raise NotSupported("Curve not supported: {}".format(curve_name))
public_key = unhexlify(public_key)
signature = unhexlify(signature)
message = unhexlify(message)
computed_result = (
lib.ecdsa_verify(curve, hasher, public_key, signature, message, len(message))
== 0
)
assert result == computed_result
@pytest.mark.parametrize(
"public_key, private_key, shared, result", curve25519_dh_vectors
)
def test_curve25519_dh(public_key, private_key, shared, result):
public_key = unhexlify(public_key)
private_key = unhexlify(private_key)
shared = unhexlify(shared)
computed_shared = bytes([0] * 32)
lib.curve25519_scalarmult(computed_shared, private_key, public_key)
computed_result = shared == computed_shared
assert result == computed_result
@pytest.mark.parametrize(
"curve_name, public_key, private_key, shared, result", ecdh_vectors
)
def test_ecdh(curve_name, public_key, private_key, shared, result):
curve = get_curve_by_name(curve_name)
if curve is None:
raise NotSupported("Curve not supported: {}".format(curve_name))
public_key = unhexlify(public_key)
private_key = unhexlify(private_key)
shared = unhexlify(shared)
computed_shared = bytes([0] * 2 * 32)
lib.ecdh_multiply(curve, private_key, public_key, computed_shared)
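    # The library writes an uncompressed point into the buffer; byte 0 is a
    # point-format prefix, so bytes 1..32 (the x coordinate) are the shared
    # secret that the Wycheproof vectors compare against.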
computed_shared = computed_shared[1:33]
computed_result = shared == computed_shared
assert result == computed_result
@pytest.mark.parametrize(
"key, iv, associated_data, plaintext, ciphertext, tag, result", chacha_poly_vectors
)
def test_chacha_poly(key, iv, associated_data, plaintext, ciphertext, tag, result):
key = unhexlify(key)
iv = unhexlify(iv)
associated_data = unhexlify(associated_data)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
tag = unhexlify(tag)
computed_ciphertext, computed_tag = chacha_poly_encrypt(
key, iv, associated_data, plaintext
)
computed_result = ciphertext == computed_ciphertext and tag == computed_tag
assert result == computed_result
computed_plaintext = chacha_poly_decrypt(key, iv, associated_data, ciphertext, tag)
computed_result = plaintext == computed_plaintext
assert result == computed_result
@pytest.mark.parametrize("key, iv, plaintext, ciphertext, result", aes_vectors)
def test_aes(key, iv, plaintext, ciphertext, result):
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
computed_ciphertext = aes_cbc_encrypt(key, iv, plaintext)
computed_result = ciphertext == computed_ciphertext
assert result == computed_result
computed_plaintext = aes_cbc_decrypt(key, bytes(iv), ciphertext)
computed_result = plaintext == computed_plaintext
assert result == computed_result
|
{
"content_hash": "493c6e090cd307d9e27cb9d65249d8df",
"timestamp": "",
"source": "github",
"line_count": 720,
"max_line_length": 87,
"avg_line_length": 29.5125,
"alnum_prop": 0.5962162925314133,
"repo_name": "trezor/trezor-crypto",
"id": "c449b29c8a73dd3687e3b0d103378af0fb9a4c28",
"size": "21271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_wycheproof.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1300253"
},
{
"name": "C++",
"bytes": "40162"
},
{
"name": "HTML",
"bytes": "438"
},
{
"name": "Makefile",
"bytes": "3724"
},
{
"name": "Python",
"bytes": "34917"
},
{
"name": "QMake",
"bytes": "694"
},
{
"name": "Ruby",
"bytes": "2644"
}
],
"symlink_target": ""
}
|
"""
The :mod:`surprise.accuracy` module provides tools for computing accuracy
metrics on a set of predictions.
Available accuracy metrics:
.. autosummary::
:nosignatures:
rmse
mae
fcp
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import defaultdict
import numpy as np
from six import iteritems
def rmse(predictions, verbose=True):
"""Compute RMSE (Root Mean Squared Error).
.. math::
        \\text{RMSE} = \\sqrt{\\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
\\hat{R}}(r_{ui} - \\hat{r}_{ui})^2}.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Root Mean Squared Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
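    Example (illustrative values; uid, iid and details are unused here):
        >>> preds = [(None, None, 3.0, 2.5, None), (None, None, 4.0, 4.5, None)]
        >>> float(rmse(preds, verbose=False))
        0.5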
"""
if not predictions:
raise ValueError('Prediction list is empty.')
mse = np.mean([float((true_r - est)**2)
for (_, _, true_r, est, _) in predictions])
rmse_ = np.sqrt(mse)
if verbose:
print('RMSE: {0:1.4f}'.format(rmse_))
return rmse_
def mae(predictions, verbose=True):
"""Compute MAE (Mean Absolute Error).
.. math::
        \\text{MAE} = \\frac{1}{|\\hat{R}|} \\sum_{\\hat{r}_{ui} \\in
\\hat{R}}|r_{ui} - \\hat{r}_{ui}|
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Mean Absolute Error of predictions.
Raises:
ValueError: When ``predictions`` is empty.
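    Example (illustrative values; uid, iid and details are unused here):
        >>> preds = [(None, None, 3.0, 2.5, None), (None, None, 4.0, 4.5, None)]
        >>> float(mae(preds, verbose=False))
        0.5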
"""
if not predictions:
raise ValueError('Prediction list is empty.')
mae_ = np.mean([float(abs(true_r - est))
for (_, _, true_r, est, _) in predictions])
if verbose:
print('MAE: {0:1.4f}'.format(mae_))
return mae_
def fcp(predictions, verbose=True):
"""Compute FCP (Fraction of Concordant Pairs).
Computed as described in paper `Collaborative Filtering on Ordinal User
Feedback <http://www.ijcai.org/Proceedings/13/Papers/449.pdf>`_ by Koren
and Sill, section 5.2.
Args:
predictions (:obj:`list` of :obj:`Prediction\
<surprise.prediction_algorithms.predictions.Prediction>`):
A list of predictions, as returned by the :meth:`test()
<surprise.prediction_algorithms.algo_base.AlgoBase.test>` method.
verbose: If True, will print computed value. Default is ``True``.
Returns:
The Fraction of Concordant Pairs.
Raises:
ValueError: When ``predictions`` is empty.
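    Example (illustrative values; user 'u' ranks items in the same order as
    the true ratings, so the single ranked pair is concordant):
        >>> preds = [('u', 'i', 3.0, 2.0, None), ('u', 'j', 4.0, 3.0, None)]
        >>> float(fcp(preds, verbose=False))
        1.0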
"""
if not predictions:
raise ValueError('Prediction list is empty.')
predictions_u = defaultdict(list)
nc_u = defaultdict(int)
nd_u = defaultdict(int)
for u0, _, r0, est, _ in predictions:
predictions_u[u0].append((r0, est))
for u0, preds in iteritems(predictions_u):
for r0i, esti in preds:
for r0j, estj in preds:
if esti > estj and r0i > r0j:
nc_u[u0] += 1
if esti >= estj and r0i < r0j:
nd_u[u0] += 1
nc = np.mean(list(nc_u.values())) if nc_u else 0
nd = np.mean(list(nd_u.values())) if nd_u else 0
try:
fcp = nc / (nc + nd)
except ZeroDivisionError:
raise ValueError('cannot compute fcp on this list of prediction. ' +
'Does every user have at least two predictions?')
if verbose:
print('FCP: {0:1.4f}'.format(fcp))
return fcp
|
{
"content_hash": "d8a84649b83dff3acf29956c95d2d53d",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 28.517482517482517,
"alnum_prop": 0.5897498773908779,
"repo_name": "charmoniumQ/Surprise",
"id": "1e9e4855bd229e5188ed9b790d6099eb0ab5ec3c",
"size": "4078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surprise/accuracy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177820"
}
],
"symlink_target": ""
}
|
import pytest
from fixture.application import Application
import json
import os.path
import importlib
import jsonpickle
from fixture.db import DbFixture
fixture = None
target = None
def load_config(file):
global target
if target is None:
config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file)
with open(config_file) as f:
target = json.load(f)
return target
@pytest.fixture
def app(request):
global fixture
browser = request.config.getoption("--browser")
web_config = load_config(request.config.getoption("--target"))["web"]
if fixture is None or not fixture.is_valid():
fixture = Application(browser=browser, baseURL=web_config["baseURL"])
fixture.session.ensure_login(username=web_config["username"], password=web_config["password"])
return fixture
@pytest.fixture(scope="session")
def db(request):
db_config = load_config(request.config.getoption("--target"))["db"]
dbfixture = DbFixture(host=db_config["host"], name=db_config["name"],
user=db_config["user"], password=db_config["password"])
def fin():
dbfixture.destroy()
request.addfinalizer(fin)
return dbfixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture
@pytest.fixture(scope="session", autouse=True)
def check_ui(request):
return request.config.getoption("--check_ui")
def pytest_addoption(parser):
parser.addoption("--browser", action="store", default="firefox")
parser.addoption("--target", action="store", default="target.json")
parser.addoption("--check_ui", action="store_true")
def pytest_generate_tests(metafunc):
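    # Convention used by this suite: a test parameter named data_<name> is
    # parametrized from data/<name>.py (its module-level `testdata` list),
    # while json_<name> is decoded from data/<name>.json via jsonpickle.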
for fixture in metafunc.fixturenames:
if fixture.startswith("data_"):
testdata = load_from_module(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
elif fixture.startswith("json_"):
testdata = load_from_json(fixture[5:])
metafunc.parametrize(fixture, testdata, ids=[str(x) for x in testdata])
def load_from_module(module):
return importlib.import_module("data.%s" % module).testdata
def load_from_json(file):
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/%s.json" % file)) as f:
return jsonpickle.decode(f.read())
|
{
"content_hash": "7eff90786ea1f8c66be4a66d55e059f5",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 100,
"avg_line_length": 30.580246913580247,
"alnum_prop": 0.6729915220024223,
"repo_name": "yupasik/python_training",
"id": "4653f66bd51ec30d9c1d82d823dc63c0a6e76cd0",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Cucumber",
"bytes": "3565"
},
{
"name": "Python",
"bytes": "50254"
}
],
"symlink_target": ""
}
|
from paver.easy import *
from paver.setuputils import setup, find_package_data, find_packages
from paver.setuputils import install_distutils_tasks
from moksha.lib.paver_tasks import *
install_distutils_tasks()
options(
setup=Bunch(
name="moksha.metrics",
version="0.1",
release="1",
url="http://moksha.fedorahosted.org",
description="Moksha Metrics App",
long_description="",
author="Luke Macken",
author_email="lmacken@redhat.com",
license="ASL 2.0",
rpm_name='moksha-metrics',
packages=find_packages(),
package_data=find_package_data(),
namespace_packages=['moksha', 'moksha.apps'],
install_requires=["Moksha"],
entry_points={
'moksha.stream': (
'moksha_metrics = moksha.apps.metrics.streams:MokshaMetricsDataStream',
),
'moksha.consumer': (
'moksha_message_metrics = moksha.apps.metrics.consumers:MokshaMessageMetricsConsumer',
),
'moksha.widget': (
### Commented out from the tw1/tw2 config conversion
#'MokshaTW2CPUUsageWidget = moksha.apps.metrics.widgets:MokshaTW2CPUUsageWidget',
'MokshaMemoryUsageWidget = moksha.apps.metrics.widgets:MokshaMemoryUsageWidget',
'MokshaCPUUsageWidget = moksha.apps.metrics.widgets:MokshaCPUUsageWidget',
'MokshaMessageMetricsWidget = moksha.apps.metrics.widgets:MokshaMessageMetricsWidget',
),
'moksha.global': (
'moksha_socket = moksha.api.widgets:moksha_socket',
),
}
),
)
|
{
"content_hash": "33c36423e3827986b61d55f391bfc0d5",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 102,
"avg_line_length": 39,
"alnum_prop": 0.6106141920095408,
"repo_name": "ralphbean/moksha",
"id": "97bebf04281fd87ba627c05bcc694d332b622d0e",
"size": "1677",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "moksha/apps/metrics/pavement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "1249457"
},
{
"name": "Python",
"bytes": "731300"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform
def create_entity_type_sample(
project: str,
featurestore_id: str,
entity_type_id: str,
description: str = "sample entity type",
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
timeout: int = 300,
):
# The AI Platform services require regional API endpoints, which need to be
# in the same region or multi-region overlap with the Feature Store location.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.FeaturestoreServiceClient(client_options=client_options)
parent = f"projects/{project}/locations/{location}/featurestores/{featurestore_id}"
create_entity_type_request = aiplatform.gapic.CreateEntityTypeRequest(
parent=parent,
entity_type_id=entity_type_id,
entity_type=aiplatform.gapic.EntityType(description=description),
)
lro_response = client.create_entity_type(request=create_entity_type_request)
print("Long running operation:", lro_response.operation.name)
create_entity_type_response = lro_response.result(timeout=timeout)
print("create_entity_type_response:", create_entity_type_response)
# [END aiplatform_create_entity_type_sample]
|
{
"content_hash": "4d94653b748bc35a6e469de6b08663b0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 89,
"avg_line_length": 45.58064516129032,
"alnum_prop": 0.7317763623496107,
"repo_name": "googleapis/python-aiplatform",
"id": "d650b396ccddd9155894bbcf1fa1cfe92bee41f9",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/feature_store_service/create_entity_type_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import re
from collections import defaultdict
from .dialect import Dialect
from .errors import NoSuchLanguageException
KEYWORD_PREFIX_BULLET = '^(\\s*[*+-]\\s*)'
KEYWORD_PREFIX_HEADER = '^(#{1,6}\\s)'
class GherkinInMarkdownTokenMatcher(object):
LANGUAGE_RE = re.compile(r"^\s*#\s*language\s*:\s*([a-zA-Z\-_]+)\s*$")
def __init__(self, dialect_name='en'):
self._default_dialect_name = dialect_name
self._change_dialect(dialect_name)
self.reset()
def reset(self):
if self.dialect_name != self._default_dialect_name:
self._change_dialect(self._default_dialect_name)
self._indent_to_remove = 0
self._active_doc_string_separator = None
self.matched_feature_line=False
def match_FeatureLine(self, token):
if(self.matched_feature_line):
self._set_token_matched(token,None)
# We first try to match "# Feature: blah"
result = self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.feature_keywords, ':', token, 'FeatureLine')
# If we didn't match "# Feature: blah", we still match this line
# as a FeatureLine.
        # The reason for this is that users may not want to be constrained by having this as their first line.
if not result:
self._set_token_matched(token,'FeatureLine',token.line.get_line_text())
self.matched_feature_line=result
return result
def match_RuleLine(self, token):
return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.rule_keywords, ':', token, 'RuleLine')
def match_ScenarioLine(self, token):
return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.scenario_keywords, ':', token, 'ScenarioLine') or self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.scenario_outline_keywords, ':', token, 'ScenarioLine')
def match_BackgroundLine(self, token):
return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.background_keywords, ':', token, 'BackgroundLine')
def match_ExamplesLine(self, token):
return self._match_title_line(KEYWORD_PREFIX_HEADER, self.dialect.examples_keywords, ':', token, 'ExamplesLine')
def match_TableRow(self, token):
        # Gherkin tables must be indented 2-5 spaces in order to be distinguished from non-Gherkin tables
if re.match('^\\s\\s\\s?\\s?\\s?\\|',token.line.get_line_text(0)):
table_cells = token.line.table_cells
if(self._is_gfm_table_separator(table_cells)):
return False
self._set_token_matched(token, 'TableRow', keyword='|',items=token.line.table_cells)
return True
return False
def _is_gfm_table_separator(self, table_cells):
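        # A GFM header separator row consists of cells matching ^:?-+:?$
        # (e.g. '---', ':--', '--:', ':-:'); such rows delimit plain Markdown
        # tables and must not be treated as Gherkin table rows.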
text_of_table_cells = map(lambda x: x['text'], table_cells)
separator_values = list(filter(lambda x: re.match('^:?-+:?$',x),text_of_table_cells))
return len(separator_values) > 0
def match_StepLine(self, token):
nonStarStepKeywords = (self.dialect.given_keywords +
self.dialect.when_keywords +
self.dialect.then_keywords +
self.dialect.and_keywords +
self.dialect.but_keywords)
return self._match_title_line(KEYWORD_PREFIX_BULLET, nonStarStepKeywords, '', token, 'StepLine')
def match_Comment(self, token):
if(token.line.startswith('|')):
table_cells = token.line.table_cells
if(self._is_gfm_table_separator(table_cells)):
return True
return self._set_token_matched(token,None,False)
def match_Empty(self, token):
result = False
if token.line.is_empty():
result = True
if ( not self.match_TagLine(token) and
not self.match_FeatureLine(token) and
not self.match_ScenarioLine(token) and
not self.match_BackgroundLine(token) and
not self.match_ExamplesLine(token) and
not self.match_RuleLine(token) and
not self.match_TableRow(token) and
not self.match_Comment(token) and
not self.match_Language(token) and
not self.match_DocStringSeparator(token) and
not self.match_EOF(token) and
not self.match_StepLine(token)
):
# neutered
result = True
if(result):
self._set_token_matched(token, 'Empty', indent=0)
return result
return False
# We've made a deliberate choice not to support `# language: [ISO 639-1]` headers or similar
# in Markdown. Users should specify a language globally.
def match_Language(self, token):
if not token:
raise ValueError('no token')
return False
def match_TagLine(self, token):
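        # In Gherkin-flavoured Markdown, tags are written as inline code
        # spans, e.g. `@smoke` `@wip`, so every backticked @-word is collected.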
tags = []
matching_tags = re.finditer('`(@[^`]+)`', token.line.get_line_text())
idx=0
for match in matching_tags:
tags.append({
'column': token.line.indent + match.start(idx) + 2,
'text': match.group(1)
})
if(len(tags) == 0):
return False
self._set_token_matched(token, 'TagLine', items=tags)
return True
def match_DocStringSeparator(self, token):
if not self._active_doc_string_separator:
# open
return (self._match_DocStringSeparator(token, '"""', True) or
self._match_DocStringSeparator(token, '````', True) or self._match_DocStringSeparator(token, '```', True))
else:
# close
return self._match_DocStringSeparator(token, self._active_doc_string_separator, False)
def _match_DocStringSeparator(self, token, separator, is_open):
if not token.line.startswith(separator):
return False
content_type = ''
if is_open:
content_type = token.line.get_rest_trimmed(len(separator))
self._active_doc_string_separator = separator
self._indent_to_remove = token.line.indent
else:
self._active_doc_string_separator = None
self._indent_to_remove = 0
# TODO: Use the separator as keyword. That's needed for pretty printing.
self._set_token_matched(token, 'DocStringSeparator', content_type, separator)
return True
def match_Other(self, token):
# take the entire line, except removing DocString indents
text = token.line.get_line_text(self._indent_to_remove)
self._set_token_matched(token, 'Other', self._unescaped_docstring(text), indent=0)
return True
def match_EOF(self, token):
if not token.eof():
return False
self._set_token_matched(token, 'EOF')
return True
def _match_title_line(self, prefix, keywords, keywordSuffix, token, token_type):
keywords_or_list="|".join(map(lambda x: re.escape(x), keywords))
match = re.search(u'{}({}){}(.*)'.format(prefix, keywords_or_list, keywordSuffix), token.line.get_line_text())
indent = token.line.indent
result = False
if(match):
matchedKeyword = match.group(2)
indent += len(match.group(1))
self._set_token_matched(token, token_type, match.group(3).strip(), matchedKeyword, indent=indent)
return True
return False
def _set_token_matched(self, token, matched_type, text=None,
keyword=None, keyword_type=None, indent=None, items=None):
if items is None:
items = []
token.matched_type = matched_type
# text == '' should not result in None
token.matched_text = text.rstrip('\r\n') if text is not None else None
token.matched_keyword = keyword
token.matched_keyword_type = keyword_type
if indent is not None:
token.matched_indent = indent
else:
token.matched_indent = token.line.indent if token.line else 0
token.matched_items = items
token.location['column'] = token.matched_indent + 1
token.matched_gherkin_dialect = self.dialect_name
def _change_dialect(self, dialect_name, location=None):
dialect = Dialect.for_name(dialect_name)
if not dialect:
raise NoSuchLanguageException(dialect_name, location)
self.dialect_name = dialect_name
self.dialect = dialect
self.keyword_types = defaultdict(list)
for keyword in self.dialect.given_keywords:
self.keyword_types[keyword].append('Context')
for keyword in self.dialect.when_keywords:
self.keyword_types[keyword].append('Action')
for keyword in self.dialect.then_keywords:
self.keyword_types[keyword].append('Outcome')
for keyword in self.dialect.and_keywords + self.dialect.but_keywords:
self.keyword_types[keyword].append('Conjunction')
def _unescaped_docstring(self, text):
if self._active_doc_string_separator == '"""':
return text.replace('\\"\\"\\"', '"""')
elif self._active_doc_string_separator == '```':
return text.replace('\\`\\`\\`', '```')
else:
return text
|
{
"content_hash": "a84f8c0e18601b7252fd414d87c4412b",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 237,
"avg_line_length": 40.34199134199134,
"alnum_prop": 0.6069320742568945,
"repo_name": "cucumber/gherkin-python",
"id": "31720018058a3d21679b69ae1e911ff95b420381",
"size": "9319",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "gherkin/token_matcher_markdown.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6854"
},
{
"name": "Makefile",
"bytes": "4083"
},
{
"name": "Python",
"bytes": "227759"
},
{
"name": "Shell",
"bytes": "1087"
}
],
"symlink_target": ""
}
|
import time
import pymongo
from base import *
import bnw.core.bnw_objects as objs
def get_user_bl(request, use_bl=False):
"""Return authed user blacklist or simply an empty list
if user not authed.
:param use_bl: default False. Whether we should return actual
blacklist or just empty list.
"""
if use_bl and request.user:
bl = request.user.get('blacklist', [])
result = {'tag':set(), 'user':set(), 'club': set()}
for el in bl:
result[el[0]].add(el[1])
return result
else:
return []
@defer.inlineCallbacks
def set_subscriptions_info(request, messages):
"""Add 'subscribed' param for each message which
indicate do the user subscribed on the message or not.
Return updated list of messages (update in place actually!).
For non-authed users return non-modified list.
:param request: BnW request object.
"""
if not request.user:
defer.returnValue(messages)
user = request.user['name']
ids = [m['id'] for m in messages]
subscriptions = yield objs.Subscription.find({
'user': user, 'type': 'sub_message', 'target': {'$in': ids}})
sub_ids = [s['target'] for s in subscriptions]
for msg in messages:
msg['subscribed'] = True if msg['id'] in sub_ids else False
defer.returnValue(messages)
def replace_banned(regions, message, kind='message'):
banned_regions = regions.intersection(set(message.get('banned_in', [])))
if len(banned_regions) > 0:
banmsg = 'This %s is banned in regions: %s' % (kind, ','.join(banned_regions))
message['banned'] = True
message['text'] = '**'+banmsg+'**'
banhtml = '''<span style='color: #f00;'>'''+banmsg+'</span>'
message['html'] = { 'secure': [banhtml, []], 'insecure': [banhtml, []] }
return message
@defer.inlineCallbacks
def showSearch(parameters, page, request):
messages = [x.filter_fields() for x in (yield objs.Message.find_sort(
parameters, [('date', pymongo.DESCENDING)], limit=20, skip=page * 20))]
messages = yield set_subscriptions_info(request, messages)
regions = request.regions
for message in messages:
replace_banned(regions, message)
messages.reverse()
defer.returnValue(dict(
ok=True, format="messages", cache=5, cache_public=True,
messages=messages))
@defer.inlineCallbacks
def showComment(commentid, request):
comment = yield objs.Comment.find_one({'id': commentid})
if comment is None:
defer.returnValue(
dict(ok=False, desc='No such comment',
cache=5, cache_public=True)
)
replace_banned(request.regions, comment, 'comment')
defer.returnValue(
dict(ok=True, format='comment', cache=5, cache_public=True,
comment=comment.filter_fields(),
))
@defer.inlineCallbacks
def showComments(msgid, request, bl=None, after=''):
message = yield objs.Message.find_one({'id': msgid})
if message is None:
defer.returnValue(dict(
ok=False, desc='No such message', cache=5, cache_public=True))
if request.user:
user = request.user['name']
subscribed = yield objs.Subscription.count({
'user': user, 'type': 'sub_message', 'target': msgid})
message['subscribed'] = bool(subscribed)
qdict = {'message': msgid.upper()}
if bl:
qdict['user'] = {'$nin': list(bl['user'])}
if after:
after_comment = yield objs.Comment.find_one({'id':msgid+'/'+after.split('/')[-1]})
if after_comment:
qdict['date'] = {'$gte': after_comment['date']}
regions = request.regions
replace_banned(regions, message)
if message.get('banned', False):
comments = []
else:
comments = list((yield objs.Comment.find_sort(
qdict, [('date', pymongo.ASCENDING)], limit=10000)))
for comment in comments:
replace_banned(regions, comment, 'comment')
defer.returnValue(dict(
ok=True, format='message_with_replies', cache=5, cache_public=True,
msgid=msgid, message=message.filter_fields(),
replies=[comment.filter_fields() for comment in comments]))
@check_arg(message=MESSAGE_COMMENT_RE, page='[0-9]+')
@defer.inlineCallbacks
def cmd_show(request, message='', user='', tag='', club='', page='0',
show='messages', replies=None, use_bl=False, after='', before=''):
"""Show messages by specified parameters."""
message = canonic_message_comment(message).upper()
bl = get_user_bl(request, use_bl)
if '/' in message:
defer.returnValue((yield showComment(message, request)))
if replies:
if not message:
defer.returnValue(dict(
ok=False,
desc="Error: 'replies' is allowed only with 'message'.",
cache=3600))
defer.returnValue((yield showComments(message, request, bl, after)))
else:
if show not in ['messages', 'recommendations', 'all']:
defer.returnValue(dict(
ok=False, desc="Bad 'show' parameter value."))
parameters = [('tags', tag), ('clubs', club), ('id', message.upper())]
parameters = dict(p for p in parameters if p[1])
if user:
user = canonic_user(user).lower()
if show == 'messages':
user_spec = dict(user=user)
elif show == 'recommendations':
user_spec = dict(recommendations=user)
else:
user_spec = {'$or': [{'user': user}, {
'recommendations': user}]}
parameters.update(user_spec)
elif bl:
if parameters.get('user'):
parameters['user'] = {'$in': [parameters['user']] }
else:
parameters['user'] = {}
parameters['user']['$nin'] = list(bl['user'])
if bl and not tag:
if parameters.get('tags'):
parameters['tags'] = {'$in': [parameters['tags']] }
else:
parameters['tags'] = {}
parameters['tags']['$nin'] = list(bl['tag'])
if before:
befmsg = yield objs.Message.find_one({'id': before})
if befmsg:
parameters['date'] = {'$lt': befmsg['date']}
else:
defer.returnValue(dict(ok=False, desc="Message to search before doesn't exist."))
if after:
afmsg = yield objs.Message.find_one({'id': after})
if afmsg:
parameters['date'] = {'$gt': afmsg['date']}
else:
defer.returnValue(dict(ok=False, desc="Message to search after doesn't exist."))
defer.returnValue((yield showSearch(parameters, int(page), request)))
@require_auth
@defer.inlineCallbacks
def cmd_feed(request, page="0"):
""" Показать ленту """
page = int(page) if page else 0
feed = yield objs.FeedElement.find_sort({'user': request.user['name']},
[('_id', pymongo.DESCENDING)], limit=20, skip=page * 20)
messages = [x.filter_fields() for x in (yield objs.Message.find_sort({'id': {'$in':
[f['message']
for f in feed]
}}, [('date', pymongo.ASCENDING)]))]
regions = request.regions
for message in messages:
replace_banned(regions, message)
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
desc='Your feed',
cache=5)
)
@defer.inlineCallbacks
def cmd_today(request, use_bl=False):
""" Показать обсуждаемое за последние 24 часа """
bl = get_user_bl(request, use_bl)
for x in range(10):
postids = [x['_id'] for x in (yield objs.Today.find({}, limit=20))]
if len(postids)>0: break
qdict = {'id': {'$in': postids}}
if bl:
qdict['user'] = {'$nin': list(bl['user'])}
qdict['tags'] = {'$nin': list(bl['tag'])}
dbposts = dict(
(x['id'], x.filter_fields())
for x in (yield objs.Message.find(qdict)))
messages = [dbposts[x] for x in postids if (x in dbposts)]
regions = request.regions
for message in messages:
replace_banned(regions, message)
messages = yield set_subscriptions_info(request, messages)
messages.reverse()
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
             desc="Today's most discussed",
cache=300)
)
@defer.inlineCallbacks
def cmd_today2(request):
""" Показать обсуждаемое за последние 24 часа """
start = time.time() - 86400
messages = [x.filter_fields() for x in (yield objs.Message.find_sort({'date': {'$gte': start}}, [('replycount', pymongo.DESCENDING)], limit=20))]
messages.reverse()
defer.returnValue(
dict(ok=True, format="messages",
messages=messages,
             desc="Today's most discussed",
cache=300)
)
|
{
"content_hash": "1f78253c2f8393e6d6772e579e81c67f",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 149,
"avg_line_length": 38.16872427983539,
"alnum_prop": 0.5633423180592992,
"repo_name": "un-def/bnw",
"id": "bd0cd375c64104514b8ae8e2f0bd9b2ec0e376da",
"size": "9373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bnw/handlers/command_show.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "9465"
},
{
"name": "HTML",
"bytes": "29497"
},
{
"name": "JavaScript",
"bytes": "52029"
},
{
"name": "Python",
"bytes": "241616"
},
{
"name": "Shell",
"bytes": "2944"
}
],
"symlink_target": ""
}
|
"""Correctness tests for tf.keras CNN models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import keras_correctness_test_base
from tensorflow.python import keras
from tensorflow.python.eager import test
from tensorflow.python.keras.optimizer_v2 import gradient_descent
class DistributionStrategyCnnCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
def get_model(self, initial_weights=None, distribution=None):
with keras_correctness_test_base.MaybeDistributionScope(distribution):
image = keras.layers.Input(shape=(28, 28, 3), name='image')
c1 = keras.layers.Conv2D(
name='conv1', filters=16, kernel_size=(3, 3), strides=(4, 4),
kernel_regularizer=keras.regularizers.l2(1e-4))(
image)
if self.with_batch_norm:
c1 = keras.layers.BatchNormalization(name='bn1')(c1)
c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
logits = keras.layers.Dense(
10, activation='softmax', name='pred')(
keras.layers.Flatten()(c1))
model = keras.Model(inputs=[image], outputs=[logits])
if initial_weights:
model.set_weights(initial_weights)
model.compile(
optimizer=gradient_descent.SGD(
learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model
def get_data(self,
count=keras_correctness_test_base._GLOBAL_BATCH_SIZE
* keras_correctness_test_base._EVAL_STEPS,
shape=(28, 28, 3),
num_classes=10):
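    # Synthesize a trivially separable dataset: one random prototype image per
    # class, each sample drawn as its class prototype plus small Gaussian noise.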
centers = np.random.randn(num_classes, *shape)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape))
offset = offset.reshape(shape)
labels.append(label)
features.append(centers[label] + offset)
x_train = np.asarray(features, dtype=np.float32)
y_train = np.asarray(labels, dtype=np.float32).reshape((count, 1))
x_predict = x_train
return x_train, y_train, x_predict
@combinations.generate(keras_correctness_test_base.
all_strategy_and_input_config_combinations())
def test_cnn_correctness(self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@combinations.generate(keras_correctness_test_base.
all_strategy_and_input_config_combinations())
def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data,
with_batch_norm=True)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "e741cfb2a4adff578b9fbfc16fd7157b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 39.22784810126582,
"alnum_prop": 0.6615037108744757,
"repo_name": "ageron/tensorflow",
"id": "3c2961456b2eede9570ce29f7a8900834f2ccfb7",
"size": "3788",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/keras_image_model_correctness_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
"""
Scrapes the cancer wait times at
http://www.england.nhs.uk/statistics/statistical-work-areas/cancer-waiting-times/
and
http://www.england.nhs.uk/statistics/category/statistics/commissioner-waiting-cancer/
"""
import calendar
import datetime
import re
from lxml.html import fromstring, tostring
import requests
import slugify
from publish.lib.helpers import to_markdown, hd
from publish.lib.encoding import fix_bad_unicode
MONTH_DATE_RANGE_RE = re.compile(".*,\s(.*)\sto\s(.*)\s(\d{4}).*")
DATE_RANGE_RE = re.compile(".*Q(\d)\s(\d{4}).*(\d{2})")
MONTHS_LOOKUP = dict((v,k) for k,v in enumerate(calendar.month_name))
def anchor_to_resource(resource):
href = resource.get('href')
return {
"description": resource.text_content().encode('utf8'),
"name": href.split('/')[-1],
"url": href,
"format": href[href.rfind(".")+1:].upper(),
}
def month_to_month_from_title(title):
# Cancer Waiting Times, April to June 2012 - Provider Based
m = MONTH_DATE_RANGE_RE.match(title)
if not m:
print "Not a month range string..."
return "", ""
startm = MONTHS_LOOKUP[m.groups()[0]]
endm = MONTHS_LOOKUP[m.groups()[1]]
year = int(m.groups()[2])
_, mend = calendar.monthrange(year, endm)
start = "{}-{}-{}".format(year, str(startm).zfill(2), 1)
end = "{}-{}-{}".format(year, str(endm).zfill(2), str(mend).zfill(2))
return start, end
def date_range_from_title(title):
# Commissioner-based Cancer Waiting Times for Q3 2014-15
m = DATE_RANGE_RE.match(title)
if not m:
return month_to_month_from_title(title.encode('utf8'))
quarters = {
1: (4, 6),
2: (7, 9),
3: (10, 12),
4: (1, 3),
}
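    # NHS statistics quarters follow the UK fiscal year: Q1 = Apr-Jun,
    # Q2 = Jul-Sep, Q3 = Oct-Dec and Q4 = Jan-Mar of the *following*
    # calendar year, hence the separate start/end years below.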
qstart, qend = quarters[int(m.groups()[0])]
yearstart = m.groups()[1]
# fix in 2090 something before the rollover ...
yearend = int("{}{}".format(yearstart[0:2], m.groups()[2]))
yearstart = int(yearstart)
_, mend = calendar.monthrange(yearend, qend)
s = "{}-{}-01".format( yearstart, str(qstart).zfill(2))
e = "{}-{}-{}".format( yearend, str(qend).zfill(2), mend)
print s, e
return s, e
def scrape_commissioner_page(link):
# One of these links is not like the others....
# if link.get('href') == 'http://www.england.nhs.uk/statistics/2012/03/23/cwt-april-to-december-2011/':
# # Special case....
# return None
# Find all the li a underneath the .column.center
html = requests.get(link)
dom = fromstring(html.content)
div = dom.cssselect('.column.center')[0]
title = div.cssselect('h1')[0].text_content().strip()
links = div.cssselect('li a')
if len(links) == 0:
links = div.cssselect('a')
resources = [anchor_to_resource(link) for link in links]
resources = [r for r in resources if len(r['format']) <= 4 ]
dataset = {}
drs, dre = date_range_from_title(title)
dataset['title'] = title
dataset['name'] = slugify.slugify(title).lower()
if len(div.cssselect('article p')) > 0:
dataset["notes"] = to_markdown( fix_bad_unicode(unicode(tostring(div.cssselect('article p')[0]))) )
else:
dataset['notes'] = to_markdown( fix_bad_unicode(unicode(tostring(div.cssselect('p')[0]))) )
dataset["tags"] = ["CWT"]
dataset["resources"] = resources
dataset["origin"] = link.get('href')
dataset["groups"] = ['cwt']
if drs:
dataset["coverage_start_date"] = drs
if dre:
dataset["coverage_end_date"] = dre
dataset["frequency"] = "Quarterly"
return dataset
def multipage_fetch(root):
datasets = []
fourohfour = False
page = 1
links = []
while not fourohfour:
html = requests.get(root.format(page))
fourohfour = html.status_code == 404
if fourohfour:
break
page = page + 1
dom = fromstring(html.content)
links.extend(dom.cssselect('h2 a'))
for link in links:
        datasets.append(scrape_commissioner_page(link.get('href')))
return datasets
def commissioner_based():
return multipage_fetch("http://www.england.nhs.uk/statistics/category/statistics/commissioner-waiting-cancer/page/{}/")
def default_cwt():
return multipage_fetch("http://www.england.nhs.uk/statistics/category/statistics/provider-waiting-cancer/page/{}/")
def scrape(workspace):
print "Scraping CWT with workspace {}".format(workspace)
datasets = []
bases = [
'http://www.england.nhs.uk/statistics/statistical-work-areas/cancer-waiting-times/provider-based-cancer-waiting-times-statistics/',
'http://www.england.nhs.uk/statistics/statistical-work-areas/cancer-waiting-times/commissioner-based-cancer-waiting-times-statistics/'
]
targets = []
for base in bases:
html = requests.get(base)
page = fromstring(html.content)
h3 = hd([h for h in page.cssselect('h3') if h.text_content().strip().lower() == 'latest statistics'])
links = [a.get('href') for a in h3.getnext().cssselect('a')]
for l in links:
print l
targets += links
for t in targets:
datasets.append(scrape_commissioner_page(t))
# datasets.extend(commissioner_based())
# datasets.extend(default_cwt())
datasets = filter(lambda x: x is not None, datasets)
return datasets
|
{
"content_hash": "0d363c57998db0a18adf82654d998eb5",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 142,
"avg_line_length": 30.185393258426966,
"alnum_prop": 0.6205099571933743,
"repo_name": "nhsengland/publish-o-matic",
"id": "76b4bc0c18cbddb589dd390e346af9e61f4d431b",
"size": "5373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datasets/nhse_stats/topics/cwt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "399397"
}
],
"symlink_target": ""
}
|
import datetime
import inspect
import typing
import marshmallow
from dateutil import relativedelta
from marshmallow import Schema, fields, validate
from marshmallow_oneofschema import OneOfSchema
from airflow.models.mappedoperator import MappedOperator
from airflow.serialization.serialized_objects import SerializedBaseOperator
from airflow.utils.weight_rule import WeightRule
class CronExpression(typing.NamedTuple):
"""Cron expression schema"""
value: str
class TimeDeltaSchema(Schema):
"""Time delta schema"""
objectType = fields.Constant("TimeDelta", data_key="__type")
days = fields.Integer()
seconds = fields.Integer()
microseconds = fields.Integer()
@marshmallow.post_load
def make_time_delta(self, data, **kwargs):
"""Create time delta based on data"""
if "objectType" in data:
del data["objectType"]
return datetime.timedelta(**data)
class RelativeDeltaSchema(Schema):
"""Relative delta schema"""
objectType = fields.Constant("RelativeDelta", data_key="__type")
years = fields.Integer()
months = fields.Integer()
days = fields.Integer()
leapdays = fields.Integer()
hours = fields.Integer()
minutes = fields.Integer()
seconds = fields.Integer()
microseconds = fields.Integer()
year = fields.Integer()
month = fields.Integer()
day = fields.Integer()
hour = fields.Integer()
minute = fields.Integer()
second = fields.Integer()
microsecond = fields.Integer()
@marshmallow.post_load
def make_relative_delta(self, data, **kwargs):
"""Create relative delta based on data"""
if "objectType" in data:
del data["objectType"]
return relativedelta.relativedelta(**data)
class CronExpressionSchema(Schema):
"""Cron expression schema"""
objectType = fields.Constant("CronExpression", data_key="__type")
value = fields.String(required=True)
@marshmallow.post_load
def make_cron_expression(self, data, **kwargs):
"""Create cron expression based on data"""
return CronExpression(data["value"])
class ScheduleIntervalSchema(OneOfSchema):
"""
Schedule interval.
It supports the following types:
* TimeDelta
* RelativeDelta
* CronExpression
"""
type_field = "__type"
type_schemas = {
"TimeDelta": TimeDeltaSchema,
"RelativeDelta": RelativeDeltaSchema,
"CronExpression": CronExpressionSchema,
}
def _dump(self, obj, update_fields=True, **kwargs):
if isinstance(obj, str):
obj = CronExpression(obj)
return super()._dump(obj, update_fields=update_fields, **kwargs)
def get_obj_type(self, obj):
"""Select schema based on object type"""
if isinstance(obj, datetime.timedelta):
return "TimeDelta"
elif isinstance(obj, relativedelta.relativedelta):
return "RelativeDelta"
elif isinstance(obj, CronExpression):
return "CronExpression"
else:
raise Exception(f"Unknown object type: {obj.__class__.__name__}")
class ColorField(fields.String):
"""Schema for color property"""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.Regexp("^#[a-fA-F0-9]{3,6}$")] + list(self.validators)
class WeightRuleField(fields.String):
"""Schema for WeightRule"""
def __init__(self, **metadata):
super().__init__(**metadata)
self.validators = [validate.OneOf(WeightRule.all_weight_rules())] + list(self.validators)
class TimezoneField(fields.String):
"""Schema for timezone"""
class ClassReferenceSchema(Schema):
"""Class reference schema."""
module_path = fields.Method("_get_module", required=True)
class_name = fields.Method("_get_class_name", required=True)
def _get_module(self, obj):
if isinstance(obj, (MappedOperator, SerializedBaseOperator)):
return obj._task_module
return inspect.getmodule(obj).__name__
def _get_class_name(self, obj):
if isinstance(obj, (MappedOperator, SerializedBaseOperator)):
return obj._task_type
if isinstance(obj, type):
return obj.__name__
return type(obj).__name__
|
{
"content_hash": "6a56f50127f666d8c3266134102dc075",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 97,
"avg_line_length": 28.653333333333332,
"alnum_prop": 0.6537924616100512,
"repo_name": "bolkedebruin/airflow",
"id": "1c10421ea201ce4caebb17f10a71423e3fc3aa7d",
"size": "5084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/api_connexion/schemas/common_schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/droid/shared_armor_module_3.iff"
result.attribute_template_id = -1
result.stfName("craft_droid_ingredients_n","armor_module_3")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b1b96415b06f5d4a8b4be75fda1f73b1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.7051671732522796,
"repo_name": "anhstudios/swganh",
"id": "fc770aa70f1b203a58a1bc3173af4a75318bfd02",
"size": "474",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/component/droid/shared_armor_module_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from .uwsgi import UWSGIProvider
from .supervisor import SupervisorProvider
from ...app import blueprint
def get_provider(name):
"""
Get provider instance by name.
:param name: Provider name (blueprint)
:return: <provider>
"""
if name == 'uwsgi':
return UWSGIProvider()
elif name == 'supervisor':
return SupervisorProvider()
else:
raise NotImplementedError('"{}" is not a valid application provider'.format(name))
def get_providers(host=None):
"""
Get configured web/worker providers by host.
:param host: Provider host filter
:return: dict(web=<provider>, worker=<provider>)
"""
providers = {}
web_hosts = blueprint.get('web.hosts')
# Filter out bad values
web_hosts = [host for host in web_hosts if host]
web_provider = blueprint.get('web.provider')
if web_provider:
providers[web_provider] = get_provider(web_provider)
worker_hosts = blueprint.get('worker.hosts')
# Filter out bad values
worker_hosts = [host for host in worker_hosts if host]
worker_provider = blueprint.get('worker.provider')
if worker_provider and worker_provider not in providers:
providers[worker_provider] = get_provider(worker_provider)
if web_provider and (not web_hosts or host in web_hosts):
providers['web'] = providers[web_provider]
if worker_provider and (not worker_hosts or host in worker_hosts):
providers['worker'] = providers[worker_provider]
# Remove provider name keys
provider_name_keys = {worker_provider, web_provider}
for provider_name in provider_name_keys:
if provider_name:
providers.pop(provider_name)
return providers
|
{
"content_hash": "89cfd117e2604f88e400e9a20d95bd66",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.6716937354988399,
"repo_name": "adisbladis/blues",
"id": "16eea8a2c96c062346ad4aab6357765db1464dc7",
"size": "1724",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "blues/application/providers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2300"
},
{
"name": "Nginx",
"bytes": "2892"
},
{
"name": "Python",
"bytes": "136720"
},
{
"name": "Shell",
"bytes": "2706"
}
],
"symlink_target": ""
}
|
"""
Tests For HostManager
"""
from oslo.config import cfg
from cinder import db
from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common import timeutils
from cinder.scheduler import host_manager
from cinder import test
from cinder.tests.scheduler import fakes
CONF = cfg.CONF
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class HostManagerTestCase(test.TestCase):
"""Test case for HostManager class"""
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
for x in xrange(1, 5)]
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
# Test 'volume' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
self.assertEqual(len(filter_classes), 1)
self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
def _mock_get_filtered_hosts(self, info, specified_filters=None):
self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
self.host_manager._choose_host_filters(specified_filters).AndReturn(
[FakeFilterClass1])
def _verify_result(self, info, result):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(result), set(info['got_objs']))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)
self.mox.ReplayAll()
service_name = 'volume'
self.host_manager.update_service_capabilities(service_name, 'host1',
host1_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host2',
host2_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host3',
host3_volume_capabs)
# Make sure dictionary isn't re-assigned
self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_volume_capabs['timestamp'], 1)
host1_volume_capabs['timestamp'] = 31337
host2_volume_capabs['timestamp'] = 31338
host3_volume_capabs['timestamp'] = 31339
expected = {'host1': host1_volume_capabs,
'host2': host2_volume_capabs,
'host3': host3_volume_capabs}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
context = 'fake_context'
topic = CONF.volume_topic
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
self.mox.StubOutWithMock(host_manager.utils, 'service_is_up')
services = [
dict(id=1, host='host1', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=2, host='host2', topic='volume', disabled=False,
availability_zone='zone1', updated_at=timeutils.utcnow()),
dict(id=3, host='host3', topic='volume', disabled=False,
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
# service on host5 is disabled
dict(id=5, host='host5', topic='volume', disabled=True,
availability_zone='zone4', updated_at=timeutils.utcnow()),
]
db.service_get_all_by_topic(context, topic).AndReturn(services)
host_manager.utils.service_is_up(services[0]).AndReturn(True)
host_manager.utils.service_is_up(services[1]).AndReturn(True)
host_manager.utils.service_is_up(services[2]).AndReturn(True)
host_manager.utils.service_is_up(services[3]).AndReturn(True)
host_manager.utils.service_is_up(services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
db.service_get_all_by_topic(context, topic).AndReturn(services)
host_manager.utils.service_is_up(services[0]).AndReturn(True)
host_manager.utils.service_is_up(services[1]).AndReturn(True)
host_manager.utils.service_is_up(services[2]).AndReturn(True)
host_manager.utils.service_is_up(services[3]).AndReturn(False)
# Stopped service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host4)")
host_manager.utils.service_is_up(services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(len(host_state_map), 4)
# Check that service is up
for i in xrange(4):
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
self.host_manager.get_all_host_states(context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(len(host_state_map), 3)
for i in xrange(3):
volume_node = services[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
class HostStateTestCase(test.TestCase):
"""Test case for HostState class"""
def test_update_from_volume_capability(self):
fake_host = host_manager.HostState('host1')
self.assertIsNone(fake_host.free_capacity_gb)
volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.free_capacity_gb, 512)
def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1')
self.assertIsNone(fake_host.free_capacity_gb)
volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.free_capacity_gb, 'infinite')
def test_update_from_volume_unknown_capability(self):
fake_host = host_manager.HostState('host1')
self.assertIsNone(fake_host.free_capacity_gb)
volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.free_capacity_gb, 'unknown')
|
{
"content_hash": "960d50472260827fb0bd19a5f46fef6f",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 76,
"avg_line_length": 41.83116883116883,
"alnum_prop": 0.6093345751836904,
"repo_name": "ntt-sic/cinder",
"id": "01bc85904150f3a50f2e3c1655c9370a0892f870",
"size": "10302",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/scheduler/test_host_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5200214"
},
{
"name": "Shell",
"bytes": "8994"
}
],
"symlink_target": ""
}
|
import os
import random
import re
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, Dict, Optional
from django.conf import settings
from django.http import HttpRequest, HttpResponse, HttpResponseNotFound
from django.template import loader
from django.views.generic import TemplateView
from zerver.context_processors import zulip_default_context
from zerver.decorator import add_google_analytics_context
from zerver.lib.integrations import (
CATEGORIES,
INTEGRATIONS,
META_CATEGORY,
HubotIntegration,
WebhookIntegration,
)
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.subdomains import get_subdomain
from zerver.lib.templates import render_markdown_path
from zerver.models import Realm
from zerver.openapi.openapi import get_endpoint_from_operationid, get_openapi_summary
@dataclass
class DocumentationArticle:
article_path: str
article_http_status: int
endpoint_path: Optional[str]
endpoint_method: Optional[str]
def add_api_uri_context(context: Dict[str, Any], request: HttpRequest) -> None:
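    """Add the API URL variables and settings links used by the docs templates to the context."""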
context.update(zulip_default_context(request))
subdomain = get_subdomain(request)
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN or not settings.ROOT_DOMAIN_LANDING_PAGE:
display_subdomain = subdomain
html_settings_links = True
else:
display_subdomain = "yourZulipDomain"
html_settings_links = False
display_host = Realm.host_for_subdomain(display_subdomain)
api_url_scheme_relative = display_host + "/api"
api_url = settings.EXTERNAL_URI_SCHEME + api_url_scheme_relative
zulip_url = settings.EXTERNAL_URI_SCHEME + display_host
context["external_uri_scheme"] = settings.EXTERNAL_URI_SCHEME
context["api_url"] = api_url
context["api_url_scheme_relative"] = api_url_scheme_relative
context["zulip_url"] = zulip_url
context["html_settings_links"] = html_settings_links
if html_settings_links:
settings_html = '<a href="/#settings">Zulip settings page</a>'
subscriptions_html = '<a target="_blank" href="/#streams">streams page</a>'
else:
settings_html = "Zulip settings page"
subscriptions_html = "streams page"
context["settings_html"] = settings_html
context["subscriptions_html"] = subscriptions_html
class ApiURLView(TemplateView):
def get_context_data(self, **kwargs: Any) -> Dict[str, str]:
context = super().get_context_data(**kwargs)
add_api_uri_context(context, self.request)
return context
class MarkdownDirectoryView(ApiURLView):
path_template = ""
policies_view = False
def get_path(self, article: str) -> DocumentationArticle:
http_status = 200
if article == "":
article = "index"
elif article == "include/sidebar_index":
pass
elif "/" in article:
article = "missing"
http_status = 404
elif len(article) > 100 or not re.match("^[0-9a-zA-Z_-]+$", article):
article = "missing"
http_status = 404
path = self.path_template % (article,)
endpoint_name = None
endpoint_method = None
if self.policies_view and self.path_template.startswith("/"):
# This block is required because neither the Django
# template loader nor the article_path logic below support
# settings.POLICIES_DIRECTORY being an absolute path.
if not os.path.exists(path):
article = "missing"
http_status = 404
path = self.path_template % (article,)
return DocumentationArticle(
article_path=path,
article_http_status=http_status,
endpoint_path=None,
endpoint_method=None,
)
# The following is a somewhat hacky approach to extract titles from articles.
        # Hack: `context["article"]` has a leading `/`, so we use + to add directories.
article_path = os.path.join(settings.DEPLOY_ROOT, "templates") + path
if (not os.path.exists(article_path)) and self.path_template == "/zerver/api/%s.md":
try:
endpoint_name, endpoint_method = get_endpoint_from_operationid(article)
path = "/zerver/api/api-doc-template.md"
except AssertionError:
return DocumentationArticle(
article_path=self.path_template % ("missing",),
article_http_status=404,
endpoint_path=None,
endpoint_method=None,
)
try:
loader.get_template(path)
return DocumentationArticle(
article_path=path,
article_http_status=http_status,
endpoint_path=endpoint_name,
endpoint_method=endpoint_method,
)
except loader.TemplateDoesNotExist:
return DocumentationArticle(
article_path=self.path_template % ("missing",),
article_http_status=404,
endpoint_path=None,
endpoint_method=None,
)
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
article = kwargs["article"]
context: Dict[str, Any] = super().get_context_data()
documentation_article = self.get_path(article)
context["article"] = documentation_article.article_path
if documentation_article.article_path.startswith("/") and os.path.exists(
documentation_article.article_path
):
# Absolute path case
article_path = documentation_article.article_path
elif documentation_article.article_path.startswith("/"):
            # Hack: `context["article"]` has a leading `/`, so we use + to add directories.
article_path = (
os.path.join(settings.DEPLOY_ROOT, "templates") + documentation_article.article_path
)
else:
article_path = os.path.join(
settings.DEPLOY_ROOT, "templates", documentation_article.article_path
)
# For disabling the "Back to home" on the homepage
context["not_index_page"] = not context["article"].endswith("/index.md")
if self.path_template == "/zerver/help/%s.md":
context["page_is_help_center"] = True
context["doc_root"] = "/help/"
context["doc_root_title"] = "Help center"
sidebar_article = self.get_path("include/sidebar_index")
sidebar_index = sidebar_article.article_path
title_base = "Zulip help center"
elif self.path_template == f"{settings.POLICIES_DIRECTORY}/%s.md":
context["page_is_policy_center"] = True
context["doc_root"] = "/policies/"
context["doc_root_title"] = "Terms and policies"
sidebar_article = self.get_path("sidebar_index")
sidebar_index = sidebar_article.article_path
title_base = "Zulip terms and policies"
else:
context["page_is_api_center"] = True
context["doc_root"] = "/api/"
context["doc_root_title"] = "API documentation"
sidebar_article = self.get_path("sidebar_index")
sidebar_index = sidebar_article.article_path
title_base = "Zulip API documentation"
# The following is a somewhat hacky approach to extract titles from articles.
endpoint_name = None
endpoint_method = None
if os.path.exists(article_path):
with open(article_path) as article_file:
first_line = article_file.readlines()[0]
# Strip the header and then use the first line to get the article title
if context["article"] == "/zerver/api/api-doc-template.md":
endpoint_name, endpoint_method = (
documentation_article.endpoint_path,
documentation_article.endpoint_method,
)
assert endpoint_name is not None
assert endpoint_method is not None
article_title = get_openapi_summary(endpoint_name, endpoint_method)
elif (
self.path_template == "/zerver/api/%s.md" and "{generate_api_header(" in first_line
):
api_operation = context["PAGE_METADATA_URL"].split("/api/")[1]
endpoint_name, endpoint_method = get_endpoint_from_operationid(api_operation)
article_title = get_openapi_summary(endpoint_name, endpoint_method)
else:
article_title = first_line.lstrip("#").strip()
endpoint_name = endpoint_method = None
if context["not_index_page"]:
context["PAGE_TITLE"] = f"{article_title} | {title_base}"
else:
context["PAGE_TITLE"] = title_base
request_notes = RequestNotes.get_notes(self.request)
request_notes.placeholder_open_graph_description = (
f"REPLACEMENT_PAGE_DESCRIPTION_{int(2**24 * random.random())}"
)
context["PAGE_DESCRIPTION"] = request_notes.placeholder_open_graph_description
context["sidebar_index"] = sidebar_index
# An "article" might require the api_uri_context to be rendered
api_uri_context: Dict[str, Any] = {}
add_api_uri_context(api_uri_context, self.request)
api_uri_context["run_content_validators"] = True
context["api_uri_context"] = api_uri_context
if endpoint_name and endpoint_method:
context["api_uri_context"]["API_ENDPOINT_NAME"] = endpoint_name + ":" + endpoint_method
add_google_analytics_context(context)
return context
def get(
self, request: HttpRequest, *args: object, article: str = "", **kwargs: object
) -> HttpResponse:
# Hack: It's hard to reinitialize urls.py from tests, and so
# we want to defer the use of settings.POLICIES_DIRECTORY to
# runtime.
if self.policies_view:
self.path_template = f"{settings.POLICIES_DIRECTORY}/%s.md"
documentation_article = self.get_path(article)
http_status = documentation_article.article_http_status
result = super().get(request, article=article)
if http_status != 200:
result.status_code = http_status
return result
def add_integrations_context(context: Dict[str, Any]) -> None:
alphabetical_sorted_categories = OrderedDict(sorted(CATEGORIES.items()))
alphabetical_sorted_integration = OrderedDict(sorted(INTEGRATIONS.items()))
enabled_integrations_count = len(list(filter(lambda v: v.is_enabled(), INTEGRATIONS.values())))
# Subtract 1 so saying "Over X integrations" is correct. Then,
# round down to the nearest multiple of 10.
integrations_count_display = ((enabled_integrations_count - 1) // 10) * 10
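    # e.g. 123 enabled integrations display as "over 120"; exactly 120 display as "over 110".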
context["categories_dict"] = alphabetical_sorted_categories
context["integrations_dict"] = alphabetical_sorted_integration
context["integrations_count_display"] = integrations_count_display
def add_integrations_open_graph_context(context: Dict[str, Any], request: HttpRequest) -> None:
path_name = request.path.rstrip("/").split("/")[-1]
description = (
"Zulip comes with over a hundred native integrations out of the box, "
"and integrates with Zapier and IFTTT to provide hundreds more. "
"Connect the apps you use every day to Zulip."
)
if path_name in INTEGRATIONS:
integration = INTEGRATIONS[path_name]
context["PAGE_TITLE"] = f"{integration.display_name} | Zulip integrations"
context["PAGE_DESCRIPTION"] = description
elif path_name in CATEGORIES:
category = CATEGORIES[path_name]
if path_name in META_CATEGORY:
context["PAGE_TITLE"] = f"{category} | Zulip integrations"
else:
context["PAGE_TITLE"] = f"{category} tools | Zulip integrations"
context["PAGE_DESCRIPTION"] = description
elif path_name == "integrations":
context["PAGE_TITLE"] = "Zulip integrations"
context["PAGE_DESCRIPTION"] = description
class IntegrationView(ApiURLView):
template_name = "zerver/integrations/index.html"
def get_context_data(self, **kwargs: Any) -> Dict[str, Any]:
context: Dict[str, Any] = super().get_context_data(**kwargs)
add_integrations_context(context)
add_integrations_open_graph_context(context, self.request)
add_google_analytics_context(context)
return context
@has_request_variables
def integration_doc(request: HttpRequest, integration_name: str = REQ()) -> HttpResponse:
# FIXME: This check is jQuery-specific.
if request.headers.get("x-requested-with") != "XMLHttpRequest":
return HttpResponseNotFound()
try:
integration = INTEGRATIONS[integration_name]
except KeyError:
return HttpResponseNotFound()
context: Dict[str, Any] = {}
add_api_uri_context(context, request)
context["integration_name"] = integration.name
context["integration_display_name"] = integration.display_name
context["recommended_stream_name"] = integration.stream_name
if isinstance(integration, WebhookIntegration):
context["integration_url"] = integration.url[3:]
if (
hasattr(integration.function, "_all_event_types")
and integration.function._all_event_types is not None
):
context["all_event_types"] = integration.function._all_event_types
if isinstance(integration, HubotIntegration):
context["hubot_docs_url"] = integration.hubot_docs_url
doc_html_str = render_markdown_path(integration.doc, context)
return HttpResponse(doc_html_str)
|
{
"content_hash": "7b407080831bd69b9937503ef290087c",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 100,
"avg_line_length": 42.32218844984803,
"alnum_prop": 0.6312122953174375,
"repo_name": "rht/zulip",
"id": "6c4641a000d014a80dae2814a7a9e7225fc63ee8",
"size": "13924",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zerver/views/documentation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "489438"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "743287"
},
{
"name": "Handlebars",
"bytes": "374049"
},
{
"name": "JavaScript",
"bytes": "4000260"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10160680"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284836"
}
],
"symlink_target": ""
}
|
import os
import sys
from tempfile import TemporaryFile
from subprocess import Popen, call, STDOUT
import Selenium2Library
WEBSERVER = "target/server.py"
OUTDIR = "output"
def start_web_server():
Popen(['python', WEBSERVER, 'start'], stdout=TemporaryFile(), stderr=STDOUT)
def stop_web_server():
call(['python', WEBSERVER, 'stop'], stdout=TemporaryFile(), stderr=STDOUT)
def run_tests(args):
start_web_server()
call(['mkdir', '-p', OUTDIR])
call(['pybot', '--outputdir', OUTDIR] + args, shell=(os.sep == '\\'))
stop_web_server()
if __name__ == '__main__':
run_tests(sys.argv[1:])
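# Usage sketch (suite name is illustrative):
#   python run.py my_suite.robot
# starts target/server.py, runs pybot on the given suites with results written
# to 'output', then stops the server again.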
|
{
"content_hash": "2ac73ebc12d5ae6f30bc5af9349c089c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 24.6,
"alnum_prop": 0.6585365853658537,
"repo_name": "maartenvds/robot-framework-examples",
"id": "57cf4fbce15604eb640cc7687ee8ae273631d679",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selenium/run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "563"
},
{
"name": "CSS",
"bytes": "417"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "6552"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
import re
# Valid values of result type.
RESULT_TYPES = {'unimportant': 'RESULT ',
'default': '*RESULT ',
'informational': ''}
def _EscapePerfResult(s):
"""Escapes |s| for use in a perf result."""
# Colons (:) and equal signs (=) are not allowed, and we chose an arbitrary
# limit of 40 chars.
return re.sub(':|=', '_', s[:40])
def PrintPerfResult(measurement, trace, values, units, result_type='default',
print_to_stdout=True):
"""Prints numerical data to stdout in the format required by perf tests.
The string args may be empty but they must not contain any colons (:) or
equals signs (=).
Args:
measurement: A description of the quantity being measured, e.g. "vm_peak".
trace: A description of the particular data point, e.g. "reference".
values: A list of numeric measured values.
units: A description of the units of measure, e.g. "bytes".
result_type: A tri-state that accepts values of ['unimportant', 'default',
'informational']. 'unimportant' prints RESULT, 'default' prints *RESULT
and 'informational' prints nothing.
    print_to_stdout: If True, prints the output to stdout in addition to
      returning it.
  Returns:
    String of the formatted perf result.
"""
assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type
assert isinstance(values, list)
assert len(values)
assert '/' not in measurement
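  # Worked example (made-up numbers):
  #   PrintPerfResult('vm_peak', 'reference', [1024, 2048], 'bytes')
  # prints:
  #   *RESULT vm_peak: reference= [1024,2048] bytes
  #   Avg vm_peak: 1536.000000bytes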
avg = None
if len(values) > 1:
try:
value = '[%s]' % ','.join([str(v) for v in values])
avg = sum([float(v) for v in values]) / len(values)
except ValueError:
value = ", ".join(values)
else:
value = values[0]
trace_name = _EscapePerfResult(trace)
output = '%s%s: %s%s%s %s' % (
RESULT_TYPES[result_type],
_EscapePerfResult(measurement),
trace_name,
# Do not show equal sign if the trace is empty. Usually it happens when
# measurement is enough clear to describe the result.
'= ' if trace_name else '',
value,
units)
  if avg is not None:
output += '\nAvg %s: %f%s' % (measurement, avg, units)
if print_to_stdout:
print output
return output
|
{
"content_hash": "1127815f9e93919af1869323a4ff3a57",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 32.776119402985074,
"alnum_prop": 0.6397996357012751,
"repo_name": "keishi/chromium",
"id": "193442ede9dd1df3783defe5fbe1cac45856404e",
"size": "2363",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/android/pylib/perf_tests_helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1172794"
},
{
"name": "C",
"bytes": "67452317"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "132681259"
},
{
"name": "F#",
"bytes": "381"
},
{
"name": "Go",
"bytes": "19048"
},
{
"name": "Java",
"bytes": "361412"
},
{
"name": "JavaScript",
"bytes": "16603687"
},
{
"name": "Objective-C",
"bytes": "9609581"
},
{
"name": "PHP",
"bytes": "97796"
},
{
"name": "Perl",
"bytes": "918683"
},
{
"name": "Python",
"bytes": "6407891"
},
{
"name": "R",
"bytes": "524"
},
{
"name": "Shell",
"bytes": "4192593"
},
{
"name": "Tcl",
"bytes": "277077"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import logging
import socket
from textwrap import dedent
from docker.errors import APIError
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
from ..const import HTTP_TIMEOUT
from .utils import call_silently
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
log = logging.getLogger(__name__)
class UserError(Exception):
def __init__(self, msg):
self.msg = dedent(msg).strip()
def __unicode__(self):
return self.msg
__str__ = __unicode__
class ConnectionError(Exception):
pass
@contextlib.contextmanager
def handle_connection_errors(client):
try:
yield
except SSLError as e:
log.error('SSL error: %s' % e)
raise ConnectionError()
except RequestsConnectionError as e:
if e.args and isinstance(e.args[0], ReadTimeoutError):
log_timeout_error()
raise ConnectionError()
exit_with_error(get_conn_error_message(client.base_url))
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
except (ReadTimeout, socket.timeout) as e:
log_timeout_error()
raise ConnectionError()
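# Typical usage is as a guard around client calls, e.g. (`client` being any
# docker API client; ping() is only an illustrative call):
#
#     with handle_connection_errors(client):
#         client.ping()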
def log_timeout_error():
log.error(
"An HTTP request took too long to complete. Retry with --verbose to "
"obtain debug information.\n"
"If you encounter this issue regularly because of slow network "
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
"value (current value: %s)." % HTTP_TIMEOUT)
def log_api_error(e, client_version):
if b'client is newer than server' not in e.explanation:
log.error(e.explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
log.error(e.explanation)
return
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
"version {version} or greater.".format(version=version))
def exit_with_error(msg):
log.error(dedent(msg).strip())
raise ConnectionError()
def get_conn_error_message(url):
if call_silently(['which', 'docker']) != 0:
if is_mac():
return docker_not_found_mac
if is_ubuntu():
return docker_not_found_ubuntu
return docker_not_found_generic
if is_docker_for_mac_installed():
return conn_error_docker_for_mac
if call_silently(['which', 'docker-machine']) == 0:
return conn_error_docker_machine
return conn_error_generic.format(url=url)
docker_not_found_mac = """
Couldn't connect to Docker daemon. You might need to install Docker:
https://docs.docker.com/engine/installation/mac/
"""
docker_not_found_ubuntu = """
Couldn't connect to Docker daemon. You might need to install Docker:
https://docs.docker.com/engine/installation/ubuntulinux/
"""
docker_not_found_generic = """
Couldn't connect to Docker daemon. You might need to install Docker:
https://docs.docker.com/engine/installation/
"""
conn_error_docker_machine = """
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
"""
conn_error_docker_for_mac = """
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
"""
conn_error_generic = """
Couldn't connect to Docker daemon at {url} - is it running?
If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
"""
|
{
"content_hash": "983718ea4011cce22e71050a508a98e1",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 98,
"avg_line_length": 28.085714285714285,
"alnum_prop": 0.6833672431332655,
"repo_name": "denverdino/compose",
"id": "89a7a9492b11ba97f581b012187e54775a1c1b03",
"size": "3932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compose/cli/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2600"
},
{
"name": "Python",
"bytes": "617288"
},
{
"name": "Shell",
"bytes": "25587"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def binop_eq():
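    # Exercise elementwise equality (==) between frames, single columns and
    # scalars; each comparison yields a boolean frame that can be used to
    # filter rows, e.g. iris[iris[0] == 4.7].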
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
rows, cols = iris.dim
iris.show()
    #frame/scalar
res = iris == 4.7
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
new_rows = iris[res[0]].nrow
assert new_rows == 2, "wrong number of rows returned"
res = 3.5 == iris
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
new_rows = iris[res[1]].nrow
assert new_rows == 6, "wrong number of rows returned"
#frame/vec
#try:
# res = iris == iris[0]
# res.show()
# assert False, "expected error. objects of different dimensions not supported."
#except EnvironmentError:
# pass
#try:
# res = iris[2] == iris
# res.show()
# assert False, "expected error. objects of different dimensions not supported."
#except EnvironmentError:
# pass
#vec/vec
res = iris[0] == iris[1]
res_rows = res.nrow
assert res_rows == rows, "dimension mismatch"
new_rows = iris[res].nrow
assert new_rows == 0, "wrong number of rows returned"
res = iris[2] == iris[2]
res_rows = res.nrow
assert res_rows == rows, "dimension mismatch"
new_rows = iris[res].nrow
assert new_rows == 150, "wrong number of rows returned"
    #vec/scalar
res = iris[0] == 4.7
res_rows = res.nrow
assert res_rows == rows, "dimension mismatch"
new_rows = iris[res].nrow
assert new_rows == 2, "wrong number of rows returned"
res = 3.5 == iris[1]
res_rows = res.nrow
assert res_rows == rows, "dimension mismatch"
new_rows = iris[res].nrow
assert new_rows == 6, "wrong number of rows returned"
# frame/frame
res = iris == iris
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == cols, "dimension mismatch"
res = iris[0:2] == iris[1:3]
res_rows, res_cols = res.dim
assert res_rows == rows and res_cols == 2, "dimension mismatch"
#try:
# res = iris == iris[0:3]
# res.show()
# assert False, "expected error. frames are different dimensions."
#except EnvironmentError:
# pass
if __name__ == "__main__":
pyunit_utils.standalone_test(binop_eq)
else:
binop_eq()
|
{
"content_hash": "a4a882a0b0b11797264a58c867b71f16",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 87,
"avg_line_length": 26.630434782608695,
"alnum_prop": 0.6020408163265306,
"repo_name": "h2oai/h2o-dev",
"id": "bd4c05fd2d273ba63252eb5e53cf200c44d911bc",
"size": "2450",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_munging/binop/pyunit_binop2_eq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
}
|
"""Parse a KeyStore in PKCS#12 format
Using openssl, it is possible to dump the certificates and private keys from
a PKCS#12 keystore:
openssl pkcs12 -info -passin pass:changeit -nodes -in store.p12
Nevertheless this command does not show the bags with type "secretBag", that
contain secret keys for symmetric encryption algorithms.
Documentation:
* https://tools.ietf.org/html/rfc7292
RFC 7292, PKCS #12: Personal Information Exchange Syntax v1.1
* https://tools.ietf.org/html/rfc2315
RFC 2315, PKCS #7: Cryptographic Message Syntax Version 1.5
* https://tools.ietf.org/html/rfc5208
RFC 5208, Public-Key Cryptography Standards (PKCS) #8:
Private-Key Information Syntax Specification Version 1.2
* https://www.openssl.org/docs/man1.0.2/man1/pkcs12.html
openssl-pkcs12 man page
NB. PKCS#12 pbeWithSHA1And40BitRC2-CBC key-derivation and encryption algorithm
is used to encrypt WebLogic passwords. The code uses JSAFE with algorithm
"PBE/SHA1/RC2/CBC/PKCS12PBE-5-128", which is pbeWithSHA1And40BitRC2-CBC with
five rounds. More information is available on:
* https://bitbucket.org/vladimir_dyuzhev/recover-weblogic-password/src/b48ef4a82db57f12e52788fe08b80e54e847d42c/src/weblogic/security/internal/encryption/JSafeSecretKeyEncryptor.java
* https://www.cryptsoft.com/pkcs11doc/v220/group__SEC__12__27__PKCS____12__PASSWORD__BASED__ENCRYPTION__AUTHENTICATION__MECHANISMS.html
* https://github.com/maaaaz/weblogicpassworddecryptor
* https://blog.netspi.com/decrypting-weblogic-passwords/
* https://github.com/NetSPI/WebLogicPasswordDecryptor/blob/master/Invoke-WebLogicPasswordDecryptor.psm1
"""
import argparse
import binascii
import datetime
import hashlib
import hmac
import logging
import os.path
import re
import struct
import sys
import tempfile
import Cryptodome.Cipher.AES
import Cryptodome.Cipher.ARC2
import Cryptodome.Cipher.DES3
import rc2
import util_asn1
from util_bin import run_openssl_show_cert, run_process_with_input, xx
from util_crypto import report_if_missing_cryptography, describe_der_certificate
logger = logging.getLogger(__name__)
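# Example invocation (requires the util_asn1/util_bin/util_crypto helpers and
# the rc2 module from this repository to be importable):
#   python parse_pkcs12.py --pem -p changeit store.p12
# Without a KEYSTORE argument, a throw-away PKCS#12 keystore is generated with
# keytool and parsed instead.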
def generate_p12_keystore(password):
"""Generate a PKCS#12 keystore with some content"""
temporary_dir = tempfile.mkdtemp(suffix='_java_keystore-test')
ks_path = os.path.join(temporary_dir, 'store.jks')
try:
# By default it generates a DSA keypair
run_process_with_input(
[
'keytool', '-genkeypair', '-noprompt',
'-keyalg', 'dsa',
'-storetype', 'pkcs12',
'-keystore', ks_path,
'-storepass', password,
'-alias', 'mykeypair',
'-dname', 'CN=example',
],
None, fatal=True)
run_process_with_input(
[
'keytool', '-genkeypair', '-noprompt',
'-keyalg', 'rsa', '-sigalg', 'SHA256withRSA',
'-storetype', 'pkcs12',
'-keystore', ks_path,
'-storepass', password,
'-alias', 'mykeypair_rsa_sha256sig',
'-dname', 'CN=example',
],
None, fatal=True)
# Add a secret key
run_process_with_input(
[
'keytool', '-genseckey',
'-keyalg', 'aes', '-keysize', '192',
'-storetype', 'pkcs12',
'-keystore', ks_path,
'-storepass', password,
'-alias', 'mysecret_aes192key',
],
None, fatal=True)
with open(ks_path, 'rb') as fks:
ks_content = fks.read()
if not ks_content:
raise ValueError("keytool did not produce any output")
return ks_content
finally:
try:
os.remove(ks_path)
except OSError as exc:
# If removing the files failed, the error will appear in rmdir
logger.debug("Error while removing files: %r", exc)
os.rmdir(temporary_dir)
def pkcs12_derivation(alg, id_byte, password, salt, iterations, result_size=None):
"""Compute a key and iv from a password and salt according to PKCS#12
id_byte is, according to https://tools.ietf.org/html/rfc7292#appendix-B.3 :
* 1 to generate a key
* 2 to generate an initial value (IV)
* 3 to generate an integrity key
OpenSSL implementation:
https://github.com/openssl/openssl/blob/OpenSSL_1_1_1/crypto/pkcs12/p12_key.c
"""
if alg == 'SHA1':
hash_func = hashlib.sha1
u = 160 # SHA1 digest size, in bits
v = 512 # SHA1 block size, in bits
elif alg == 'SHA256':
hash_func = hashlib.sha256
u = 256 # SHA256 digest size, in bits
v = 512 # SHA256 block size, in bits
else:
raise NotImplementedError("Unimplemented algorithm {} for PKCS#12 key derivation".format(alg))
assert (u % 8) == (v % 8) == 0
u_bytes = u // 8
v_bytes = v // 8
if result_size is None:
result_size = u_bytes
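    # In outline: tile a diversifier byte, the salt and the UTF-16BE password
    # up to multiples of the hash block size (e.g. a 20-byte salt becomes 64
    # bytes for the 512-bit SHA1/SHA256 block), then repeatedly hash and mix
    # the digest back into the buffer until result_size bytes are produced.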
diversifier = struct.pack('B', id_byte) * v_bytes
expanded_salt_size = v_bytes * ((len(salt) + v_bytes - 1) // v_bytes)
expanded_salt = (salt * ((expanded_salt_size // len(salt)) + 1))[:expanded_salt_size]
assert len(expanded_salt) == expanded_salt_size
pass_bytes = password.encode('utf-16be') + b'\0\0'
expanded_pass_size = v_bytes * ((len(pass_bytes) + v_bytes - 1) // v_bytes)
expanded_pass = (pass_bytes * ((expanded_pass_size // len(pass_bytes)) + 1))[:expanded_pass_size]
assert len(expanded_pass) == expanded_pass_size
i_size = expanded_salt_size + expanded_pass_size
i_value = expanded_salt + expanded_pass
result = b''
while len(result) < result_size:
ctx = hash_func(diversifier)
ctx.update(i_value)
a_value = ctx.digest()
for _ in range(1, iterations):
a_value = hash_func(a_value).digest()
assert len(a_value) == u_bytes
result += a_value
b_value = struct.unpack(v_bytes * 'B', (a_value * ((v_bytes + u_bytes - 1) // u_bytes))[:v_bytes])
new_i_value = []
for j in range(0, i_size, v_bytes):
# Ij = Ij + B + 1
ij = list(struct.unpack(v_bytes * 'B', i_value[j:j + v_bytes]))
c = 1
for k in range(v_bytes - 1, -1, -1):
c += ij[k] + b_value[k]
ij[k] = c & 0xff
c = c >> 8
new_i_value.append(struct.pack(v_bytes * 'B', *ij))
i_value = b''.join(new_i_value)
return result[:result_size]
# Check the implementation with values from "openssl pkcs12" with OPENSSL_DEBUG_KEYGEN
assert pkcs12_derivation(
'SHA1', 3, 'changeit',
binascii.unhexlify('c6b068958d7d6085ba52c9cc3212a8fc2e50b3da'), 100000
) == binascii.unhexlify('ef3c7f41e19e7bc7bf06650164aff556d15206d7')
assert pkcs12_derivation(
'SHA1', 1, 'changeit',
binascii.unhexlify('a9fb3e857865d5e2aeff3983389c980d5de4bf39'), 50000, 24
) == binascii.unhexlify('12fe77bc0be3ae0d063c4858e948ff4e85c39daa08b833c9')
assert pkcs12_derivation(
'SHA1', 2, 'changeit',
binascii.unhexlify('a9fb3e857865d5e2aeff3983389c980d5de4bf39'), 50000, 8
) == binascii.unhexlify('13515c2efce50ef9')
assert pkcs12_derivation(
'SHA256', 3, 'changeit',
binascii.unhexlify('ad18630f2594018bd53c4573a7b03f89afda3e87'), 10000
) == binascii.unhexlify('894a3be59b92531f08a458c54e4d89493fd9dda40d65b1831ff3ca69f4ff716c')
def try_pkcs12_decrypt(encrypted, enc_alg, password, indent=''):
"""Try to decrypt some data with the given password and PKCS#12 password-based encryption algorithms"""
if isinstance(enc_alg, util_asn1.PKCS12PbeAlg):
if enc_alg.oid_name == 'pbeWithSHA1And3-KeyTripleDES-CBC':
# 192-bits 3DES key and 64-bit IV from SHA1
key = pkcs12_derivation(alg='SHA1', id_byte=1, password=password, salt=enc_alg.salt,
iterations=enc_alg.iterations, result_size=24)
iv = pkcs12_derivation(alg='SHA1', id_byte=2, password=password, salt=enc_alg.salt,
iterations=enc_alg.iterations, result_size=8)
crypto_3des = Cryptodome.Cipher.DES3.new(key, Cryptodome.Cipher.DES3.MODE_CBC, iv)
decrypted = crypto_3des.decrypt(encrypted)
elif enc_alg.oid_name == 'pbeWithSHA1And40BitRC2-CBC':
# 40-bits RC2 key and 64-bit IV from SHA1
key = pkcs12_derivation(alg='SHA1', id_byte=1, password=password, salt=enc_alg.salt,
iterations=enc_alg.iterations, result_size=5)
iv = pkcs12_derivation(alg='SHA1', id_byte=2, password=password, salt=enc_alg.salt,
iterations=enc_alg.iterations, result_size=8)
try:
crypto_rc2 = Cryptodome.Cipher.ARC2.new(key, Cryptodome.Cipher.ARC2.MODE_CBC, iv, effective_keylen=40)
decrypted = crypto_rc2.decrypt(encrypted)
except ValueError:
# Use custom RC2 implementation because "effective_keylen=40" is not always supported
# https://github.com/Legrandin/pycryptodome/issues/267
crypto_rc2 = rc2.RC2(key)
decrypted = crypto_rc2.decrypt(encrypted, rc2.MODE_CBC, iv)
else:
raise NotImplementedError("Unimplemented encryption algorithm {}".format(enc_alg))
elif isinstance(enc_alg, util_asn1.PKCS12Pbes2Alg):
pwd_bytes = password.encode('utf-8')
key = hashlib.pbkdf2_hmac(enc_alg.prf_alg, pwd_bytes, enc_alg.salt, enc_alg.iterations, enc_alg.dklen)
if enc_alg.enc_alg == 'aes256-CBC-PAD':
crypto_aes = Cryptodome.Cipher.AES.new(key, Cryptodome.Cipher.AES.MODE_CBC, enc_alg.enc_iv)
decrypted = crypto_aes.decrypt(encrypted)
else:
raise NotImplementedError("Unimplemented encryption algorithm {}".format(enc_alg))
else:
raise NotImplementedError("Unimplemented encryption algorithm {}".format(enc_alg))
# Check PKCS#5 padding
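    # e.g. three bytes of padding means the plaintext ends in b'\x03\x03\x03'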
padlen, = struct.unpack('B', decrypted[-1:])
if not (1 <= padlen <= 0x10) or any(x != decrypted[-1] for x in decrypted[-padlen:]):
print("{}* wrong password (bad PKCS#5 padding)".format(indent))
return None
print("{}(password: {})".format(indent, repr(password)))
return decrypted[:-padlen]
def print_p12_keybag(keybag_der, password, show_pem=False, list_only=False, indent=''):
"""Parse PKCS#12 keyBag ASN.1 data"""
# KeyBag ::= PrivateKeyInfo -- from PKCS #8
# EncryptedPrivateKeyInfo ::= SEQUENCE {
# encryptionAlgorithm EncryptionAlgorithmIdentifier,
# encryptedData EncryptedData
# }
# EncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
# EncryptedData ::= OCTET STRING
enc_alg_der, enc_data_der = util_asn1.decode_sequence(keybag_der, 2)
enc_alg = util_asn1.decode_x509_algid(enc_alg_der)
enc_data = util_asn1.decode_octet_string(enc_data_der)
print("{}* encryption algorithm: {}".format(indent, enc_alg))
decrypted = try_pkcs12_decrypt(enc_data, enc_alg, password, indent=indent)
if decrypted is not None:
# Show the private key
util_asn1.show_pkcs8_private_key_info(decrypted, list_only=list_only, show_pem=show_pem, indent=indent)
def print_p12_certBag(certbag_der, show_pem=False, list_only=False, indent=''):
"""Parse PKCS#12 certBag ASN.1 data"""
# CertBag ::= SEQUENCE {
# certId BAG-TYPE.&id ({CertTypes}),
# certValue [0] EXPLICIT BAG-TYPE.&Type ({CertTypes}{@certId})
# }
cert_id_der, cert_value_der = util_asn1.decode_sequence(certbag_der, 2)
cert_id = util_asn1.decode_oid(cert_id_der)
cert_value_der = util_asn1.decode_object(cert_value_der)
if cert_id != 'x509Certificate':
raise NotImplementedError("Unknown certificate format {}".format(repr(cert_id)))
cert = util_asn1.decode_octet_string(cert_value_der)
description = describe_der_certificate(cert)
if description:
print("{}* Certificate: {}".format(indent, description))
else:
print("{}* Certificate: (no description available)".format(indent))
run_openssl_show_cert(cert, list_only=list_only, show_pem=show_pem, indent=indent)
def print_p12_secretBag(secretbag_der, password, show_pem=False, list_only=False, indent=''):
"""Parse PKCS#12 secretBag ASN.1 data"""
# SecretBag ::= SEQUENCE {
# secretTypeId BAG-TYPE.&id ({SecretTypes}),
# secretValue [0] EXPLICIT BAG-TYPE.&Type ({SecretTypes} {@secretTypeId})
# }
secret_type_id_der, secret_value_der = util_asn1.decode_sequence(secretbag_der, 2)
secret_type_id = util_asn1.decode_oid(secret_type_id_der)
secret_value_der = util_asn1.decode_object(secret_value_der)
print("{}* secret type: {}".format(indent, secret_type_id))
secret_value = util_asn1.decode_octet_string(secret_value_der)
if secret_type_id == 'keyBag':
print_p12_keybag(secret_value, password, show_pem=show_pem, list_only=list_only, indent=indent)
else:
raise NotImplementedError("Unimplemented secretBag type {}".format(secret_type_id))
def print_p12_safe_contents(safe_contents_der, password, show_pem=False, list_only=False, indent=''):
"""Parse PKCS#12 SafeContents ASN.1 data
https://tools.ietf.org/html/rfc7292#section-4.2
The SafeContents type is made up of SafeBags. Each SafeBag holds one
piece of information -- a key, a certificate, etc. -- which is
identified by an object identifier.
"""
# SafeContents ::= SEQUENCE OF SafeBag
# SafeBag ::= SEQUENCE {
# bagId BAG-TYPE.&id ({PKCS12BagSet})
# bagValue [0] EXPLICIT BAG-TYPE.&Type({PKCS12BagSet}{@bagId}),
# bagAttributes SET OF PKCS12Attribute OPTIONAL
# }
# PKCS12Attribute ::= SEQUENCE {
# attrId ATTRIBUTE.&id ({PKCS12AttrSet}),
# attrValues SET OF ATTRIBUTE.&Type ({PKCS12AttrSet}{@attrId})
# } -- This type is compatible with the X.500 type 'Attribute'
# PKCS12AttrSet ATTRIBUTE ::= {
# friendlyName | -- from PKCS #9
# localKeyId, -- from PKCS #9
# ... -- Other attributes are allowed
# }
safe_bags = util_asn1.decode_sequence(safe_contents_der)
print("{}* {} {}:".format(indent, len(safe_bags), "safe bags" if len(safe_bags) >= 2 else "safe bag"))
for idx_safe_bag, safe_bag_der in enumerate(safe_bags):
safe_bag = util_asn1.decode_sequence(safe_bag_der, counts=(2, 3))
bag_id = util_asn1.decode_oid(safe_bag[0])
bag_value = util_asn1.decode_object(safe_bag[1])
try:
bag_attributes = util_asn1.decode_set(safe_bag[2]) if len(safe_bag) >= 3 else []
except NotImplementedError as exc:
# Recover from error caused by old PyCrypto
logger.warning("Unable to decode bag attributes: %s", exc)
attr_descs = ['?']
else:
attr_descs = []
for bag_attribute_der in bag_attributes:
attr_id_der, attr_values_der = util_asn1.decode_sequence(bag_attribute_der, 2)
attr_id = util_asn1.decode_oid(attr_id_der)
attr_values_der = util_asn1.decode_set(attr_values_der)
attr_values = [util_asn1.decode_any_string(v) for v in attr_values_der]
attr_descs.append("{}={}".format(attr_id, ','.join(repr(v) for v in attr_values)))
if attr_id == 'localKeyID' and len(attr_values) == 1:
m = re.match(r'^Time ([0-9]+)$', attr_values[0])
if m:
# Parse the timestamp from the local key ID
timestamp = int(m.group(1))
attr_descs.append("date='{}'".format(datetime.datetime.fromtimestamp(timestamp / 1000.)))
print("{} [{}] {} ({})".format(indent, idx_safe_bag + 1, bag_id, ', '.join(attr_descs)))
if bag_id == 'keyBag':
print_p12_keybag(bag_value, password, show_pem=show_pem, list_only=list_only, indent=indent + " ")
elif bag_id == 'certBag':
print_p12_certBag(bag_value, show_pem=show_pem, list_only=list_only, indent=indent + " ")
elif bag_id == 'secretBag':
print_p12_secretBag(bag_value, password, show_pem=show_pem, list_only=list_only, indent=indent + " ")
else:
print("{} * bag value: {}".format(indent, repr(bag_value)))
raise NotImplementedError("Unimplemented bag id {}".format(bag_id))
def print_p12_keystore(ks_content, password, show_pem=False, list_only=False):
"""Parse a PKCS#12 KeyStore file and print it"""
# run_process_with_input(['openssl', 'asn1parse', '-i', '-inform', 'DER'], ks_content, fatal=True)
# PFX (Personal Information Exchange) is defined as:
# PFX ::= SEQUENCE {
# version INTEGER {v3(3)}(v3,...),
# authSafe ContentInfo,
# macData MacData OPTIONAL
# }
version, authsafe_der, macdata_der = util_asn1.decode_sequence(ks_content, 3)
if version != 3:
raise NotImplementedError("Unimplemented PFX version {}".format(version))
# ContentInfo ::= SEQUENCE {
# contentType ContentType,
# content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
# }
# ContentType ::= OBJECT IDENTIFIER
authsafe_content_type_der, authsafe_content_der = util_asn1.decode_sequence(authsafe_der, 2)
authsafe_content_type = util_asn1.decode_oid(authsafe_content_type_der)
if authsafe_content_type != 'pkcs7-data':
raise NotImplementedError("Unimplemented PFX content type {}".format(authsafe_content_type))
authsafe_content_der = util_asn1.decode_object(authsafe_content_der)
authsafe_content = util_asn1.decode_octet_string(authsafe_content_der)
# MacData ::= SEQUENCE {
# mac DigestInfo,
# macSalt OCTET STRING,
# iterations INTEGER DEFAULT 1
# }
macdata_asn1 = util_asn1.decode_sequence(macdata_der)
if len(macdata_asn1) == 2:
mac_der, mac_salt_der = macdata_asn1
mac_iterations = 1
elif len(macdata_asn1) == 3:
mac_der, mac_salt_der, mac_iterations = macdata_asn1
else:
raise ValueError("Unexpected number of items in ASN.1 MacData sequence")
mac_salt = util_asn1.decode_octet_string(mac_salt_der)
# DigestInfo ::= SEQUENCE {
# digestAlgorithm DigestAlgorithmIdentifier,
# digest Digest
# }
# DigestAlgorithmIdentifier ::= AlgorithmIdentifier
# Digest ::= OCTET STRING
mac_digest_algorithm_der, mac_digest_der = util_asn1.decode_sequence(mac_der, 2)
mac_digest_algorithm = util_asn1.decode_x509_algid(mac_digest_algorithm_der)
mac_digest = util_asn1.decode_octet_string(mac_digest_der)
print("* PKCS#12 Keystore MAC:")
print(" * algorithm: {}".format(mac_digest_algorithm))
print(" * salt: {}".format(xx(mac_salt)))
print(" * iterations: {}".format(mac_iterations))
print(" * HMAC digest: {}".format(xx(mac_digest)))
mac_key = pkcs12_derivation(
alg=mac_digest_algorithm,
id_byte=3,
password=password,
salt=mac_salt,
iterations=mac_iterations)
if mac_digest_algorithm == 'SHA1':
hash_func = hashlib.sha1
elif mac_digest_algorithm == 'SHA256':
hash_func = hashlib.sha256
else:
        raise NotImplementedError("Unimplemented algorithm {} for PKCS#12 HMAC verification".format(mac_digest_algorithm))
mac_hmac = hmac.new(key=mac_key, msg=authsafe_content, digestmod=hash_func).digest()
if mac_hmac == mac_digest:
print(" (password: {})".format(repr(password)))
print(" (HMAC key: {})".format(xx(mac_key)))
else:
print(" (computed HMAC: {})".format(xx(mac_hmac)))
        print("  * wrong password (bad HMAC digest)")
# AuthenticatedSafe ::= SEQUENCE OF ContentInfo
# -- Data if unencrypted
# -- EncryptedData if password-encrypted
# -- EnvelopedData if public key-encrypted
authsafe_seq = util_asn1.decode_sequence(authsafe_content)
print("* {} data blocks:".format(len(authsafe_seq)))
for blk_index, blk_der in enumerate(authsafe_seq):
blk_content_type_der, blk_content_der = util_asn1.decode_sequence(blk_der, 2)
blk_content_type = util_asn1.decode_oid(blk_content_type_der)
blk_content_der = util_asn1.decode_object(blk_content_der) # tag "cont[0]"
if blk_content_type == 'pkcs7-data':
safe_contents = util_asn1.decode_octet_string(blk_content_der)
print(" [{}] unencrypted safe contents:".format(blk_index + 1))
print_p12_safe_contents(safe_contents, password, show_pem=show_pem, list_only=list_only, indent=" ")
elif blk_content_type == 'pkcs7-encryptedData':
print(" [{}] encrypted safe contents:".format(blk_index + 1))
# EncryptedData ::= SEQUENCE {
# version Version,
# encryptedContentInfo EncryptedContentInfo
# }
encblk_version, encrypted_ci_der = util_asn1.decode_sequence(blk_content_der, 2)
if encblk_version != 0:
raise NotImplementedError("Unimplemented PKCS#7 EncryptedData version {}".format(encblk_version))
# EncryptedContentInfo ::= SEQUENCE {
# contentType ContentType,
# contentEncryptionAlgorithm ContentEncryptionAlgorithmIdentifier,
# encryptedContent [0] IMPLICIT EncryptedContent OPTIONAL
# }
# ContentEncryptionAlgorithmIdentifier ::= AlgorithmIdentifier
# EncryptedContent ::= OCTET STRING
enc_ctype_der, enc_alg_der, enc_content_der = util_asn1.decode_sequence(encrypted_ci_der, 3)
enc_ctype = util_asn1.decode_oid(enc_ctype_der)
enc_alg = util_asn1.decode_x509_algid(enc_alg_der)
enc_content = util_asn1.decode_object(enc_content_der) # tag "cont[0]"
if enc_ctype != 'pkcs7-data':
raise NotImplementedError("Unimplemented PKCS#7 EncryptedData content type {}".format(enc_ctype))
print(" * encryption algorithm: {}".format(enc_alg))
safe_contents = try_pkcs12_decrypt(enc_content, enc_alg, password, indent=" ")
if safe_contents is not None:
print_p12_safe_contents(safe_contents, password, show_pem=show_pem, list_only=list_only, indent=" ")
else:
raise NotImplementedError("Unimplemented bag content type {}".format(blk_content_type))
def main(argv=None):
"""Program entry point"""
parser = argparse.ArgumentParser(
description="Parse a PKCS#12 keystore file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('input', metavar='KEYSTORE', nargs='?', type=str,
help="load a keystore instead of generating one")
parser.add_argument('-d', '--debug', action='store_true',
help="show debug messages")
parser.add_argument('-p', '--password', type=str, default='changeit',
help="keystore password")
parser.add_argument('-l', '--list', action='store_true',
help="list only, without printing the data")
parser.add_argument('-P', '--pem', action='store_true',
help="show certificates and private keys in PEM format")
args = parser.parse_args(argv)
logging.basicConfig(format='[%(levelname)-5s] %(message)s',
level=logging.DEBUG if args.debug else logging.INFO)
report_if_missing_cryptography()
if args.input:
with open(args.input, 'rb') as fin:
ks_content = fin.read()
logger.debug("Parsing file %r (%d bytes)", args.input, len(ks_content))
else:
try:
ks_content = generate_p12_keystore(args.password)
except ValueError as exc:
logger.fatal("Generating a keystore failed: %s", exc)
return 1
logger.debug("Parsing keystore (%d bytes)", len(ks_content))
try:
print_p12_keystore(ks_content, args.password, show_pem=args.pem, list_only=args.list)
except ValueError as exc:
logger.fatal("Parsing the keystore failed: %s", exc)
raise # Show the stack trace
return 0
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "ed783fd34153f1a430ecc274e69504fb",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 182,
"avg_line_length": 45.422222222222224,
"alnum_prop": 0.6267123287671232,
"repo_name": "fishilico/shared",
"id": "fa61571b377cc972e48e399ea32c03d8a211070c",
"size": "25665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java/keystore/parse_pkcs12.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "63325"
},
{
"name": "C",
"bytes": "1077380"
},
{
"name": "Coq",
"bytes": "3423"
},
{
"name": "Dockerfile",
"bytes": "3226"
},
{
"name": "HTML",
"bytes": "38654"
},
{
"name": "Java",
"bytes": "2410"
},
{
"name": "Makefile",
"bytes": "63176"
},
{
"name": "PHP",
"bytes": "13954"
},
{
"name": "Python",
"bytes": "1567078"
},
{
"name": "Roff",
"bytes": "15423"
},
{
"name": "Rust",
"bytes": "55834"
},
{
"name": "Shell",
"bytes": "75810"
},
{
"name": "TeX",
"bytes": "60215"
}
],
"symlink_target": ""
}
|
"""Tests of grpc.channel_ready_future."""
import threading
import unittest
import grpc
import grpc_gcp
from grpc_gcp_test.unit.framework.common import test_constants
from grpc_gcp_test.unit import _thread_pool
class _Callback(object):
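    """Stores the value passed to accept_value() and lets a test block until it arrives."""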
def __init__(self):
self._condition = threading.Condition()
self._value = None
def accept_value(self, value):
with self._condition:
self._value = value
self._condition.notify_all()
def block_until_called(self):
with self._condition:
while self._value is None:
self._condition.wait()
return self._value
class ChannelReadyFutureTest(unittest.TestCase):
def test_lonely_channel_connectivity(self):
callback = _Callback()
channel_config = grpc_gcp.api_config_from_text_pb('')
channel = grpc_gcp.insecure_channel(
'localhost:12345',
options=((grpc_gcp.API_CONFIG_CHANNEL_ARG, channel_config),)
)
ready_future = grpc.channel_ready_future(channel)
ready_future.add_done_callback(callback.accept_value)
with self.assertRaises(grpc.FutureTimeoutError):
ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
self.assertFalse(ready_future.cancelled())
self.assertFalse(ready_future.done())
self.assertTrue(ready_future.running())
ready_future.cancel()
value_passed_to_callback = callback.block_until_called()
self.assertIs(ready_future, value_passed_to_callback)
self.assertTrue(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
def test_immediately_connectable_channel_connectivity(self):
thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
callback = _Callback()
channel_config = grpc_gcp.api_config_from_text_pb('')
channel = grpc_gcp.insecure_channel(
'localhost:{}'.format(port),
options=((grpc_gcp.API_CONFIG_CHANNEL_ARG, channel_config),)
)
ready_future = grpc.channel_ready_future(channel)
ready_future.add_done_callback(callback.accept_value)
self.assertIsNone(
ready_future.result(timeout=test_constants.LONG_TIMEOUT))
value_passed_to_callback = callback.block_until_called()
self.assertIs(ready_future, value_passed_to_callback)
self.assertFalse(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
# Cancellation after maturity has no effect.
ready_future.cancel()
self.assertFalse(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
self.assertFalse(thread_pool.was_used())
if __name__ == '__main__':
unittest.main(verbosity=2)
|
{
"content_hash": "b631f70411d089ef600780aaf910cade",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 36.642857142857146,
"alnum_prop": 0.6491228070175439,
"repo_name": "GoogleCloudPlatform/grpc-gcp-python",
"id": "612e2a79262ca9f810d11d1c8b3b1f0109a0800c",
"size": "3655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/grpc_gcp_test/unit/_channel_ready_future_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "284661"
},
{
"name": "Shell",
"bytes": "2082"
}
],
"symlink_target": ""
}
|
"""Tests for Python ops defined in sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
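  """Zero out entries of x below thresh (in place) and return (SparseTensor, num nonzeros)."""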
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class SparseToIndicatorTest(test_util.TensorFlowTestCase):
def _SparseTensor_5x6(self, dtype):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x3x4(self, dtype):
# Includes two entries with the form [1, 1, x] : 150.
ind = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 2], [1, 0, 3], [1, 1, 0],
[1, 1, 1], [1, 1, 2], [1, 2, 2]])
val = np.array([1, 10, 12, 103, 150, 149, 150, 122])
shape = np.array([2, 3, 4])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtype),
constant_op.constant(shape, dtypes.int64))
def testInt32(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int32)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = ((0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33))
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testInt64(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 50).eval()
expected_output = np.zeros((5, 50), dtype=np.bool)
expected_trues = [(0, 0), (1, 10), (1, 13), (1, 14), (3, 32), (3, 33)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
def testHigherRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x3x4(dtypes.int64)
output = sparse_ops.sparse_to_indicator(sp_input, 200).eval()
expected_output = np.zeros((2, 3, 200), dtype=np.bool)
expected_trues = [(0, 0, 1), (0, 1, 10), (0, 1, 12), (1, 0, 103),
(1, 1, 149), (1, 1, 150), (1, 2, 122)]
for expected_true in expected_trues:
expected_output[expected_true] = True
self.assertAllEqual(output, expected_output)
class SparseMergeTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices = np.array([0, 13, 10, 33, 32, 14])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return indices, values
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
indices, values = self._SparseTensorValue_3x50(indices_dtype, values_dtype)
return (sparse_tensor.SparseTensor.from_value(indices),
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 10], [1, 13], [1, 14], [2, 32], [2, 33]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def _AssertResultsNotSorted(self, output, vocab_size):
self.assertAllEqual(output.indices,
[[0, 0], [1, 13], [1, 10], [2, 33], [2, 32], [1, 14]])
self.assertAllEqual(output.values, [-3, 4, 1, 9, 5, 1])
self.assertAllEqual(output.dense_shape, [3, vocab_size])
def testInt32AndFloat32(self):
vocab_size = 50
indices_v, values_v = self._SparseTensorValue_3x50(np.int32, np.float32)
with self.test_session(use_gpu=False) as sess:
for indices in (indices_v,
sparse_tensor.SparseTensor.from_value(indices_v)):
for values in (values_v,
sparse_tensor.SparseTensor.from_value(values_v)):
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat32(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt32AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int32, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat32NonCanonicalOrder(self):
vocab_size = 50
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testInt64AndFloat64NonCanonicalOrder(self):
vocab_size = 50
vocab_size_tensor = constant_op.constant(vocab_size, dtypes.int64)
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(
indices, values, vocab_size_tensor, already_sorted=True)
output = sess.run(sp_output)
self._AssertResultsNotSorted(output, vocab_size)
def testShouldSetLastDimensionInDynamicShape(self):
with ops.Graph().as_default():
shape = constant_op.constant([2, 2], dtype=dtypes.int64)
dynamic_shape = array_ops.placeholder_with_default(shape, shape=[2])
ids = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[1, 3],
dense_shape=dynamic_shape)
values = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1]],
values=[0.4, 0.7],
dense_shape=dynamic_shape)
merged = sparse_ops.sparse_merge(
sp_ids=ids, sp_values=values, vocab_size=5)
self.assertEqual(5, merged.get_shape()[1])
class SparseMergeHighDimTest(test_util.TensorFlowTestCase):
def _SparseTensor_3x50(self, indices_dtype, values_dtype):
# NOTE: This input is intentionally not sorted to validate the
# already_sorted flag below.
ind = np.array([[0, 0], [1, 0], [1, 2], [2, 0], [2, 1], [1, 1]])
# NB: these are not sorted
indices0 = np.array([0, 13, 10, 33, 32, 14])
indices1 = np.array([12, 4, 0, 0, 1, 30])
values = np.array([-3, 4, 1, 9, 5, 1])
shape = np.array([3, 3])
indices0 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices0, indices_dtype), np.array(shape, np.int64))
indices1 = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(indices1, indices_dtype), np.array(shape, np.int64))
values = sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(values, values_dtype), np.array(shape, np.int64))
return ([sparse_tensor.SparseTensor.from_value(indices0),
sparse_tensor.SparseTensor.from_value(indices1)],
sparse_tensor.SparseTensor.from_value(values))
def _AssertResultsSorted(self, output, vocab_size):
self.assertAllEqual(
output.indices,
[[0, 0, 12], [1, 10, 0], [1, 13, 4], [1, 14, 30], [2, 32, 1],
[2, 33, 0]])
self.assertAllEqual(output.values, [-3, 1, 4, 1, 5, 9])
self.assertAllEqual(output.dense_shape, [3] + vocab_size)
def testInt64AndFloat32(self):
vocab_size = [50, 31]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float32)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64(self):
vocab_size = [50, 31]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
def testInt64AndFloat64Shape(self):
vocab_size = [50, 30]
with self.test_session(use_gpu=False) as sess:
indices, values = self._SparseTensor_3x50(np.int64, np.float64)
sp_output = sparse_ops.sparse_merge(indices, values, vocab_size)
output = sess.run(sp_output)
self._AssertResultsSorted(output, vocab_size)
class SparseRetainTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64),
np.array(val, np.int32), np.array(shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
to_retain = np.array([1, 0, 0, 1, 1, 0], dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0], [1, 4], [3, 2]])
self.assertAllEqual(output.values, [0, 14, 32])
self.assertAllEqual(output.dense_shape, [5, 6])
def testRetainNone(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_5x6()
to_retain = np.zeros((6,), dtype=np.bool)
sp_output = sparse_ops.sparse_retain(sp_input, to_retain)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, np.array([]).reshape((0, 2)))
self.assertAllEqual(output.values, [])
self.assertAllEqual(output.dense_shape, [5, 6])
def testMismatchedRetainShape(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_5x6()
to_retain = np.array([1, 0, 0, 1, 0], dtype=np.bool)
with self.assertRaises(ValueError):
sparse_ops.sparse_retain(sp_input, to_retain)
class SparseResetShapeTest(test_util.TensorFlowTestCase):
_IND_2_5_6 = np.array(
[[0, 0, 0], [0, 1, 0], [0, 1, 3], [1, 1, 4], [1, 3, 2], [1, 3, 3]],
dtype=np.int64)
_VAL_2_5_6 = np.array([0, 10, 13, 14, 32, 33], dtype=np.int32)
_SHP_2_5_6 = np.array([2, 5, 6], dtype=np.int64)
def _SparseTensor_2x5x6(self):
return sparse_tensor.SparseTensor(
constant_op.constant(self._IND_2_5_6, dtypes.int64),
constant_op.constant(self._VAL_2_5_6, dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensor_2x5x6_Empty(self):
return sparse_tensor.SparseTensor(
constant_op.constant(
np.empty(shape=[0, 3], dtype=np.int64), dtypes.int64),
constant_op.constant(np.empty(shape=[0], dtype=np.int32), dtypes.int32),
constant_op.constant(self._SHP_2_5_6, dtypes.int64))
def _SparseTensorValue_2x5x6(self):
return sparse_tensor.SparseTensorValue(self._IND_2_5_6, self._VAL_2_5_6,
self._SHP_2_5_6)
def testStaticShapeInfoPreservedWhenNewShapeIsProvidedAndStatic(self):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
self.assertAllEqual([3, 6, 7], sp_output.get_shape())
def testBasic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testInputUnavailableInGraphConstructionOk(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorValue_2x5x6()
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testFeedInputUnavailableInGraphConstructionOk(self):
with self.test_session(use_gpu=False) as sess:
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
new_shape = np.array([3, 6, 7], dtype=np.int64)
sp_output = sparse_ops.sparse_reset_shape(sp_input, new_shape)
output = sess.run(sp_output,
feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [3, 6, 7])
def testTightBoundingBox(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
sp_output = sparse_ops.sparse_reset_shape(sp_input)
output = sess.run(sp_output)
self.assertAllEqual(output.indices, [[0, 0, 0], [0, 1, 0], [0, 1, 3],
[1, 1, 4], [1, 3, 2], [1, 3, 3]])
self.assertAllEqual(output.values, [0, 10, 13, 14, 32, 33])
self.assertAllEqual(output.dense_shape, [2, 4, 5])
def testTightBoundingBoxEmpty(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6_Empty()
sp_output = sparse_ops.sparse_reset_shape(sp_input)
output = sess.run(sp_output)
self.assertAllEqual(output.indices.shape, [0, 3])
self.assertAllEqual(output.values.shape, [0])
self.assertAllEqual(output.dense_shape, [0, 0, 0])
def testInvalidRank(self):
with self.test_session(use_gpu=False):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7], dtype=np.int64)
with self.assertRaises(ValueError):
sparse_ops.sparse_reset_shape(sp_input, new_shape)
def testInvalidRankNewShapeUnavailableInGraphConstruction(self):
with self.test_session(use_gpu=False) as sess:
new_shape = array_ops.placeholder(dtype=dtypes.int64)
sp_input = self._SparseTensor_2x5x6()
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x == y did not hold element-wise"):
sess.run(out, feed_dict={new_shape: np.array([3, 7], dtype=np.int64)})
def testInvalidDimensionSizeStatic(self):
sp_input = self._SparseTensor_2x5x6()
new_shape = np.array([3, 7, 5], dtype=np.int64)
with self.assertRaisesRegexp(ValueError, "should have dimension sizes"):
sparse_ops.sparse_reset_shape(sp_input, new_shape)
def testInvalidDimensionSizeDynamic(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x5x6()
new_shape = array_ops.placeholder(dtype=dtypes.int32)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out, feed_dict={new_shape: [3, 7, 5]})
def testInvalidDimensionSizeInputUnavailableInGraphConstruction(self):
sp_input = array_ops.sparse_placeholder(dtype=dtypes.int32)
with self.test_session(use_gpu=False) as sess:
new_shape = np.array([3, 7, 5], dtype=np.int64)
out = sparse_ops.sparse_reset_shape(sp_input, new_shape)
with self.assertRaisesOpError("x <= y did not hold element-wise"):
sess.run(out, feed_dict={sp_input: self._SparseTensorValue_2x5x6()})
class SparseFillEmptyRowsTest(test_util.TensorFlowTestCase):
def _SparseTensorValue_5x6(self, dtype=np.int32):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array([0, 10, 13, 14, 32, 33])
shape = np.array([5, 6])
return sparse_tensor.SparseTensorValue(
np.array(ind, np.int64), np.array(val, dtype), np.array(
shape, np.int64))
def _SparseTensor_5x6(self):
return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_5x6())
def _SparseTensor_String5x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]])
val = np.array(["a", "b", "c", "d", "e", "f"])
shape = np.array([5, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.string),
constant_op.constant(shape, dtypes.int64))
def _SparseTensor_2x6(self):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4]])
val = np.array([0, 10, 13, 14])
shape = np.array([2, 6])
return sparse_tensor.SparseTensor(
constant_op.constant(ind, dtypes.int64),
constant_op.constant(val, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
def testFillNumber(self):
with self.test_session(use_gpu=False) as sess:
for sp_input in (self._SparseTensorValue_5x6(), self._SparseTensor_5x6()):
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testFillFloat(self):
with self.test_session(use_gpu=False) as sess:
values = constant_op.constant(
[0.0, 10.0, 13.0, 14.0, 32.0, 33.0], dtype=dtypes.float64)
default_value = constant_op.constant(-1.0, dtype=dtypes.float64)
sp_input = sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]]),
values=values,
dense_shape=np.array([5, 6]))
sp_output, empty_row_indicator = (sparse_ops.sparse_fill_empty_rows(
sp_input, default_value))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4],
[2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllClose(output.values, [0, 10, 13, 14, -1, 32, 33, -1])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
values_grad_err = gradient_checker.compute_gradient_error(
values, values.shape.as_list(), sp_output.values, [8], delta=1e-8)
self.assertGreater(values_grad_err, 0)
self.assertLess(values_grad_err, 1e-8)
default_value_grad_err = gradient_checker.compute_gradient_error(
default_value,
default_value.shape.as_list(),
sp_output.values, [8],
delta=1e-8)
self.assertGreater(default_value_grad_err, 0)
self.assertLess(default_value_grad_err, 1e-8)
def testFillString(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_String5x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, ""))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(
output.indices,
[[0, 0], [1, 0], [1, 3], [1, 4], [2, 0], [3, 2], [3, 3], [4, 0]])
self.assertAllEqual(output.values,
[b"a", b"b", b"c", b"d", b"", b"e", b"f", b""])
self.assertAllEqual(output.dense_shape, [5, 6])
self.assertAllEqual(empty_row_indicator_out,
np.array([0, 0, 1, 0, 1]).astype(np.bool))
def testNoEmptyRows(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensor_2x6()
sp_output, empty_row_indicator = (
sparse_ops.sparse_fill_empty_rows(sp_input, -1))
output, empty_row_indicator_out = sess.run(
[sp_output, empty_row_indicator])
self.assertAllEqual(output.indices, [[0, 0], [1, 0], [1, 3], [1, 4]])
self.assertAllEqual(output.values, [0, 10, 13, 14])
self.assertAllEqual(output.dense_shape, [2, 6])
self.assertAllEqual(empty_row_indicator_out, np.zeros(2).astype(np.bool))
class SparseAddTest(test_util.TensorFlowTestCase):
def testValuesInVariable(self):
indices = constant_op.constant([[1]], dtype=dtypes.int64)
values = variables.Variable([1], trainable=False, dtype=dtypes.float32)
shape = constant_op.constant([1], dtype=dtypes.int64)
sp_input = sparse_tensor.SparseTensor(indices, values, shape)
sp_output = sparse_ops.sparse_add(sp_input, sp_input)
with self.test_session(use_gpu=False) as sess:
sess.run(variables.global_variables_initializer())
output = sess.run(sp_output)
self.assertAllEqual(output.values, [2])
class SparseReduceTest(test_util.TensorFlowTestCase):
  # [[1, ?, 1]
  #  [?, 1, ?]]
# where ? is implicitly-zero.
ind = np.array([[0, 0], [0, 2], [1, 1]]).astype(np.int64)
vals = np.array([1, 1, 1]).astype(np.int32)
dense_shape = np.array([2, 3]).astype(np.int64)
def _compare(self, sp_t, reduction_axes, ndims, keep_dims, do_sum):
densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
np_ans = densified
if reduction_axes is None:
if do_sum:
np_ans = np.sum(np_ans, keepdims=keep_dims)
else:
np_ans = np.max(np_ans, keepdims=keep_dims)
else:
if not isinstance(reduction_axes, list): # Single scalar.
reduction_axes = [reduction_axes]
reduction_axes = np.array(reduction_axes).astype(np.int32)
# Handles negative axes.
reduction_axes = (reduction_axes + ndims) % ndims
# Loop below depends on sorted.
reduction_axes.sort()
for ra in reduction_axes.ravel()[::-1]:
if do_sum:
np_ans = np.sum(np_ans, axis=ra, keepdims=keep_dims)
else:
np_ans = np.max(np_ans, axis=ra, keepdims=keep_dims)
with self.cached_session():
if do_sum:
tf_dense_ans = sparse_ops.sparse_reduce_sum(sp_t, reduction_axes,
keep_dims)
else:
tf_dense_ans = sparse_ops.sparse_reduce_max(sp_t, reduction_axes,
keep_dims)
out_dense = tf_dense_ans.eval()
if do_sum:
tf_sparse_ans = sparse_ops.sparse_reduce_sum_sparse(sp_t,
reduction_axes,
keep_dims)
else:
tf_sparse_ans = sparse_ops.sparse_reduce_max_sparse(sp_t,
reduction_axes,
keep_dims)
# Convert to dense for comparison purposes.
out_sparse = sparse_ops.sparse_tensor_to_dense(tf_sparse_ans).eval()
self.assertAllClose(np_ans, out_dense)
self.assertAllClose(np_ans, out_sparse)
def _compare_all(self, sp_t, reduction_axes, ndims):
self._compare(sp_t, reduction_axes, ndims, False, False)
self._compare(sp_t, reduction_axes, ndims, False, True)
self._compare(sp_t, reduction_axes, ndims, True, False)
self._compare(sp_t, reduction_axes, ndims, True, True)
def testSimpleAndRandomInputs(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
self._compare_all(sp_t, None, ndims=2)
self._compare_all(sp_t, 0, ndims=2)
self._compare_all(sp_t, [1], ndims=2)
self._compare_all(sp_t, [0, 1], ndims=2)
self._compare_all(sp_t, [1, 0], ndims=2)
self._compare_all(sp_t, [-1], ndims=2)
self._compare_all(sp_t, [1, -2], ndims=2)
np.random.seed(1618)
test_dims = [(1618, 1, 11, 7, 1), (1,), (1, 1, 1)]
with self.test_session(use_gpu=False):
for dims in test_dims:
sp_t, unused_nnz = _sparsify(np.random.randn(*dims))
# reduce all using None
self._compare_all(sp_t, None, ndims=len(dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
self._compare_all(sp_t, axes, ndims=len(dims))
def testInvalidAxes(self):
sp_t = sparse_tensor.SparseTensor(self.ind, self.vals, self.dense_shape)
with self.test_session(use_gpu=False):
with self.assertRaisesOpError("Invalid reduction dimension -3"):
sparse_ops.sparse_reduce_sum(sp_t, -3).eval()
with self.assertRaisesOpError("Invalid reduction dimension 2"):
sparse_ops.sparse_reduce_sum(sp_t, 2).eval()
with self.assertRaisesOpError("Invalid reduction dimension -3"):
sparse_ops.sparse_reduce_max(sp_t, -3).eval()
with self.assertRaisesOpError("Invalid reduction dimension 2"):
sparse_ops.sparse_reduce_max(sp_t, 2).eval()
def testGradient(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
np.random.seed(8161)
test_dims = [(11, 1, 5, 7, 1), (2, 2)]
with self.test_session(use_gpu=False):
for dims in test_dims:
sp_t, nnz = _sparsify(np.random.randn(*dims))
# reduce random axes from 1D to N-D
for d in range(1, len(dims) + 1):
axes = np.random.choice(len(dims), size=d, replace=False).tolist()
reduced = sparse_ops.sparse_reduce_sum(sp_t, axes)
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
reduced,
reduced.eval().shape)
self.assertLess(err, 1e-3)
# Tests for negative axes.
reduced = sparse_ops.sparse_reduce_sum(sp_t, -1)
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
reduced,
reduced.eval().shape)
self.assertLess(err, 1e-3)
class SparseMathOpsTest(test_util.TensorFlowTestCase):
def _check(self, result_tensor, result_np, input_sp_t):
self.assertTrue(isinstance(result_tensor, sparse_tensor.SparseTensor))
self.assertTrue(isinstance(input_sp_t, sparse_tensor.SparseTensor))
self.assertAllEqual(input_sp_t.indices.eval(), result_tensor.indices.eval())
self.assertAllEqual(input_sp_t.dense_shape.eval(),
result_tensor.dense_shape.eval())
res_densified = sparse_ops.sparse_to_dense(result_tensor.indices,
result_tensor.dense_shape,
result_tensor.values).eval()
self.assertAllEqual(result_np, res_densified)
def testCwiseDivAndMul(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
dense_t = constant_op.constant(dense_vals_np)
self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
# Check commutative.
self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
if dtype in [np.int32, np.int64]:
res = sp_t / dense_t # should invoke "__truediv__"
self.assertEqual(res.values.eval().dtype, np.float64)
def testCwiseAdd(self):
with self.test_session(use_gpu=False):
# Identity(2) + AllOnes(2,2). Should be equal to 2 * Identity(2).
indices = [[0, 0], [1, 1]]
vals = [1, 1]
shape = (2, 2)
sp_t = sparse_tensor.SparseTensor(indices, vals, shape)
dense_t = array_ops.ones(shape, dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
# Variant of above, but broadcasts the dense side.
dense_t = array_ops.ones([1], dtype=dtypes.int32)
self._check(
sparse_ops.sparse_dense_cwise_add(sp_t, dense_t),
np.identity(2) * 2, sp_t)
def testGradients(self):
np.random.seed(1618)
sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64]:
for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
sp_t, nnz = _sparsify(sp_vals_np, thresh=1.5)
dense_t = constant_op.constant(dense_vals_np)
cmul = sp_t * dense_t
err = gradient_checker.compute_gradient_error([sp_t.values, dense_t],
[(nnz,), dense_shape],
cmul.values, (nnz,))
self.assertLess(err, 1e-4)
cdiv = sp_t / dense_t
err = gradient_checker.compute_gradient_error(sp_t.values, (nnz,),
cdiv.values, (nnz,))
self.assertLess(err, 1e-4)
err = gradient_checker.compute_gradient_error(
dense_t,
dense_shape,
cdiv.values, (nnz,),
x_init_value=dense_vals_np)
self.assertLess(err, 2e-4)
class SparseSoftmaxTest(test_util.TensorFlowTestCase):
def testEquivalentToDensified(self):
np.random.seed(1618)
n, m = np.random.choice(20, size=2)
for dtype in [np.float32, np.float64]:
sp_vals_np = np.random.rand(n, m).astype(dtype)
batched_sp_t, unused_nnz1 = _sparsify(
sp_vals_np.reshape((1, n, m)), thresh=0.) # No masking.
with self.test_session(use_gpu=False):
densified = constant_op.constant(sp_vals_np)
sp_result = sparse_ops.sparse_softmax(batched_sp_t).eval(
).values.reshape((n, m))
dense_result = nn_ops.softmax(densified)
self.assertAllClose(dense_result.eval(), sp_result)
def testHigherRanks(self):
# For the first shape:
# First batch:
# [? e.]
# [1. ? ]
# Second batch:
# [e ? ]
# [e e ]
#
# The softmax results should be:
# [? 1.] [1 ?]
# [1. ? ] and [.5 .5]
# where ? means implicitly zero.
#
# The second shape: same input data, but with a higher-rank shape.
shapes = [[2, 2, 2], [2, 1, 2, 2]]
for shape in shapes:
values = np.asarray(
[0., np.e, 1., 0., np.e, 0., np.e, np.e]).reshape(shape)
sp_t, unused_nnz = _sparsify(values, thresh=1e-2)
expected_values = [1., 1., 1., .5, .5]
with self.test_session(use_gpu=False):
result = sparse_ops.sparse_softmax(sp_t).eval()
self.assertAllEqual(expected_values, result.values)
self.assertAllEqual(sp_t.indices.eval(), result.indices)
self.assertAllEqual(shape, result.dense_shape)
def testGradient(self):
x_shape = [2, 5, 10]
with self.test_session(use_gpu=False):
for dtype in [np.float32, np.float64]:
x_np = np.random.randn(*x_shape).astype(dtype)
x_tf, nnz = _sparsify(x_np)
y_tf = sparse_ops.sparse_softmax(x_tf)
err = gradient_checker.compute_gradient_error(x_tf.values, (nnz,),
y_tf.values, (nnz,))
self.assertLess(err, 1e-4)
class SparseMinimumMaximumTest(test_util.TensorFlowTestCase):
def _assertSparseTensorValueEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testBasic(self):
with self.test_session(use_gpu=False):
# 1-D, values at index 0.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_one).eval()
self._assertSparseTensorValueEqual(sp_one.eval(), max_tf)
self._assertSparseTensorValueEqual(sp_zero.eval(), min_tf)
# Values at different indices.
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [7])
sp_zero_2 = sparse_tensor.SparseTensor([[1]], [0], [7])
expected = sparse_tensor.SparseTensor([[0], [1]], [0, 0], [7])
max_tf = sparse_ops.sparse_maximum(sp_zero, sp_zero_2).eval()
min_tf = sparse_ops.sparse_minimum(sp_zero, sp_zero_2).eval()
self._assertSparseTensorValueEqual(expected.eval(), max_tf)
self._assertSparseTensorValueEqual(expected.eval(), min_tf)
def testRandom(self):
np.random.seed(1618)
shapes = [(13,), (6, 8), (1, 7, 1)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
a_np = np.random.randn(*shape).astype(dtype)
b_np = np.random.randn(*shape).astype(dtype)
sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
with self.test_session(use_gpu=False):
maximum_tf = sparse_ops.sparse_maximum(sp_a, sp_b)
maximum_tf_densified = sparse_ops.sparse_tensor_to_dense(
maximum_tf).eval()
minimum_tf = sparse_ops.sparse_minimum(sp_a, sp_b)
minimum_tf_densified = sparse_ops.sparse_tensor_to_dense(
minimum_tf).eval()
a_densified = sparse_ops.sparse_tensor_to_dense(sp_a).eval()
b_densified = sparse_ops.sparse_tensor_to_dense(sp_b).eval()
self.assertAllEqual(
np.maximum(a_densified, b_densified), maximum_tf_densified)
self.assertAllEqual(
np.minimum(a_densified, b_densified), minimum_tf_densified)
def testMismatchedShapes(self):
with self.test_session(use_gpu=False):
sp_zero = sparse_tensor.SparseTensor([[0, 0]], [0], [1, 1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands do not have the same ranks"):
sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
sp_zero = sparse_tensor.SparseTensor([[0]], [0], [1])
sp_one = sparse_tensor.SparseTensor([[0]], [1], [2])
with self.assertRaisesOpError("Operands' shapes do not match"):
sparse_ops.sparse_maximum(sp_zero, sp_one).eval()
class SparseTransposeTest(test.TestCase):
def testTranspose(self):
if np.__version__ == "1.13.0":
self.skipTest("numpy 1.13.0 bug")
with self.test_session(use_gpu=False):
np.random.seed(1618)
shapes = [np.random.randint(1, 10, size=rank) for rank in range(1, 6)]
for shape in shapes:
for dtype in [np.int32, np.int64, np.float32, np.float64]:
dn_input = np.random.randn(*shape).astype(dtype)
rank = array_ops.rank(dn_input).eval()
perm = np.random.choice(rank, rank, False)
sp_input, unused_a_nnz = _sparsify(dn_input)
sp_trans = sparse_ops.sparse_transpose(sp_input, perm=perm)
dn_trans = sparse_ops.sparse_tensor_to_dense(sp_trans).eval()
expected_trans = array_ops.transpose(dn_input, perm=perm).eval()
self.assertAllEqual(expected_trans.shape, sp_trans.get_shape())
self.assertAllEqual(dn_trans, expected_trans)
class SparsePlaceholderTest(test.TestCase):
def testPlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=(10, 47))
self.assertAllEqual([10, 47], foo.get_shape())
self.assertAllEqual([None, 2], foo.indices.get_shape().as_list())
def testPartialShapePlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=(None, 47))
self.assertAllEqual([None, None], foo.get_shape().as_list())
self.assertAllEqual([None, 2], foo.indices.get_shape().as_list())
def testNoShapePlaceholder(self):
foo = array_ops.sparse_placeholder(dtypes.float32, shape=None)
self.assertAllEqual(None, foo.get_shape())
self.assertAllEqual([None, None], foo.indices.get_shape().as_list())
if __name__ == "__main__":
googletest.main()
|
{
"content_hash": "15f0a84050eb1c2fb97d1223c00a9758",
"timestamp": "",
"source": "github",
"line_count": 967,
"max_line_length": 80,
"avg_line_length": 41.23267838676318,
"alnum_prop": 0.6134881621187801,
"repo_name": "dancingdan/tensorflow",
"id": "79efee3f5b87c6aa1e4b3adf24862bd496027e33",
"size": "40561",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/sparse_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "339398"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49741628"
},
{
"name": "CMake",
"bytes": "195409"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254047"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867093"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58612"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41593453"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "476832"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from mycroft.package_cls import Package
from mycroft.services.service_plugin import ServicePlugin
class PackageService(ServicePlugin):
def __init__(self, rt):
super().__init__(rt)
self._package = Package()
def add_struct(self, struct):
"""
Register a data structure as part of the global package
Example:
>>> self.rt.package.add_struct({'album_art': {'url': str}})
>>> def my_skill_handler(p: Package):
... p.album_art.url = 'http://foo.com/bar.png'
"""
self._package.add_struct(struct)
def __setattr__(self, key, value):
if key in ('config', 'rt') or key.startswith('_'):
return object.__setattr__(self, key, value)
return self._package.__setattr__(key, value)
def __getattr__(self, item):
try:
return object.__getattribute__(self, item)
except AttributeError:
pass
return self._package.__getattribute__(item)
def __call__(self, **kwargs):
"""Get an empty package instance"""
return deepcopy(self._package).add(**kwargs)
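# --- Hedged usage sketch (not part of the original module) ---
# `PackageService` proxies attribute access to an internal `Package`, and
# calling the service returns an independent deep copy so handlers never
# mutate shared state. `rt` stands for whatever runtime object the
# surrounding framework injects (an assumption here, not shown above):
#
#   service = PackageService(rt)
#   service.add_struct({'speech': {'text': str}})
#   p = service()           # fresh Package instance
#   p.speech.text = 'hi'    # local to this handler's package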
|
{
"content_hash": "87aeaa63248dc52f224285e0d82d41db",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 32.30555555555556,
"alnum_prop": 0.5778159931212382,
"repo_name": "MatthewScholefield/mycroft-simple",
"id": "dabb026a15c9dc7d62a658ca13ea00b60fc70e56",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mycroft/services/package_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127338"
},
{
"name": "Shell",
"bytes": "4729"
}
],
"symlink_target": ""
}
|
from os.path import join
import pathlib
import numpy as np
def _is_32bit():
return np.intp(0).itemsize < 8
def check_propack_submodule():
if not (pathlib.Path(__file__).parent / 'PROPACK/README').exists():
raise RuntimeError("Missing the `PROPACK` submodule! Run "
"`git submodule update --init` to fix this.")
def configuration(parent_package='', top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import (gfortran_legacy_flag_hook,
get_g77_abi_wrappers,
needs_g77_abi_wrapper)
lapack_opt = get_info('lapack_opt')
pre_build_hook = gfortran_legacy_flag_hook
f2py_options = None
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
config = Configuration('_propack', parent_package, top_path)
# ------------------------------------------------------------
# Set up the libraries.
# We need a different python extension file for each, because
# names reuse between functions in the LAPACK extensions. This
# could probably be remedied with some work.
    # NOTE: this might no longer apply now that we build without
# LAPACK extensions
type_dict = dict(s='single',
d='double',
c='complex8',
z='complex16')
check_propack_submodule()
for prefix, directory in type_dict.items():
propack_lib = f'_{prefix}propack'
# Use risc msg implementation for 64-bit machines, pentium for 32-bit
src = list((pathlib.Path(
__file__).parent / 'PROPACK' / directory).glob('*.F'))
if _is_32bit():
# don't ask me why, 32-bit blows up without second.F
src = [str(p) for p in src if 'risc' not in str(p)]
else:
src = [str(p) for p in src
if 'pentium' not in str(p) and 'second' not in str(p)]
if not _is_32bit():
# don't ask me why, 32-bit blows up with this wrapper
src += get_g77_abi_wrappers(lapack_opt)
cmacros = [('_OPENMP',)]
if needs_g77_abi_wrapper(lapack_opt):
cmacros += [('SCIPY_USE_G77_CDOTC_WRAP', 1)]
config.add_library(propack_lib,
sources=src,
macros=cmacros,
depends=['setup.py'])
ext = config.add_extension(f'_{prefix}propack',
sources=f'{prefix}propack.pyf',
libraries=[propack_lib],
extra_info=lapack_opt,
undef_macros=['_OPENMP'],
f2py_options=f2py_options,
depends=['setup.py'] + src)
ext._pre_build_hook = pre_build_hook
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
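# --- Hedged usage note (not part of the original file) ---
# This setup script is normally driven by SciPy's top-level build; the
# __main__ block above only exists so the subpackage can be built on its
# own. An illustrative standalone invocation (requires a Fortran compiler,
# BLAS/LAPACK, and the PROPACK submodule checked out):
#
#   python setup.py build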
|
{
"content_hash": "c67de5b5f23a3d5a5dd776aa7d18743c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 77,
"avg_line_length": 37.09411764705882,
"alnum_prop": 0.5331430383761497,
"repo_name": "vigna/scipy",
"id": "b593cbca99d73954c851b6e30bfc04a077bd49a7",
"size": "3153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/sparse/linalg/_propack/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4491892"
},
{
"name": "C++",
"bytes": "960140"
},
{
"name": "Cython",
"bytes": "1050681"
},
{
"name": "Dockerfile",
"bytes": "9839"
},
{
"name": "Fortran",
"bytes": "5299482"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Meson",
"bytes": "141627"
},
{
"name": "Python",
"bytes": "14969167"
},
{
"name": "Shell",
"bytes": "3533"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'testproject/test.db3'
}
}
INSTALLED_APPS = ["django_auth_ldap",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.admin",
"django.contrib.sites",
"django.contrib.sessions",
]
SITE_ID = 1
ROOT_URLCONF = "testproject.urls"
SECRET_KEY = "1"
|
{
"content_hash": "67db1361c78346a4cebc75673938c881",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 48,
"avg_line_length": 29.125,
"alnum_prop": 0.5064377682403434,
"repo_name": "pexip/os-django-auth-ldap",
"id": "9092f80dfcb4a29a2bdb77b595bcce7d005aa6a7",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "debian/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "16498"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "101379"
}
],
"symlink_target": ""
}
|
import collections
AccessToken = collections.namedtuple('AccessToken', ['access_token', 'expires_in'])
class LinkedInRecipient(object):
def __init__(self, member_id, email, first_name, last_name):
assert member_id or email, 'Either member ID or email must be given'
if member_id:
self.member_id = str(member_id)
else:
self.member_id = None
self.email = email
self.first_name = first_name
self.last_name = last_name
@property
def json(self):
result = {'person': None}
if self.member_id:
result['person'] = {'_path': '/people/id=%s' % self.member_id}
else:
result['person'] = {'_path': '/people/email=%s' % self.email}
if self.first_name:
result['person']['first-name'] = self.first_name
if self.last_name:
result['person']['last-name'] = self.last_name
return result
class LinkedInInvitation(object):
def __init__(self, subject, body, recipients, connect_type, auth_name=None,
auth_value=None):
self.subject = subject
self.body = body
self.recipients = recipients
self.connect_type = connect_type
self.auth_name = auth_name
self.auth_value = auth_value
@property
def json(self):
result = {
'recipients': {
'values': []
},
'subject': self.subject,
'body': self.body,
'item-content': {
'invitation-request': {
'connect-type': self.connect_type
}
}
}
for recipient in self.recipients:
result['recipients']['values'].append(recipient.json)
if self.auth_name and self.auth_value:
auth = {'name': self.auth_name, 'value': self.auth_value}
result['item-content']['invitation-request']['authorization'] = auth
return result
class LinkedInMessage(object):
def __init__(self, subject, body, recipients, auth_name=None,
auth_value=None):
self.subject = subject
self.body = body
self.recipients = recipients
self.auth_name = auth_name
self.auth_value = auth_value
@property
def json(self):
result = {
'recipients': {
'values': []
},
'subject': self.subject,
'body': self.body,
}
for recipient in self.recipients:
result['recipients']['values'].append(recipient.json)
if self.auth_name and self.auth_value:
auth = {'name': self.auth_name, 'value': self.auth_value}
            # Unlike LinkedInInvitation, the base message payload has no
            # 'item-content' key, so indexing into it directly would raise
            # KeyError; build the nested structure before assigning.
            item_content = result.setdefault('item-content', {})
            item_content.setdefault('invitation-request', {})['authorization'] = auth
return result
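# --- Hedged usage sketch (not part of the original module) ---
# Building the JSON payload for a two-recipient message; the member ID and
# email below are made-up values for illustration only.
if __name__ == '__main__':
    import json
    recipients = [
        LinkedInRecipient('12345', None, 'Ada', 'Lovelace'),
        LinkedInRecipient(None, 'grace@example.com', 'Grace', 'Hopper'),
    ]
    message = LinkedInMessage('Hello', 'Just testing the payload.', recipients)
    print(json.dumps(message.json, indent=2))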
|
{
"content_hash": "4525d17a27dd5258b7af664b28b28573",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 83,
"avg_line_length": 31.462365591397848,
"alnum_prop": 0.525974025974026,
"repo_name": "marshallhumble/python-linkedin",
"id": "4d12267ce244ad6e284b24cf3fd18cb41c8ac1f9",
"size": "2926",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linkedin/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30898"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 4c363bf9af2
Revises: 575dde6b846
Create Date: 2015-03-24 16:15:16.721300
"""
# revision identifiers, used by Alembic.
revision = '4c363bf9af2'
down_revision = '575dde6b846'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('admin_account',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('admin_name', sa.String(length=255), nullable=False),
sa.Column('encrypted_password', sa.String(length=60), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('admin')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('admin',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('admin_name', sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column('encrypted_password', sa.VARCHAR(length=60), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='admin_pkey')
)
op.drop_table('admin_account')
### end Alembic commands ###
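# --- Hedged usage note (not part of the original migration) ---
# Applied through the standard Alembic CLI, e.g.:
#   alembic upgrade 4c363bf9af2     # runs upgrade() above
#   alembic downgrade 575dde6b846   # runs downgrade() back to the parent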
|
{
"content_hash": "f0916bd94dc1ebcf17ca6774880ff950",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 95,
"avg_line_length": 30.026315789473685,
"alnum_prop": 0.684487291849255,
"repo_name": "PythonClutch/python-clutch",
"id": "b0a243c3df03f48f4c8e1240c94867fa768c2611",
"size": "1141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/4c363bf9af2_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "419873"
},
{
"name": "HTML",
"bytes": "84080"
},
{
"name": "JavaScript",
"bytes": "246580"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "89791"
}
],
"symlink_target": ""
}
|
import numpy
from chainer.backend import cuda
from chainer import distribution
from chainer.functions.array import repeat
from chainer.functions.array import reshape
from chainer.functions.array import transpose
from chainer.functions.math import prod
from chainer.functions.math import sum as sum_mod
from chainer.utils import array
from chainer.utils import cache
class Independent(distribution.Distribution):
"""Independent distribution.
Args:
distribution (:class:`~chainer.Distribution`): The base distribution
instance to transform.
reinterpreted_batch_ndims (:class:`int`): Integer number of rightmost
batch dims which will be regarded as event dims. When ``None`` all
but the first batch axis (batch axis 0) will be transferred to
event dimensions.
"""
def __init__(self, distribution, reinterpreted_batch_ndims=None):
super(Independent, self).__init__()
self.__distribution = distribution
if reinterpreted_batch_ndims is None:
reinterpreted_batch_ndims = (
self._get_default_reinterpreted_batch_ndims(distribution))
elif reinterpreted_batch_ndims > len(distribution.batch_shape):
raise ValueError(
'reinterpreted_batch_ndims must be less than or equal to the '
'number of dimensions of `distribution.batch_shape`.')
self.__reinterpreted_batch_ndims = reinterpreted_batch_ndims
batch_ndim = (
len(self.distribution.batch_shape)
- self.reinterpreted_batch_ndims)
self.__batch_shape = distribution.batch_shape[:batch_ndim]
self.__event_shape = (
distribution.batch_shape[batch_ndim:]
+ distribution.event_shape)
@property
def distribution(self):
return self.__distribution
@property
def reinterpreted_batch_ndims(self):
return self.__reinterpreted_batch_ndims
@property
def batch_shape(self):
return self.__batch_shape
@property
def event_shape(self):
return self.__event_shape
@cache.cached_property
def covariance(self):
""" The covariance of the independent distribution.
By definition, the covariance of the new
distribution becomes block diagonal matrix. Let
:math:`\\Sigma_{\\mathbf{x}}` be the covariance matrix of the original
random variable :math:`\\mathbf{x} \\in \\mathbb{R}^d`, and
:math:`\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots \\mathbf{x}^{(m)}`
be the :math:`m` i.i.d. random variables, new covariance matrix
:math:`\\Sigma_{\\mathbf{y}}` of :math:`\\mathbf{y} =
[\\mathbf{x}^{(1)}, \\mathbf{x}^{(2)}, \\cdots, \\mathbf{x}^{(m)}] \\in
\\mathbb{R}^{md}` can be written as
.. math::
\\left[\\begin{array}{ccc}
\\Sigma_{\\mathbf{x}^{1}} & & 0 \\\\
& \\ddots & \\\\
0 & & \\Sigma_{\\mathbf{x}^{m}}
\\end{array} \\right].
Note that this relationship holds only if the covariance matrix of the
original distribution is given analytically.
Returns:
~chainer.Variable: The covariance of the distribution.
"""
num_repeat = array.size_of_shape(
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])
dim = array.size_of_shape(self.distribution.event_shape)
cov = repeat.repeat(
reshape.reshape(
self.distribution.covariance,
((self.batch_shape) + (1, num_repeat, dim, dim))),
num_repeat, axis=-4)
cov = reshape.reshape(
transpose.transpose(
cov, axes=(
tuple(range(len(self.batch_shape))) + (-4, -2, -3, -1))),
self.batch_shape + (num_repeat * dim, num_repeat * dim))
block_indicator = self.xp.reshape(
self._block_indicator,
tuple([1] * len(self.batch_shape)) + self._block_indicator.shape)
return cov * block_indicator
@property
def entropy(self):
return self._reduce(sum_mod.sum, self.distribution.entropy)
def cdf(self, x):
return self._reduce(prod.prod, self.distribution.cdf(x))
def icdf(self, x):
"""The inverse cumulative distribution function for multivariate variable.
Cumulative distribution function for multivariate variable is not
invertible. This function always raises :class:`RuntimeError`.
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`): Data points in
the codomain of the distribution
Raises:
:class:`RuntimeError`
"""
raise RuntimeError(
'Cumulative distribution function for multivariate variable '
'is not invertible.')
def log_cdf(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_cdf(x))
def log_prob(self, x):
return self._reduce(sum_mod.sum, self.distribution.log_prob(x))
def log_survival_function(self, x):
return self._reduce(
sum_mod.sum, self.distribution.log_survival_function(x))
@property
def mean(self):
return self.distribution.mean
@property
def mode(self):
return self.distribution.mode
@property
def params(self):
return self.distribution.params
def perplexity(self, x):
return self._reduce(prod.prod, self.distribution.perplexity(x))
def prob(self, x):
return self._reduce(prod.prod, self.distribution.prob(x))
def sample_n(self, n):
return self.distribution.sample_n(n)
@property
def stddev(self):
return self.distribution.stddev
@property
def support(self):
return self.distribution.support
def survival_function(self, x):
return self._reduce(prod.prod, self.distribution.survival_function(x))
@property
def variance(self):
return self.distribution.variance
@property
def xp(self):
return self.distribution.xp
def _reduce(self, op, stat):
range_ = tuple(range(-self.reinterpreted_batch_ndims, 0))
return op(stat, axis=range_)
def _get_default_reinterpreted_batch_ndims(self, distribution):
ndims = len(distribution.batch_shape)
return max(0, ndims - 1)
@cache.cached_property
def _block_indicator(self):
num_repeat = array.size_of_shape(
self.distribution.batch_shape[-self.reinterpreted_batch_ndims:])
dim = array.size_of_shape(self.distribution.event_shape)
block_indicator = numpy.fromfunction(
lambda i, j: i // dim == j // dim,
(num_repeat * dim, num_repeat * dim)).astype(int)
if self.xp is cuda.cupy:
block_indicator = cuda.to_gpu(block_indicator)
return block_indicator
@distribution.register_kl(Independent, Independent)
def _kl_independent_independent(dist1, dist2):
"""Computes Kullback-Leibler divergence for independent distributions.
We can leverage the fact that
.. math::
\\mathrm{KL}(
\\mathrm{Independent}(\\mathrm{dist1}) ||
\\mathrm{Independent}(\\mathrm{dist2}))
= \\mathrm{sum}(\\mathrm{KL}(\\mathrm{dist1} || \\mathrm{dist2}))
where the sum is over the ``reinterpreted_batch_ndims``.
Args:
dist1 (:class:`~chainer.distribution.Independent`): Instance of
`Independent`.
dist2 (:class:`~chainer.distribution.Independent`): Instance of
`Independent`.
Returns:
Batchwise ``KL(dist1 || dist2)``.
Raises:
:class:`ValueError`: If the event space for ``dist1`` and ``dist2``,
or their underlying distributions don't match.
"""
p = dist1.distribution
q = dist2.distribution
# The KL between any two (non)-batched distributions is a scalar.
# Given that the KL between two factored distributions is the sum, i.e.
    # KL(p1(x)p2(y) || q1(x)q2(y)) = KL(p1 || q1) + KL(p2 || q2), we compute
# KL(p || q) and do a `reduce_sum` on the reinterpreted batch dimensions.
if dist1.event_shape == dist2.event_shape:
if p.event_shape == q.event_shape:
num_reduce_dims = len(dist1.event_shape) - len(p.event_shape)
reduce_dims = tuple([-i - 1 for i in range(0, num_reduce_dims)])
return sum_mod.sum(
distribution.kl_divergence(p, q), axis=reduce_dims)
else:
raise NotImplementedError(
'KL between Independents with different '
'event shapes not supported.')
else:
raise ValueError('Event shapes do not match.')
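# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how `Independent` moves batch dimensions into the
# event shape, assuming `chainer.distributions.Normal` with the usual
# (loc, scale) constructor. A Normal with batch_shape (3, 2) wrapped with
# reinterpreted_batch_ndims=1 reports batch_shape (3,) and event_shape
# (2,), so log_prob sums the per-dimension densities over the last axis.
if __name__ == '__main__':
    from chainer import distributions
    loc = numpy.zeros((3, 2), dtype=numpy.float32)
    scale = numpy.ones((3, 2), dtype=numpy.float32)
    indep = Independent(distributions.Normal(loc=loc, scale=scale),
                        reinterpreted_batch_ndims=1)
    print(indep.batch_shape)            # (3,)
    print(indep.event_shape)            # (2,)
    x = numpy.zeros((3, 2), dtype=numpy.float32)
    print(indep.log_prob(x).shape)      # (3,)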
|
{
"content_hash": "77a2d33feab90d04ee1bdfa77174bc24",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 82,
"avg_line_length": 35.33734939759036,
"alnum_prop": 0.6126832594613024,
"repo_name": "okuta/chainer",
"id": "8d12501d465fd46b9220b391c43bc6d56dfe44c1",
"size": "8799",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/distributions/independent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1548487"
},
{
"name": "CMake",
"bytes": "51604"
},
{
"name": "Cuda",
"bytes": "128377"
},
{
"name": "Dockerfile",
"bytes": "1457"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5851909"
},
{
"name": "Shell",
"bytes": "41045"
}
],
"symlink_target": ""
}
|
import json
import os.path
from robot.running import ArgInfo
from robot.errors import DataError
from .datatypes import EnumMember, TypedDictItem, TypeDoc
from .model import LibraryDoc, KeywordDoc
class JsonDocBuilder:
def build(self, path):
spec = self._parse_spec_json(path)
return self.build_from_dict(spec)
def build_from_dict(self, spec):
libdoc = LibraryDoc(name=spec['name'],
doc=spec['doc'],
version=spec['version'],
type=spec['type'],
scope=spec['scope'],
doc_format=spec['docFormat'],
source=spec['source'],
lineno=int(spec.get('lineno', -1)))
libdoc.inits = [self._create_keyword(kw) for kw in spec['inits']]
libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
# RF >= 5 have 'typedocs', RF >= 4 have 'dataTypes', older/custom may have neither.
if 'typedocs' in spec:
libdoc.type_docs = self._parse_type_docs(spec['typedocs'])
elif 'dataTypes' in spec:
libdoc.type_docs = self._parse_data_types(spec['dataTypes'])
return libdoc
def _parse_spec_json(self, path):
if not os.path.isfile(path):
raise DataError(f"Spec file '{path}' does not exist.")
with open(path) as json_source:
libdoc_dict = json.load(json_source)
return libdoc_dict
def _create_keyword(self, data):
kw = KeywordDoc(name=data.get('name'),
doc=data['doc'],
shortdoc=data['shortdoc'],
tags=data['tags'],
private=data.get('private', False),
deprecated=data.get('deprecated', False),
source=data['source'],
lineno=int(data.get('lineno', -1)))
self._create_arguments(data['args'], kw)
return kw
def _create_arguments(self, arguments, kw: KeywordDoc):
spec = kw.args
setters = {
ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
ArgInfo.POSITIONAL_ONLY_MARKER: lambda value: None,
ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
ArgInfo.VAR_POSITIONAL: lambda value: setattr(spec, 'var_positional', value),
ArgInfo.NAMED_ONLY_MARKER: lambda value: None,
ArgInfo.NAMED_ONLY: spec.named_only.append,
ArgInfo.VAR_NAMED: lambda value: setattr(spec, 'var_named', value),
}
for arg in arguments:
name = arg['name']
setters[arg['kind']](name)
default = arg.get('defaultValue')
if default is not None:
spec.defaults[name] = default
arg_types = arg['types']
if not spec.types:
spec.types = {}
spec.types[name] = tuple(arg_types)
kw.type_docs[name] = arg.get('typedocs', {})
def _parse_type_docs(self, type_docs):
for data in type_docs:
doc = TypeDoc(data['type'], data['name'], data['doc'], data['accepts'],
data['usages'])
if doc.type == TypeDoc.ENUM:
doc.members = [EnumMember(d['name'], d['value'])
for d in data['members']]
if doc.type == TypeDoc.TYPED_DICT:
doc.items = [TypedDictItem(d['key'], d['type'], d['required'])
for d in data['items']]
yield doc
# Code below used for parsing legacy 'dataTypes'.
def _parse_data_types(self, data_types):
for obj in data_types['enums']:
yield self._create_enum_doc(obj)
for obj in data_types['typedDicts']:
yield self._create_typed_dict_doc(obj)
def _create_enum_doc(self, data):
return TypeDoc(TypeDoc.ENUM, data['name'], data['doc'],
members=[EnumMember(member['name'], member['value'])
for member in data['members']])
def _create_typed_dict_doc(self, data):
return TypeDoc(TypeDoc.TYPED_DICT, data['name'], data['doc'],
items=[TypedDictItem(item['key'], item['type'], item['required'])
for item in data['items']])
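# --- Hedged usage sketch (not part of the original module) ---
# Building a LibraryDoc model from a spec file produced by Libdoc, e.g.
# `python -m robot.libdoc BuiltIn BuiltIn.json` (the file name here is
# illustrative).
if __name__ == '__main__':
    libdoc = JsonDocBuilder().build('BuiltIn.json')
    print(libdoc.name, libdoc.version, len(libdoc.keywords))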
|
{
"content_hash": "c76889dc79ebfc4004fb05b1548141ca",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 91,
"avg_line_length": 42.2,
"alnum_prop": 0.5319341006544798,
"repo_name": "robotframework/robotframework",
"id": "2a8657862f50b7ed2988baf6f3cf7a6a99cfeead",
"size": "5075",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robot/libdocpkg/jsonbuilder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44632"
},
{
"name": "HTML",
"bytes": "86871"
},
{
"name": "JavaScript",
"bytes": "162950"
},
{
"name": "Python",
"bytes": "2764220"
},
{
"name": "RobotFramework",
"bytes": "1260097"
}
],
"symlink_target": ""
}
|
import pickle
import tensorflow as tf
import numpy as np
from keras.layers import Input, Flatten, Dense
from keras.models import Model
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
flags.DEFINE_string('training_file', '', "Bottleneck features training file (.p)")
flags.DEFINE_string('validation_file', '', "Bottleneck features validation file (.p)")
flags.DEFINE_integer('epochs', 50, "The number of epochs.")
flags.DEFINE_integer('batch_size', 256, "The batch size.")
def load_bottleneck_data(training_file, validation_file):
"""
Utility function to load bottleneck features.
Arguments:
training_file - String
validation_file - String
"""
print("Training file", training_file)
print("Validation file", validation_file)
with open(training_file, 'rb') as f:
train_data = pickle.load(f)
with open(validation_file, 'rb') as f:
validation_data = pickle.load(f)
X_train = train_data['features']
y_train = train_data['labels']
X_val = validation_data['features']
y_val = validation_data['labels']
return X_train, y_train, X_val, y_val
def main(_):
# load bottleneck data
X_train, y_train, X_val, y_val = load_bottleneck_data(FLAGS.training_file, FLAGS.validation_file)
print(X_train.shape, y_train.shape)
print(X_val.shape, y_val.shape)
nb_classes = len(np.unique(y_train))
input_shape = X_train.shape[1:]
inp = Input(shape=input_shape)
x = Flatten()(inp)
x = Dense(nb_classes, activation='softmax')(x)
model = Model(inp, x)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# train model
model.fit(X_train, y_train, nb_epoch=FLAGS.epochs, batch_size=FLAGS.batch_size, validation_data=(X_val, y_val), shuffle=True)
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
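# Example invocation (the pickle file names below are hypothetical; any files
# matching the structure loaded in load_bottleneck_data will do):
#
#   python feature_extraction.py \
#       --training_file vgg_cifar10_bottleneck_features_train.p \
#       --validation_file vgg_cifar10_bottleneck_features_validation.p \
#       --epochs 50 --batch_size 256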
|
{
"content_hash": "350f824bf8c83b5491b046330e37d510",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 129,
"avg_line_length": 31.9,
"alnum_prop": 0.6739811912225705,
"repo_name": "DavidObando/carnd",
"id": "fba0bd1a3972a9a7acb428909bdfe51c04d7ad98",
"size": "1914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Term1/Labs/CarND-Transfer-Learning-Lab/feature_extraction.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1026"
},
{
"name": "C",
"bytes": "397957"
},
{
"name": "C++",
"bytes": "19427858"
},
{
"name": "CMake",
"bytes": "287149"
},
{
"name": "CSS",
"bytes": "5383"
},
{
"name": "Cuda",
"bytes": "131738"
},
{
"name": "Dockerfile",
"bytes": "2543"
},
{
"name": "Fortran",
"bytes": "1326303"
},
{
"name": "HTML",
"bytes": "5743866"
},
{
"name": "JavaScript",
"bytes": "7839"
},
{
"name": "Jupyter Notebook",
"bytes": "28650718"
},
{
"name": "Makefile",
"bytes": "3707"
},
{
"name": "Python",
"bytes": "327683"
},
{
"name": "Shell",
"bytes": "25869"
}
],
"symlink_target": ""
}
|
"""Test manipulate.py for vimiv's test suite."""
import os
import shutil
from unittest import main
from vimiv_testcase import (VimivTestCase, compare_files, compare_pixbufs,
refresh_gui)
class ManipulateTest(VimivTestCase):
"""Manipulate Tests."""
@classmethod
def setUpClass(cls):
if os.path.isdir("vimiv/testimages_man"):
shutil.rmtree("vimiv/testimages_man")
shutil.copytree("vimiv/testimages", "vimiv/testimages_man")
cls.init_test(cls, ["vimiv/testimages_man/arch-logo.png"])
cls.manipulate = cls.vimiv["manipulate"]
# Wait for image as this is used in manipulate
while not cls.vimiv["image"].get_pixbuf():
refresh_gui(0.1)
def setUp(self):
"""Set up by opening manipulate. Test half of toggling."""
self.manipulate.toggle()
refresh_gui()
self.assertTrue(self.manipulate.is_visible())
self.assertTrue(self.manipulate.sliders["bri"].is_focus())
def test_manipulate_image(self):
"""Test manipulate image."""
# Copy image before manipulation
tmpfile = "tmp.jpg"
if os.path.exists(tmpfile):
os.remove(tmpfile)
shutil.copyfile(self.vimiv.get_path(), tmpfile)
# Leaving with False should not change the image
self.manipulate.cmd_edit("bri", "20")
self.manipulate.finish(False)
self.assertTrue(compare_files(tmpfile, self.vimiv.get_path()))
# Image is different to copied backup after manipulations
self.manipulate.toggle()
self.manipulate.cmd_edit("bri", "20")
self.manipulate.finish(True)
self.assertFalse(compare_files(tmpfile, self.vimiv.get_path()))
self.manipulate.toggle() # Re-open to keep state equal
def test_write_image(self):
"""Write an image to disk from manipulate."""
# Copy image before manipulation
tmpfile = "tmp.jpg"
if os.path.exists(tmpfile):
os.remove(tmpfile)
shutil.copyfile(self.vimiv.get_path(), tmpfile)
# Image is different to copied backup after manipulations
self.manipulate.cmd_edit("bri", "20")
self.vimiv["transform"].write()
self.assertFalse(compare_files(tmpfile, self.vimiv.get_path()))
self.manipulate.toggle() # Re-open to keep state equal
def test_focus_sliders(self):
"""Focusing sliders in manipulate."""
self.assertTrue(self.manipulate.sliders["bri"].is_focus())
self.manipulate.focus_slider("con")
self.assertTrue(self.manipulate.sliders["con"].is_focus())
self.manipulate.focus_slider("sat")
self.assertTrue(self.manipulate.sliders["sat"].is_focus())
self.manipulate.focus_slider("bri")
self.assertTrue(self.manipulate.sliders["bri"].is_focus())
# Slider does not exist
self.manipulate.focus_slider("val")
self.check_statusbar("ERROR: No slider called val")
def test_change_slider_value(self):
"""Change slider value in manipulate."""
# Change value
self.manipulate.focus_slider("bri")
self.manipulate.change_slider("-1")
received_value = self.manipulate.sliders["bri"].get_value()
self.assertEqual(received_value, -1)
# Change value with a numstr
self.vimiv["eventhandler"].set_num_str(5)
self.manipulate.change_slider("2")
received_value = self.manipulate.sliders["bri"].get_value()
self.assertEqual(received_value, -1 + 2 * 5)
# Not an integer
self.manipulate.change_slider("hi")
self.check_statusbar("ERROR: Could not convert 'hi' to int")
received_value = self.manipulate.sliders["bri"].get_value()
self.assertEqual(received_value, -1 + 2 * 5)
def test_cmd_edit(self):
"""Test manipulating from command line commands."""
pb_1 = self.vimiv["image"].get_pixbuf()
# Just call the function
self.manipulate.cmd_edit("sat", "20")
self.assertEqual(self.manipulate.sliders["sat"].get_value(), 20)
self.assertTrue(self.manipulate.sliders["sat"].is_focus())
pb_2 = self.vimiv["image"].get_pixbuf()
self.assertFalse(compare_pixbufs(pb_1, pb_2))
# Set contrast via command line
self.run_command("edit con 35")
self.assertEqual(self.manipulate.sliders["con"].get_value(), 35)
self.assertTrue(self.manipulate.sliders["con"].is_focus())
pb_3 = self.vimiv["image"].get_pixbuf()
self.assertFalse(compare_pixbufs(pb_2, pb_3))
# No argument means 0
self.run_command("edit con")
self.assertEqual(self.manipulate.sliders["con"].get_value(), 0)
pb_4 = self.vimiv["image"].get_pixbuf()
self.assertFalse(compare_pixbufs(pb_3, pb_4))
self.assertTrue(compare_pixbufs(pb_2, pb_4)) # We reset contrast
# Close via command line
self.run_command("discard_changes")
self.assertFalse(self.manipulate.sliders["sat"].is_focus())
# Error: not a valid integer for manipulation
self.run_command("edit bri value")
self.assertFalse(self.manipulate.sliders["bri"].is_focus())
self.check_statusbar("ERROR: Argument must be of type integer")
def test_check_for_edit(self):
"""Check if an image was edited."""
self.assertEqual(0, self.manipulate.check_for_edit(False))
self.manipulate.cmd_edit("bri", "10")
self.assertEqual(1, self.manipulate.check_for_edit(False))
self.assertEqual(0, self.manipulate.check_for_edit(True))
def test_quit_with_edited_image(self):
"""Quit vimiv with an edited image."""
self.manipulate.cmd_edit("bri", "10")
self.vimiv.quit_wrapper()
self.check_statusbar("WARNING: Image has been edited, add ! to force")
def tearDown(self):
"""Tear down by closing manipulate. Test other half of toggling."""
self.manipulate.finish(False)
self.assertFalse(self.manipulate.sliders["bri"].is_focus())
@classmethod
def tearDownClass(cls):
cls.vimiv.quit()
os.chdir(cls.working_directory)
if os.path.isdir("./vimiv/testimages_man"):
shutil.rmtree("vimiv/testimages_man")
if __name__ == "__main__":
main()
|
{
"content_hash": "a4dbc2a5e0abbd188e010835ea621858",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 78,
"avg_line_length": 42.2,
"alnum_prop": 0.6322274881516587,
"repo_name": "karlch/vimiv",
"id": "4f3adaa6171f692081fc32ebff02d5bf64957691",
"size": "6380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/manipulate_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "11958"
},
{
"name": "Makefile",
"bytes": "2608"
},
{
"name": "Python",
"bytes": "387798"
},
{
"name": "Shell",
"bytes": "2371"
}
],
"symlink_target": ""
}
|
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.mgobe.v20201014 import models
class MgobeClient(AbstractClient):
_apiVersion = '2020-10-14'
_endpoint = 'mgobe.tencentcloudapi.com'
_service = 'mgobe'
def ChangeRoomPlayerProfile(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
修改房间玩家自定义属性
:param request: Request instance for ChangeRoomPlayerProfile.
:type request: :class:`tencentcloud.mgobe.v20201014.models.ChangeRoomPlayerProfileRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.ChangeRoomPlayerProfileResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ChangeRoomPlayerProfile", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ChangeRoomPlayerProfileResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ChangeRoomPlayerStatus(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
修改玩家自定义状态
:param request: Request instance for ChangeRoomPlayerStatus.
:type request: :class:`tencentcloud.mgobe.v20201014.models.ChangeRoomPlayerStatusRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.ChangeRoomPlayerStatusResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ChangeRoomPlayerStatus", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ChangeRoomPlayerStatusResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribePlayer(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
该接口用于查询玩家信息。支持两种用法,当OpenId不传的时候,PlayerId必传,传入PlayerId可以查询当前PlayerId的玩家信息,当OpenId传入的时候,PlayerId可不传,按照OpenId查询玩家信息。
:param request: Request instance for DescribePlayer.
:type request: :class:`tencentcloud.mgobe.v20201014.models.DescribePlayerRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.DescribePlayerResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribePlayer", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribePlayerResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DescribeRoom(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
该接口用于查询房间信息。支持两种用法,当房间Id不传的时候,玩家Id必传,传入玩家Id可以查询当前玩家所在的房间信息,当房间Id传入的时候,玩家Id可不传,按照房间Id查询房间信息。
:param request: Request instance for DescribeRoom.
:type request: :class:`tencentcloud.mgobe.v20201014.models.DescribeRoomRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.DescribeRoomResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DescribeRoom", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DescribeRoomResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def DismissRoom(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
通过game_id、room_id解散房间
:param request: Request instance for DismissRoom.
:type request: :class:`tencentcloud.mgobe.v20201014.models.DismissRoomRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.DismissRoomResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("DismissRoom", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.DismissRoomResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def ModifyRoom(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
修改房间
:param request: Request instance for ModifyRoom.
:type request: :class:`tencentcloud.mgobe.v20201014.models.ModifyRoomRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.ModifyRoomResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("ModifyRoom", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.ModifyRoomResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
def RemoveRoomPlayer(self, request):
"""此接口无法使用,游戏联机对战引擎MGOBE已于6.1正式下架,感谢您的支持
踢出房间玩家
:param request: Request instance for RemoveRoomPlayer.
:type request: :class:`tencentcloud.mgobe.v20201014.models.RemoveRoomPlayerRequest`
:rtype: :class:`tencentcloud.mgobe.v20201014.models.RemoveRoomPlayerResponse`
"""
try:
params = request._serialize()
headers = request.headers
body = self.call("RemoveRoomPlayer", params, headers=headers)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.RemoveRoomPlayerResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
                raise TencentCloudSDKException(type(e).__name__, str(e))
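# A minimal call sketch (standard Tencent Cloud SDK credential plumbing; the
# region and key values are placeholders, and since the service has been
# discontinued this is illustrative only):
#
#     from tencentcloud.common import credential
#     cred = credential.Credential("SecretId", "SecretKey")
#     client = MgobeClient(cred, "ap-guangzhou")
#     resp = client.DescribeRoom(models.DescribeRoomRequest())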
|
{
"content_hash": "0865583866bd20237b69640cb09a50bd",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 121,
"avg_line_length": 40.29385964912281,
"alnum_prop": 0.602590617176445,
"repo_name": "tzpBingo/github-trending",
"id": "bd5cb15f641fc3e5d80acdcf37966d719ea92738",
"size": "10603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/mgobe/v20201014/mgobe_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
import hashlib
from multiprocessing import Pool
import unittest
from unittest import TestCase
from echoback_server import EchobackIPCServer, EchobackIPCClient
from .. import IPCAvailable
__author__ = 'matheus2740'
from .. import BaseIPCServer, BaseIPCClient
class IPCServerTests(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_echoback_server(self):
server = EchobackIPCServer()
try:
echoback_client_call(123)
finally:
server.shutdown()
server = EchobackIPCServer()
try:
for i in range(50):
echoback_client_call("abc"+str(i))
finally:
server.shutdown()
def test_echoback_server_parallel(self):
server = EchobackIPCServer()
pool = Pool(10)
for i in range(1000):
pool.apply_async(echoback_client_call, args=(i,))
            print 'applied'
pool.close()
pool.join()
print "pool is dead"
server.shutdown()
def test_hash_server(self):
server = BaseIPCServer()
try:
data = "my test string"
client = BaseIPCClient()
received = client.mhash(data)
print 'received:', received
assert received == mhash(data)
finally:
server.shutdown()
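# IPCAvailable presumably registers the decorated function on the given server
# class; that registration is what lets test_hash_server above invoke it
# remotely as client.mhash(data).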
@IPCAvailable(BaseIPCServer)
def mhash(string):
return hashlib.md5(string).hexdigest()
def echoback_client_call(arg):
client = EchobackIPCClient()
received = client.teste(arg)
assert received['a'][0] == arg
print "successfull:", arg
client.disconnect()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "52ce64384e84f10085330f0cbc73d165",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 64,
"avg_line_length": 21.25,
"alnum_prop": 0.5952941176470589,
"repo_name": "s1mbi0se/s1ipc",
"id": "2b51b612192113b03d8ec01e3ae21597b0facea4",
"size": "1715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s1ipc/tests/base_server_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35345"
}
],
"symlink_target": ""
}
|
"""
Collection: database interface class.
These classes provide an interface between the database and the top-level
ingest algorithm (AbstractIngester and its subclasses). They also provide
the implementation of the database and tile store side of the ingest
process. They are expected to be independent of the structure of any
particular dataset, but will change if the database schema or tile store
format changes.
"""
import logging
import os
import time
import shutil
from agdc.cube_util import DatasetError, create_directory
from tile_contents import TileContents
from acquisition_record import AcquisitionRecord
from ingest_db_wrapper import IngestDBWrapper
# Set up logger.
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class Collection(object):
"""Collection database interface class."""
#
# Interface methods
#
def __init__(self, datacube):
"""Initialise the collection object."""
self.datacube = datacube
self.db = IngestDBWrapper(datacube.db_connection)
self.new_bands = self.__reindex_bands(datacube.bands)
self.transaction_stack = []
self.temp_tile_directory = os.path.join(self.datacube.tile_root,
'ingest_temp',
self.datacube.process_id)
create_directory(self.temp_tile_directory)
def cleanup(self):
"""Do end-of-process cleanup.
        Deletes the process-specific temporary directory. Does not
close the database connection (at present), because the datacube
object has a destructor which does that.
"""
shutil.rmtree(self.temp_tile_directory, ignore_errors=True)
@staticmethod
def get_dataset_key(dataset):
"""Return the dataset key for use with the new_bands dictionary.
This is a tuple (satellite_tag, sensor_name, processing_level) except
that for derived datasets (currently PQA and FC) the satellite_tag is
replaced with 'DERIVED' and the processing_level is used as the
sensor_name. So the tuple looks like:
('DERIVED', processing_level, processing_level).
"""
derived_levels = {'PQA', 'FC'}
satellite = dataset.get_satellite_tag()
sensor = dataset.get_sensor_name()
level = dataset.get_processing_level()
if level in derived_levels:
satellite = 'DERIVED'
sensor = level
return (satellite, sensor, level)
def get_temp_tile_directory(self):
"""Return a path to a directory for temporary tile related files."""
return self.temp_tile_directory
def check_metadata(self, dataset):
"""Check that the satellite, sensor, and bands are in the database.
Checks that the dataset is of a kind that the database knows about
(by checking basic metadata), and the bands that the database expects
are present. Raises a DatasetError if the checks fail.
"""
self.__check_satellite_and_sensor(dataset)
self.__check_processing_level(dataset)
self.__check_bands(dataset)
def transaction(self, db=None):
"""Returns a Transaction context manager object.
This is for use in a 'with' statement. It uses the Collection's
database collection if one is not provided.
"""
return Transaction(self.db if db is None else db,
self.transaction_stack)
def lock_datasets(self, dataset_list):
"""Returns a Lock context manager object.
dataset_list is a list of dataset ids for the datasets to be
locked.
This is for use in a 'with' statement. It uses the Collection's
datacube object to manage the individual locks.
"""
lock_list = ['Dataset-' + str(dataset_id)
for dataset_id in dataset_list]
return Lock(self.datacube, lock_list)
def create_acquisition_record(self, dataset):
"""Factory method to create an instance of the AcquisitonRecord class.
This method creates a corresponding record in the database if one
does not already exist.
"""
return AcquisitionRecord(self, dataset)
def create_tile_contents(self, tile_type_id, tile_footprint,
band_stack):
"""Factory method to create an instance of the TileContents class.
The tile_type_dict contains the information required for
resampling extents and resolution.
"""
tile_type_info = self.datacube.tile_type_dict[tile_type_id]
tile_contents = TileContents(self.datacube.tile_root, tile_type_info,
tile_footprint, band_stack)
return tile_contents
def current_transaction(self):
"""Returns the current transaction."""
return self.transaction_stack[-1]
def mark_tile_for_removal(self, tile_pathname):
"""Mark a tile file for removal on transaction commit."""
self.current_transaction().mark_tile_for_removal(tile_pathname)
def mark_tile_for_creation(self, tile_contents):
"""Mark a tile file for creation on transaction commit."""
self.current_transaction().mark_tile_for_creation(tile_contents)
#
# worker methods
#
@staticmethod
def __reindex_bands(bands):
"""Reindex the datacube.bands nested dict structure.
This method returns the new nested dict which is indexed by:
new_bands[dataset_key][tile_type][file_number]
where dataset_key is a tuple:
(satellite_tag, sensor_name, processing_level).
The original indexing is
bands[tile_type][satellite_sensor][file_number]
where satellite_sensor is a tuple:
(satellite_tag, sensor_name)
Note that satellite_tag and sensor_name are replaced by 'DERIVED' and
the processing_level for PQA and FC datasets. This needs to be taken
into account when constructing a dataset_key.
"""
new_bands = {}
for (tile_type, band_dict) in bands.items():
for ((satellite, sensor), sensor_dict) in band_dict.items():
for (file_number, band_info) in sensor_dict.items():
dataset_key = (satellite, sensor, band_info['level_name'])
new_bands.setdefault(dataset_key, {})
new_bands[dataset_key].setdefault(tile_type, {})
new_bands[dataset_key][tile_type][file_number] = band_info
return new_bands
def __check_satellite_and_sensor(self, dataset):
"""Check that the dataset's satellite and sensor are in the database.
Raises a DatasetError if they are not.
"""
satellite_id = self.db.get_satellite_id(dataset.get_satellite_tag())
if satellite_id is None:
raise DatasetError("Unknown satellite tag: '%s'" %
dataset.get_satellite_tag())
sensor_id = self.db.get_sensor_id(satellite_id,
dataset.get_sensor_name())
if sensor_id is None:
msg = ("Unknown satellite and sensor pair: '%s', '%s'" %
(dataset.get_satellite_tag(), dataset.get_sensor_name()))
raise DatasetError(msg)
def __check_processing_level(self, dataset):
"""Check that the dataset's processing_level is in the database.
Raises a DatasetError if it is not.
"""
level_id = self.db.get_level_id(dataset.get_processing_level())
if level_id is None:
raise DatasetError("Unknown processing level: '%s'" %
dataset.get_processing_level())
def __check_bands(self, dataset):
"""Check that the dataset has the expected bands.
Raises a DatasetError if any band expected for this dataset (according
to the database) is missing.
"""
try:
dataset_bands = self.new_bands[self.get_dataset_key(dataset)]
except KeyError:
raise DatasetError('No tile types for this dataset.')
for tile_type_bands in dataset_bands.values():
for band_info in tile_type_bands.values():
dataset.find_band_file(band_info['file_pattern'])
#
# Context manager classes
#
class Transaction(object):
"""Context manager class for a transaction involving tiles.
This is used in a 'with' statement to wrap a transaction.
It handles the commit or roll back of the transaction and
the associated file operations to create and remove tile files
in coordination with the transaction.
"""
def __init__(self, db, tr_stack=None):
"""Initialise the transaction.
db is the database connection to use.
tr_stack is a stack of transactions. If not None, the last item
on the tr_stack should be the current transaction.
tile_remove_list is the list of tile files to remove on commit.
tile_create_list is the list of tile contents to create on commit
(or cleanup on roll back).
previous_commit_mode holds the previous state of the connection
so that it can be restored when the transaction is finished.
Note that tile_create_list is a list of TileContent objects,
while tile_remove_list is a list of pathnames.
"""
self.db = db
self.tr_stack = tr_stack
self.tile_remove_list = None
self.tile_create_list = None
self.previous_commit_mode = None
def __enter__(self):
"""Auto-called on transaction (with statement) entry.
Clears the tile lists and sets the commit mode (saving the old one).
Returns 'self' so that the other methods are available via an
'as' clause.
Note that tile_create_list is a list of TileContent objects,
while tile_remove_list is a list of pathnames.
"""
self.tile_remove_list = []
self.tile_create_list = []
self.previous_commit_mode = self.db.turn_off_autocommit()
if self.tr_stack is not None:
self.tr_stack.append(self)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Auto called on transaction (with statement) exit.
Commits the transaction unless there has been an exception,
in which case it rolls back the transaction. Restores the
old commit mode to the database connection.
This implicitly returns None which causes any exception
to be re-raised.
"""
if exc_type is None:
self.__commit()
else:
self.__rollback()
self.tile_remove_list = None
self.tile_create_list = None
self.db.restore_commit_mode(self.previous_commit_mode)
if self.tr_stack is not None:
tr = self.tr_stack.pop()
assert tr is self, "Unexpected value on transaction stack."
def __commit(self):
"""Commit the transaction while handling tile files."""
# Move tile files to their final location just before
        # the commit, to avoid committing tile records without files.
# Tile files without records are possible if the commit fails
# at the last moment.
for tile_contents in self.tile_create_list:
tile_contents.make_permanent()
self.db.commit()
# Remove tile files just after the commit, to avoid removing
# tile files when the deletion of a tile record has been rolled
# back. Again, tile files without records are possible if there
# is an exception or crash just after the commit.
#
# The tile remove list is filtered against the tile create list
# to avoid removing a file that has just been re-created. It is
# a bad idea to overwrite a tile file in this way (in a single
# transaction), because it will be overwritten just before the
# commit (above) and the wrong file will be in place if the
# transaction is rolled back.
tile_create_set = {t.get_output_path()
for t in self.tile_create_list}
for tile_pathname in self.tile_remove_list:
if tile_pathname not in tile_create_set:
if os.path.isfile(tile_pathname):
os.remove(tile_pathname)
def __rollback(self):
"""Roll back the transaction while handling tile files."""
        # Clean up temporary files that are no longer needed.
for tile_contents in self.tile_create_list:
tile_contents.remove()
self.db.rollback()
def mark_tile_for_removal(self, tile_pathname):
"""Mark a tile file for removal on transaction commit.
        These tiles will be deleted if the transaction is committed,
but not if it is rolled back.
"""
if tile_pathname not in self.tile_remove_list:
self.tile_remove_list.append(tile_pathname)
def mark_tile_for_creation(self, tile_contents):
"""Mark a tile file for creation on transaction commit.
        These tiles will be created (moved to their permanent
        location) if the transaction is committed. If the transaction
        is rolled back the associated temporary tile files will be
removed.
tile_contents should be a TileContents object (or at least
implement the interface).
"""
self.tile_create_list.append(tile_contents)
class Lock(object):
"""Context manager class for locking a list of objects.
This is used in a 'with' statement to wrap code which needs the
locks. It handles acquiring and releasing the locks as well as
waiting and retries if the locks cannot be acquired.
    Note that this will not work for nested locks/with statements in the
same process that attempt to lock the same object, because the
locking mechanism does not count the number of times an object has
been locked.
"""
DEFAULT_WAIT = 10
DEFAULT_RETRIES = 6
def __init__(self,
datacube,
lock_list,
wait=DEFAULT_WAIT,
retries=DEFAULT_RETRIES):
"""Initialise the lock object.
Positional Arguments:
datacube: The datacube object which manages the individual locks.
lock_list: The list of objects to lock. This is a list of
strings - each string should unambiguously identify the object
that is being locked.
Keyword Arguments:
wait: The amount of time to wait, in seconds, before again trying
to acquire the locks.
retries: The maximum number of attempts before giving up and
raising an exception.
"""
self.datacube = datacube
# Sort the list so that locks are always acquired in the same order.
# This avoids mini-deadlocks and resulting retries when acquiring
# multiple locks.
self.lock_list = sorted(lock_list)
self.wait = wait
self.retries = retries
def __enter__(self):
"""Auto-called on 'with' statement entry.
This acquires the locks or raises a LockError if it cannot
do so (after the maximum number of tries). Note that LockError
is a subclass of DatasetError, so it will cause a dataset skip.
Returns 'self' so that other methods are available via an 'as'
clause (though there are no interface methods at the moment).
"""
for dummy_tries in range(self.retries + 1):
try:
self.__acquire_locks(self.lock_list)
break
except LockError:
time.sleep(self.wait)
else:
raise LockError(("Unable to lock objects after %s tries: " %
self.retries) +
self.lock_list)
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Auto-called on 'with' statement exit.
Releases the locks whether or not there has been an
exception. Implicitly returns None which causes any
exception to be re-raised.
"""
for object_to_unlock in self.lock_list:
self.datacube.unlock_object(object_to_unlock)
def __acquire_locks(self, lock_list):
"""Acquire all the locks on the lock_list.
        Either successfully acquires *all* the locks or raises a
LockError and releases all the locks obtained so far.
"""
# Recursive algorithm.
#
# If the list is empty do nothing, otherwise ...
if lock_list:
# See if we can lock the object at the head of the list ...
if self.datacube.lock_object(lock_list[0]):
# If yes, then attempt to lock the rest (tail) of the list
# using a recursive call ...
try:
self.__acquire_locks(lock_list[1:])
except:
# If locking the tail does not work then release the
# lock we have on the head, and re-raise the exception.
self.datacube.unlock_object(lock_list[0])
raise
else:
# If we cannot lock the head then raise an exception
# (which will cause all previous calls in the chain to
# release their locks and re-raise the exception, in reverse
# order).
raise LockError()
#
# Exceptions
#
class LockError(DatasetError):
"""Exception class used by the Lock context manager."""
pass
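# A minimal usage sketch (hypothetical driver code; in practice Collection is
# driven by an AbstractIngester subclass rather than called directly):
#
#     collection = Collection(datacube)
#     with collection.lock_datasets([dataset_id]):
#         with collection.transaction():
#             acquisition = collection.create_acquisition_record(dataset)
#             ...  # create tiles, marking files for creation/removal
#     collection.cleanup()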
|
{
"content_hash": "991d44059c269e396ddc36a84c809350",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 78,
"avg_line_length": 35.54183266932271,
"alnum_prop": 0.6199977580988678,
"repo_name": "sixy6e/agdc",
"id": "0a6be4f6ef87abcdf787a1e875e6b6b15967da84",
"size": "19591",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/abstract_ingester/collection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1121299"
},
{
"name": "Shell",
"bytes": "75816"
}
],
"symlink_target": ""
}
|
import time
import threading
class Switch(object):
"""Button like component with two stable states"""
def __init__(self, pin):
"""
        :param pin: An instance of DigitalPin
"""
self.pin = pin
self.pin.mode = 'IN'
self.polling_task = None
self._up_callback = lambda: None
self._down_callback = lambda: None
def set_callback_up(self, callback, *args, **kwargs):
def callback_wrapper():
return callback(*args, **kwargs)
self._up_callback = callback_wrapper
def set_callback_down(self, callback, *args, **kwargs):
def callback_wrapper():
return callback(*args, **kwargs)
self._down_callback = callback_wrapper
def stop(self):
if self.polling_task is not None:
if self.polling_task.active:
self.polling_task.terminate()
self.polling_task = None
def start(self):
if self.polling_task is not None:
if self.polling_task.active:
self.stop()
self.polling_task = PollingTask(self)
threading.Thread(target=self.polling_task.run).start()
class PollingTask(object):
def __init__(self, switch):
"""
:param switch: Switch instance to poll
"""
self.switch = switch
self.active = False
def terminate(self):
self.active = False
def run(self):
self.active = True
last_state = self.switch.pin.state
while self.active:
current_state = self.switch.pin.state
if current_state != last_state:
if current_state == 'HIGH':
last_state = current_state
self.switch._up_callback()
elif current_state == 'LOW':
last_state = current_state
self.switch._down_callback()
time.sleep(0.05)
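# A minimal usage sketch (assumes a pingo board exposing DigitalPin objects;
# on_press is just an illustrative name):
#
#     def on_press():
#         print("pressed")
#
#     switch = Switch(board.pins[13])
#     switch.set_callback_down(on_press)
#     switch.start()   # polls the pin every 50 ms on a background thread
#     ...
#     switch.stop()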
|
{
"content_hash": "8c1b1d5c430e51aca69d9eef8b3c39f8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 62,
"avg_line_length": 29.8,
"alnum_prop": 0.5508518327310273,
"repo_name": "pingo-io/pingo-py",
"id": "ddc362c49c7d2b4ba3f5d6dd35be0018e183ecc8",
"size": "1937",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pingo/parts/button.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "416"
},
{
"name": "Python",
"bytes": "113647"
},
{
"name": "Shell",
"bytes": "221"
},
{
"name": "Vim script",
"bytes": "1139"
}
],
"symlink_target": ""
}
|
import unittest
import pandas as pd
import qiime2.metadata as metadata
import qiime2.core.type.primitive as primitive
import qiime2.core.type.grammar as grammar
class TestIntersectTwoRanges(unittest.TestCase):
def assertIntersectEqual(self, a, b, exp):
r1 = a & b
r2 = b & a
self.assertEqual(r1, r2)
self.assertEqual(r1, exp)
def test_overlap_simple(self):
a = primitive.Range(0, 10)
b = primitive.Range(3, 7)
self.assertIntersectEqual(a, b, b)
def test_overlap_inclusive_point(self):
a = primitive.Range(0, 5, inclusive_end=True)
b = primitive.Range(5, 10)
exp = primitive.Range(5, 5, inclusive_start=True, inclusive_end=True)
self.assertIntersectEqual(a, b, exp)
def test_disjoint_far(self):
a = primitive.Range(-10, -5)
b = primitive.Range(5, 10)
self.assertIntersectEqual(a, b, grammar.UnionExp())
def test_disjoint_exclusive_point(self):
a = primitive.Range(0, 5, inclusive_end=False)
b = primitive.Range(5, 9, inclusive_start=False)
self.assertIntersectEqual(a, b, grammar.UnionExp())
class TestMetadataColumn(unittest.TestCase):
def test_decode_categorical_value(self):
value = pd.Series({'a': 'a', 'b': 'b', 'c': 'c'}, name='foo')
value.index.name = 'id'
cat_md = metadata.CategoricalMetadataColumn(value)
res = primitive.MetadataColumn[primitive.Categorical].decode(cat_md)
self.assertIs(res, cat_md)
def test_decode_numeric_value(self):
value = pd.Series({'a': 1, 'b': 2, 'c': 3}, name='foo')
value.index.name = 'id'
num_md = metadata.NumericMetadataColumn(value)
res = primitive.MetadataColumn[primitive.Categorical].decode(num_md)
self.assertIs(res, num_md)
def test_decode_other(self):
with self.assertRaisesRegex(TypeError, 'provided.*directly'):
primitive.MetadataColumn[primitive.Categorical].decode(
"<metadata>")
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "e3238c45d286ada38de902cab562bad0",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 77,
"avg_line_length": 29.785714285714285,
"alnum_prop": 0.6350119904076739,
"repo_name": "biocore/qiime2",
"id": "774868ba5181cf7c75aa471100c5a8c73bc8b955",
"size": "2435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiime2/core/type/tests/test_primitive.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8399"
},
{
"name": "Python",
"bytes": "55686"
}
],
"symlink_target": ""
}
|
"""TcEx Framework Webhook Service Trigger module."""
# standard library
import base64
import json
import traceback
from typing import Any, Callable, Optional, Union
from .common_service_trigger import CommonServiceTrigger
class WebhookTriggerService(CommonServiceTrigger):
"""TcEx Framework Webhook Service Trigger module."""
def __init__(self, tcex: object):
"""Initialize the Class properties.
Args:
tcex: Instance of TcEx.
"""
super().__init__(tcex)
# config callbacks
self.webhook_event_callback = None
self.webhook_marshall_event_callback = None
def callback_response_handler(self, callback_response: Any, message: dict) -> None:
"""Handle the different types of callback responses.
# Webhook App (default)
* Dict - Playbook will not be launched and provided data
will be used in the response to the client.
* True - Playbook will be launched.
* Else - Playbook will NOT be launched.
# webhookResponseMarshall Feature App
* Callable - Playbook will be launched and if marshall callback will be set to response.
* True - Playbook will be launched.
* Else - Playbook will NOT be launched.
# webhookServiceEndpoint Feature App
        For this feature the callback method must fire the event on its own.
* Dict - Playbook will not be launched and provided data
will be used in the response to the client.
* Else - Response will be set to default of statusCode=200, body=None, and headers=[].
Args:
callback_response: The response from the webhook callback method.
message: The message payload from the server topic.
"""
if self.ij.has_feature('webhookserviceendpoint'):
self.callback_response_service_endpoint(callback_response, message)
elif self.ij.has_feature('webhookresponsemarshall'):
self.callback_response_marshall(callback_response, message)
else:
self.callback_response_webhook(callback_response, message)
def callback_response_webhook(self, callback_response: Any, message: dict) -> None:
"""Handle the different types of callback responses.
* Dict - Playbook will not be launched and provided data
will be used in the response to the client.
* True - Playbook will be launched.
* Else - Playbook will NOT be launched.
Args:
callback_response: The response from the webhook callback method.
message: The message payload from the server topic.
"""
if isinstance(callback_response, dict):
# webhook responses are for providers that require a subscription req/resp.
self.publish_webhook_event_response(message, callback_response)
elif callback_response is True:
self.increment_metric('Hits')
self.fire_event_publish(
message.get('triggerId'), self.session_id, message.get('requestKey')
)
# only required for testing in tcex framework
self._tcex_testing(self.session_id, message.get('triggerId'))
# capture fired status for testing framework
self._tcex_testing_fired_events(self.session_id, True)
else:
self.increment_metric('Misses')
# capture fired status for testing framework
self._tcex_testing_fired_events(self.session_id, False)
def callback_response_marshall(self, callback_response: Any, message: dict) -> None:
"""Handle the different types of callback responses.
# webhookResponseMarshall Feature App
        * Callable - Playbook will be launched and the callable will be used as the marshall callback for the response.
* True - Playbook will be launched.
* Else - Playbook will NOT be launched.
Args:
callback_response: The response from the webhook callback method.
message: The message payload from the server topic.
"""
fire_trigger = False
if callable(callback_response):
self.webhook_marshall_event_callback = callback_response
fire_trigger = True
# handle response the same a normal response
self.callback_response_webhook(fire_trigger, message)
def callback_response_service_endpoint(self, callback_response: Any, message: dict) -> None:
"""Handle the different types of callback responses.
# webhookServiceEndpoint Feature App
        For this feature the callback method must fire the event on its own.
* Dict - Playbook will not be launched and provided data
will be used in the response to the client.
* Else - Response will be set to default of statusCode=200, body=None, and headers=[].
Args:
callback_response: The response from the webhook callback method.
message: The message payload from the server topic.
"""
response = {
'body': None,
'headers': [],
'statusCode': 200,
}
if isinstance(callback_response, dict):
# webhook responses are for providers that require a subscription req/resp.
response.update(callback_response)
self.publish_webhook_event_response(message, callback_response)
@property
def command_map(self) -> dict:
"""Return the command map for the current Service type."""
command_map: dict = super().command_map
command_map.update({'webhookevent': self.process_webhook_event_command})
command_map.update({'webhookmarshallevent': self.process_webhook_marshall_event_command})
return command_map
def process_webhook_event_command(self, message: dict) -> None:
"""Process the WebhookEvent command.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"appId": 387,
"command": "WebhookEvent",
"triggerId": 1,
"requestKey": "cd8c7a3a-7968-4b97-80c9-68b83a8ef1a1",
"method": "GET",
"headers": [
{
"name": "Accept-Encoding",
"value": "gzip, deflate, br"
}
],
"queryParams": [
{
"name": "registration",
"value": "true"
}
]
}
Args:
message: The message payload from the server topic.
"""
self._create_logging_handler()
self.log.trace(
f'feature=webhook-trigger-service, event=process-webhook-event, message={message}'
)
# acknowledge webhook event (nothing is currently done with this message on the core side)
self.publish_webhook_event_acknowledge(message)
# get config using triggerId passed in WebhookEvent data
config = None
outputs = {}
if not self.ij.has_feature('webhookserviceendpoint'):
config: dict = self.configs.get(message.get('triggerId'))
if config is None:
self.log.error(
'''feature=webhook-trigger-service, event=missing-config, '''
f'''trigger-id={message.get('triggerId')}'''
)
return
# get an instance of playbooks for App
outputs: Union[list, str] = config.get('tc_playbook_out_variables') or []
if isinstance(outputs, str):
outputs = outputs.split(',')
# get a context aware pb instance for the App callback method
playbook: object = self.tcex.pb(context=self.session_id, output_variables=outputs)
try:
body: Any = self.key_value_store.read(message.get('requestKey'), 'request.body')
if body is not None:
body = base64.b64decode(body).decode()
# pylint: disable=not-callable
callback_data = {
'body': body,
'headers': message.get('headers'),
'method': message.get('method'),
'params': message.get('queryParams'),
}
if self.ij.has_feature('webhookresponsemarshall') or self.ij.has_feature(
'webhookserviceendpoint'
):
                # add request_key kwarg when the marshall or service-endpoint feature is set
callback_data.update({'request_key': message.get('requestKey')})
elif not self.ij.has_feature('webhookserviceendpoint'):
# add optional inputs for "standard" and "marshall" webhook trigger
callback_data.update(
{
'config': config,
'playbook': playbook,
'trigger_id': message.get('triggerId'),
}
)
callback_response: Union[bool, Callable[..., Any], dict] = self.webhook_event_callback(
**callback_data
)
self.callback_response_handler(callback_response, message)
except Exception as e:
self.increment_metric('Errors')
self.log.error(
'feature=webhook-trigger-service, event=webhook-callback-exception, '
f'error="""{e}"""'
)
self.log.trace(traceback.format_exc())
finally:
self.logger.remove_handler_by_name(self.thread_name)
def process_webhook_marshall_event_command(self, message: dict) -> None:
"""Process the WebhookMarshallEvent command.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"appId": 95,
"bodyVariable": "request.body",
"command": "WebhookMarshallEvent",
"headers": [
{
"name": "Accept",
"value": "*/*"
}
],
"requestKey": "c29927c8-b94d-4116-a397-e6eb7002f41c",
"statusCode": 200,
"triggerId": 1234
}
Args:
message: The message payload from the server topic.
"""
self._create_logging_handler()
self.log.trace(
'feature=webhook-trigger-service, event=process-webhook-marshall-event, '
f'message={message}'
)
# acknowledge webhook event (nothing is currently done with this message on the core side)
self.publish_webhook_marshall_event_acknowledge(message)
# get config using triggerId passed in WebhookMarshallEvent data
config: dict = self.configs.get(message.get('triggerId'))
if config is None:
self.log.error(
'''feature=webhook-trigger-service, event=missing-config, '''
f'''trigger-id={message.get('triggerId')}'''
)
return
body = None
request_key: str = message.get('requestKey')
try:
body: Any = self.key_value_store.read(request_key, 'request.body')
if body is not None:
body = base64.b64decode(body).decode()
except Exception as e:
self.increment_metric('Errors')
self.log.error(
'feature=webhook-trigger-service, event=webhook-marshall-callback-exception, '
f'error="""{e}"""'
)
self.log.trace(traceback.format_exc())
# set default value for callback response to the data returned from the playbook
response = {
'body': body,
'headers': message.get('headers'),
'status_code': message.get('statusCode'),
}
if callable(self.webhook_marshall_event_callback):
try:
# call callback method
# pylint: disable=not-callable
callback_response: Optional[dict] = self.webhook_marshall_event_callback(
body=body,
headers=message.get('headers'),
request_key=request_key,
status_code=message.get('statusCode'),
trigger_id=message.get('triggerId'),
)
if isinstance(callback_response, dict):
response.update(callback_response)
except Exception as e:
self.increment_metric('Errors')
self.log.error(
'feature=webhook-trigger-service, event=webhook-marshall-callback-exception, '
f'error="""{e}"""'
)
self.log.trace(traceback.format_exc())
finally:
self.logger.remove_handler_by_name(self.thread_name)
# webhook responses are for providers that require a subscription req/resp.
self.publish_webhook_event_response(message, response)
def publish_webhook_event_acknowledge(self, message: dict) -> None:
"""Publish the WebhookEventResponse message.
Args:
message: The message from the broker.
"""
self.message_broker.publish(
json.dumps(
{
'command': 'Acknowledged',
'requestKey': message.get('requestKey'),
'triggerId': message.get('triggerId'),
'type': 'WebhookEvent',
}
),
self.args.tc_svc_client_topic,
)
def publish_webhook_marshall_event_acknowledge(self, message: dict) -> None:
"""Publish the WebhookEventResponse message.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"command": "Acknowledged",
"requestKey": "cd8c7a3a-7968-4b97-80c9-68b83a8ef1a1",
"triggerId": 1,
"type": "WebhookMarshallResponse"
}
Args:
message: The message from the broker.
"""
self.message_broker.publish(
json.dumps(
{
'command': 'Acknowledged',
'requestKey': message.get('requestKey'),
'triggerId': message.get('triggerId'),
'type': 'WebhookMarshallEvent',
}
),
self.args.tc_svc_client_topic,
)
def publish_webhook_event_response(self, message: dict, callback_response: dict) -> None:
"""Publish the WebhookEventResponse message.
Args:
message: The message from the broker.
callback_response: The data from the callback method.
"""
playbook: object = self.tcex.pb(context=self.session_id, output_variables=[])
# write response body to redis
if callback_response.get('body') is not None:
playbook.create_string(
'response.body',
base64.b64encode(callback_response.get('body').encode('utf-8')).decode('utf-8'),
)
# publish response
self.message_broker.publish(
json.dumps(
{
'sessionId': self.session_id, # session/context
'requestKey': message.get('requestKey'),
'command': 'WebhookEventResponse',
'triggerId': message.get('triggerId'),
'bodyVariable': 'response.body',
'headers': callback_response.get('headers', []),
'statusCode': callback_response.get('status_code', 200),
}
),
self.args.tc_svc_client_topic,
)
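# A callback sketch for a standard webhook app (parameter names taken from the
# callback_data dict assembled in process_webhook_event_command; the body is
# illustrative):
#
#     def webhook_event_callback(body, headers, method, params, config,
#                                playbook, trigger_id):
#         # True launches the playbook, a dict short-circuits with a custom
#         # HTTP response, anything else is recorded as a miss
#         return method == 'POST'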
|
{
"content_hash": "01ccc1de3c5ed98855011262ce22d397",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 99,
"avg_line_length": 38.602409638554214,
"alnum_prop": 0.5592384519350811,
"repo_name": "kstilwell/tcex",
"id": "53e9b3b350d54ffaa7122002cc6ee66a2d61f59b",
"size": "16020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcex/services/webhook_trigger_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "241378"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ScaleValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="scale", parent_name="scatter3d.projection.x", **kwargs
):
super(ScaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 10),
min=kwargs.pop("min", 0),
**kwargs,
)
|
{
"content_hash": "1a5427f83136ea61bd5da01ccb1b8ea3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 81,
"avg_line_length": 33.13333333333333,
"alnum_prop": 0.5774647887323944,
"repo_name": "plotly/plotly.py",
"id": "8f945191c8633b704861cf82171a16b2250a0be5",
"size": "497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter3d/projection/x/_scale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from publish.admin import PublishableAdmin, PublishableTabularInline, PublishableStackedInline
from publish.admin import Publishable
from press.forms import ArticleAdminForm
from press.models import Article, Section, ArticleSeo
class ArticleSeoInline(PublishableStackedInline):
model = ArticleSeo
class ArticleAdmin(PublishableAdmin):
form = ArticleAdminForm
prepopulated_fields = {"slug": ("title",)}
list_display = ['title', 'section', 'publish_state', 'created_date',
'modified_date']
list_filter = ['section', 'publish_state', 'modified_date', 'created_date']
search_fields = ['title', 'subtitle']
inlines = [
ArticleSeoInline
]
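    # publish_reverse_fields should make django-publish follow the reverse
    # ArticleSeo relation when copying a draft article to its public version.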
class PublishMeta(Publishable.PublishMeta):
publish_reverse_fields = [
'articleseo'
]
def save_model(self, request, obj, form, change):
obj.user = request.user
super(ArticleAdmin, self).save_model(request, obj, form, change)
class SectionAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
admin.site.register(Section, SectionAdmin)
admin.site.register(Article, ArticleAdmin)
|
{
"content_hash": "a71811b1d44d19b5ab01bbb1cf9cc943",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 94,
"avg_line_length": 28.674418604651162,
"alnum_prop": 0.7039740470397404,
"repo_name": "petry/django-press",
"id": "04cee21f1689e7b28f496f8e2ab5ca27fe90e55a",
"size": "1233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "press/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "120854"
}
],
"symlink_target": ""
}
|
import asyncio
import functools
import time
import unittest
__all__ = []
def export(f):
__all__.append(f.__name__)
return f
@export
def run_until_complete(fn):
'''Decorator to run an async test synchronously.
Use with AsyncTestBase.
'''
@functools.wraps(fn)
def wrapper(self, *a, **kw):
timeout = self.async_test_timeout
self.loop.run_until_complete(
asyncio.wait_for(fn(self, *a, **kw), timeout))
return wrapper
@export
class AsyncTestBase:
'''Async test base.
Creates a new event loop for each test.
'''
async_test_timeout = 3
def setUp(self):
super().setUp()
self.saved_loop = asyncio.get_event_loop()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.stop()
self.loop.close()
asyncio.set_event_loop(self.saved_loop)
super().tearDown()
@export
class achain:
'''Async generator from sequences.'''
def __init__(self, *seqs):
self.seqs = seqs
async def __aiter__(self):
return _achainiter(self)
class _achainiter:
def __init__(self, achain):
self.seqs = iter(achain.seqs)
self.iter = iter(())
async def __anext__(self):
try:
return next(self.iter)
except StopIteration:
try:
self.iter = iter(next(self.seqs))
except StopIteration:
raise StopAsyncIteration
return await self.__anext__()
@export
@asyncio.coroutine
def momentarily(beats, coro):
'''yield for a number of beats before yielding to a coroutine.'''
for _ in range(beats):
yield # let something else run
return (yield from coro)
# No need to keep running this every time.
@unittest.skip('because it worked')
class TestAsyncTestBase(AsyncTestBase, unittest.TestCase):
@run_until_complete
async def test_sleep(self):
'''just a second'''
t1 = time.monotonic()
await asyncio.sleep(1)
t2 = time.monotonic()
self.assertAlmostEqual(t2-t1, 1, places=1)
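# A usage sketch for achain (note the legacy Python 3.5-era async-iterator
# protocol used above, where __aiter__ is itself a coroutine):
#
#     async def consume():
#         async for x in achain([1, 2], (3,)):
#             print(x)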
|
{
"content_hash": "f722979ce4185e3834a5748bc0981240",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 69,
"avg_line_length": 22.621052631578948,
"alnum_prop": 0.598417868776175,
"repo_name": "groner/alternator.py",
"id": "18f166f879d7aa140adce369c2c02ede3491e422",
"size": "2149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/asynchelpers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13324"
}
],
"symlink_target": ""
}
|
"""Manages the spawning of mpi processes to send to the various solvers.
"""
import os
import functools
import numpy as np
from .slepc_linalg import (
eigs_slepc, svds_slepc, mfn_multiply_slepc, ssolve_slepc,
)
from ..core import _NUM_THREAD_WORKERS
# Work out if already running as mpi
if ('OMPI_COMM_WORLD_SIZE' in os.environ) or ('PMI_SIZE' in os.environ):
ALREADY_RUNNING_AS_MPI = True
if '_QUIMB_MPI_LAUNCHED' not in os.environ:
raise RuntimeError(
"For the moment, quimb programs launched explicitly"
" using MPI need to use `quimb-mpi-python`.")
USE_SYNCRO = "QUIMB_SYNCRO_MPI" in os.environ
else:
ALREADY_RUNNING_AS_MPI = False
USE_SYNCRO = False
# Work out the desired total number of workers
for _NUM_MPI_WORKERS_VAR in ['QUIMB_NUM_MPI_WORKERS',
'QUIMB_NUM_PROCS',
'OMPI_COMM_WORLD_SIZE',
'PMI_SIZE',
'OMP_NUM_THREADS']:
if _NUM_MPI_WORKERS_VAR in os.environ:
NUM_MPI_WORKERS = int(os.environ[_NUM_MPI_WORKERS_VAR])
break
else:
import psutil
_NUM_MPI_WORKERS_VAR = 'psutil'
NUM_MPI_WORKERS = psutil.cpu_count(logical=False)
def bcast(result, comm, result_rank):
"""Broadcast a result to all workers, dispatching to proper MPI (rather
than pickled) communication if the result is a numpy array.
"""
rank = comm.Get_rank()
# make sure all workers know if result is an array or not
if rank == result_rank:
is_ndarray = isinstance(result, np.ndarray)
else:
is_ndarray = None
is_ndarray = comm.bcast(is_ndarray, root=result_rank)
# standard (pickle) bcast if not array
if not is_ndarray:
return comm.bcast(result, root=result_rank)
# make sure all workers have shape and dtype
if rank == result_rank:
shape_dtype = result.shape, str(result.dtype)
else:
shape_dtype = None
shape_dtype = comm.bcast(shape_dtype, root=result_rank)
shape, dtype = shape_dtype
# allocate data space
if rank != result_rank:
result = np.empty(shape, dtype=dtype)
# use fast communication for main array
comm.Bcast(result, root=result_rank)
return result
class SyncroFuture:
def __init__(self, result, result_rank, comm):
self._result = result
self.result_rank = result_rank
self.comm = comm
def result(self):
rank = self.comm.Get_rank()
if rank == self.result_rank:
should_it = (isinstance(self._result, tuple) and
any(isinstance(x, np.ndarray) for x in self._result))
if should_it:
iterate_over = len(self._result)
else:
iterate_over = 0
else:
iterate_over = None
iterate_over = self.comm.bcast(iterate_over, root=self.result_rank)
if iterate_over:
if rank != self.result_rank:
self._result = (None,) * iterate_over
result = tuple(bcast(x, self.comm, self.result_rank)
for x in self._result)
else:
result = bcast(self._result, self.comm, self.result_rank)
return result
@staticmethod
def cancel():
raise ValueError("SyncroFutures cannot be cancelled - they are "
"submitted in a parallel round-robin fasion where "
"each worker immediately computes all its results.")
class SynchroMPIPool:
def __init__(self):
import itertools
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.size = self.comm.Get_size()
self.rank = self.comm.Get_rank()
self.counter = itertools.cycle(range(0, NUM_MPI_WORKERS))
self._max_workers = self.size
def submit(self, fn, *args, **kwargs):
# round robin iterate through ranks
current_counter = next(self.counter)
# accept job and compute if have the same rank, else do nothing
if current_counter == self.rank:
res = fn(*args, **kwargs)
else:
res = None
# wrap the result in a SyncroFuture, that will broadcast result
return SyncroFuture(res, current_counter, self.comm)
def shutdown(self):
pass
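# A usage sketch for syncro mode (every rank runs the same script under MPI
# with QUIMB_SYNCRO_MPI set, e.g. via quimb's quimb-mpi-python launcher):
#
#     pool = SynchroMPIPool()
#     future = pool.submit(sum, range(10))
#     assert future.result() == 45   # broadcast, so identical on every rank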
class CachedPoolWithShutdown:
"""Decorator for caching the mpi pool when called with the equivalent args,
and shutting down previous ones when not needed.
"""
def __init__(self, pool_fn):
self._settings = '__UNINITIALIZED__'
self._pool_fn = pool_fn
def __call__(self, num_workers=None, num_threads=1):
        # convert None to the default so equivalent calls hit the same cache entry
if num_workers is None:
num_workers = NUM_MPI_WORKERS
elif ALREADY_RUNNING_AS_MPI and (num_workers != NUM_MPI_WORKERS):
raise ValueError("Can't specify number of processes when running "
"under MPI rather than spawning processes.")
# first call
if self._settings == '__UNINITIALIZED__':
self._pool = self._pool_fn(num_workers, num_threads)
self._settings = (num_workers, num_threads)
# new type of pool requested
elif self._settings != (num_workers, num_threads):
self._pool.shutdown()
self._pool = self._pool_fn(num_workers, num_threads)
self._settings = (num_workers, num_threads)
return self._pool
@CachedPoolWithShutdown
def get_mpi_pool(num_workers=None, num_threads=1):
"""Get the MPI executor pool, with specified number of processes and
threads per process.
"""
if (num_workers == 1) and (num_threads == _NUM_THREAD_WORKERS):
from concurrent.futures import ProcessPoolExecutor
return ProcessPoolExecutor(1)
if USE_SYNCRO:
return SynchroMPIPool()
from mpi4py.futures import MPIPoolExecutor
return MPIPoolExecutor(num_workers, main=False,
env={'OMP_NUM_THREADS': str(num_threads),
'QUIMB_NUM_MPI_WORKERS': str(num_workers),
'_QUIMB_MPI_LAUNCHED': 'SPAWNED'})
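# Usage sketch: ``get_mpi_pool(4, 2)`` returns (and caches) an executor with
# 4 MPI workers of 2 OMP threads each; repeated calls with identical settings
# reuse the cached pool, while new settings shut the old one down first.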
class GetMPIBeforeCall(object):
"""Wrap a function to automatically get the correct communicator before
    it's called, and to set the `comm_self` kwarg to allow forced self mode.
This is called by every mpi process before the function evaluation.
"""
def __init__(self, fn):
self.fn = fn
def __call__(self, *args,
comm_self=False,
wait_for_workers=None,
**kwargs):
"""
Parameters
----------
*args :
Supplied to self.fn
comm_self : bool, optional
Whether to force use of MPI.COMM_SELF
wait_for_workers : int, optional
If set, wait for the communicator to have this many workers, this
can help to catch some errors regarding expected worker numbers.
**kwargs :
Supplied to self.fn
"""
from mpi4py import MPI
if not comm_self:
comm = MPI.COMM_WORLD
else:
comm = MPI.COMM_SELF
if wait_for_workers is not None:
from time import time
t0 = time()
while comm.Get_size() != wait_for_workers:
if time() - t0 > 2:
raise RuntimeError(
f"Timeout while waiting for {wait_for_workers} "
f"workers to join comm {comm}.")
comm.Barrier()
res = self.fn(*args, comm=comm, **kwargs)
comm.Barrier()
return res
class SpawnMPIProcessesFunc(object):
"""Automatically wrap a function to be executed in parallel by a
pool of mpi workers.
This is only called by the master mpi process in manual mode, only by
the (non-mpi) spawning process in automatic mode, or by all processes in
syncro mode.
"""
def __init__(self, fn):
self.fn = fn
def __call__(self, *args,
num_workers=None,
num_threads=1,
mpi_pool=None,
spawn_all=USE_SYNCRO or (not ALREADY_RUNNING_AS_MPI),
**kwargs):
"""
Parameters
----------
*args
Supplied to `self.fn`.
num_workers : int, optional
            How many processes in total should run the function in parallel.
num_threads : int, optional
How many (OMP) threads each process should use
mpi_pool : pool-like, optional
If not None (default), submit function to this pool.
spawn_all : bool, optional
Whether all the parallel processes should be spawned (True), or
num_workers - 1, so that the current process can also do work.
**kwargs
Supplied to `self.fn`.
Returns
-------
`fn` output from the master process.
"""
if num_workers is None:
num_workers = NUM_MPI_WORKERS
if num_workers == 1: # no pool or communicator required
return self.fn(*args, comm_self=True, **kwargs)
kwargs['wait_for_workers'] = num_workers
if mpi_pool is not None:
pool = mpi_pool
else:
pool = get_mpi_pool(num_workers, num_threads)
# the (non mpi) main process is idle while the workers compute.
if spawn_all:
futures = [pool.submit(self.fn, *args, **kwargs)
for _ in range(num_workers)]
results = [f.result() for f in futures]
        # the current process is the master mpi process and contributes work too
else:
futures = [pool.submit(self.fn, *args, **kwargs)
for _ in range(num_workers - 1)]
results = ([self.fn(*args, **kwargs)] +
[f.result() for f in futures])
# Get master result, (not always first submitted)
return next(r for r in results if r is not None)
# ---------------------------------- SLEPC ---------------------------------- #
eigs_slepc_mpi = functools.wraps(eigs_slepc)(
GetMPIBeforeCall(eigs_slepc))
eigs_slepc_spawn = functools.wraps(eigs_slepc)(
SpawnMPIProcessesFunc(eigs_slepc_mpi))
svds_slepc_mpi = functools.wraps(svds_slepc)(
GetMPIBeforeCall(svds_slepc))
svds_slepc_spawn = functools.wraps(svds_slepc)(
SpawnMPIProcessesFunc(svds_slepc_mpi))
mfn_multiply_slepc_mpi = functools.wraps(mfn_multiply_slepc)(
GetMPIBeforeCall(mfn_multiply_slepc))
mfn_multiply_slepc_spawn = functools.wraps(mfn_multiply_slepc)(
SpawnMPIProcessesFunc(mfn_multiply_slepc_mpi))
ssolve_slepc_mpi = functools.wraps(ssolve_slepc)(
GetMPIBeforeCall(ssolve_slepc))
ssolve_slepc_spawn = functools.wraps(ssolve_slepc)(
SpawnMPIProcessesFunc(ssolve_slepc_mpi))
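# Illustrative use of a spawned solver (``ham`` is an assumed operator built
# elsewhere; keyword names follow ``eigs_slepc``):
#     lk, vk = eigs_slepc_spawn(ham, k=1, num_workers=4)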
|
{
"content_hash": "2e0522f7ed0562a75e8b1cdd78bb64b1",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 79,
"avg_line_length": 33.40909090909091,
"alnum_prop": 0.5834920634920635,
"repo_name": "jcmgray/quijy",
"id": "ca0577eff39da4ec2ada9ff19c76e2ba63d50581",
"size": "11025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quimb/linalg/mpi_launcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85159"
}
],
"symlink_target": ""
}
|
def add_native_methods(clazz):
    # Attach stub implementations of PathFiller's native methods; each one
    # raises NotImplementedError until a real binding is supplied.
def dispose____(a0):
raise NotImplementedError()
def setFillMode__int__(a0, a1):
raise NotImplementedError()
def beginPath____(a0):
raise NotImplementedError()
def beginSubpath__float__float__(a0, a1, a2):
raise NotImplementedError()
def appendLine__float__float__(a0, a1, a2):
raise NotImplementedError()
def appendQuadratic__float__float__float__float__(a0, a1, a2, a3, a4):
raise NotImplementedError()
def appendCubic__float__float__float__float__float__float__(a0, a1, a2, a3, a4, a5, a6):
raise NotImplementedError()
def closedSubpath____(a0):
raise NotImplementedError()
def endPath____(a0):
raise NotImplementedError()
def getCPathConsumer____(a0):
raise NotImplementedError()
def getAlphaBox__int____(a0, a1):
raise NotImplementedError()
def setOutputArea__float__float__int__int__(a0, a1, a2, a3, a4):
raise NotImplementedError()
def getTileState____(a0):
raise NotImplementedError()
def nextTile____(a0):
raise NotImplementedError()
def reset____(a0):
raise NotImplementedError()
clazz.dispose____ = dispose____
clazz.setFillMode__int__ = setFillMode__int__
clazz.beginPath____ = beginPath____
clazz.beginSubpath__float__float__ = beginSubpath__float__float__
clazz.appendLine__float__float__ = appendLine__float__float__
clazz.appendQuadratic__float__float__float__float__ = appendQuadratic__float__float__float__float__
clazz.appendCubic__float__float__float__float__float__float__ = appendCubic__float__float__float__float__float__float__
clazz.closedSubpath____ = closedSubpath____
clazz.endPath____ = endPath____
clazz.getCPathConsumer____ = getCPathConsumer____
clazz.getAlphaBox__int____ = getAlphaBox__int____
clazz.setOutputArea__float__float__int__int__ = setOutputArea__float__float__int__int__
clazz.getTileState____ = getTileState____
clazz.nextTile____ = nextTile____
clazz.reset____ = reset____
|
{
"content_hash": "49ae9ebf62571ead674e45e22fcb9842",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 123,
"avg_line_length": 33.983870967741936,
"alnum_prop": 0.6312292358803987,
"repo_name": "laffra/pava",
"id": "8ffc2cc9dd5aaee3b088b4444e1ae2d632da12ef",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pava/implementation/natives/sun/dc/pr/PathFiller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "144"
},
{
"name": "Python",
"bytes": "369288"
}
],
"symlink_target": ""
}
|
__author__ = 'Matt Clarke-Lauer'
__email__ = 'matt@clarkelauer.com'
__credits__ = ['Matt Clarke-Lauer']
__date__ = '8/1/13'
__version__ = '0.1'
__status__ = 'Development'
import log
name = "libraryApisUsed"
description = "Gets the used library apis"
result = []
def getName():
"return analysis name"
return name
def getDescription():
"return analysis description"
return description
def getResults(results):
results["Library Apis Used"] = result
return results
def run(classes, dependencies, sharedobjs):
global result
log.info("Analysis: Library Api Check")
result = dependencies["internal"]
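# Illustrative driver sketch (the plugin loader and argument shapes are
# assumptions, not part of this module):
#     run(classes=None, dependencies={'internal': ['api1', 'api2']}, sharedobjs=None)
#     getResults({})  # -> {'Library Apis Used': ['api1', 'api2']}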
|
{
"content_hash": "238ffa778ffac567841aa97fdf0b0947",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 43,
"avg_line_length": 20.612903225806452,
"alnum_prop": 0.6697965571205008,
"repo_name": "mclarkelauer/AndroidAnalyzer",
"id": "fa9c030a23dd9b71dbc24636bc6f7d7a738a82eb",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Analysis/plugins/libraryApisUsed/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "2660501"
},
{
"name": "Python",
"bytes": "118996"
},
{
"name": "Shell",
"bytes": "2320"
},
{
"name": "Smali",
"bytes": "3701697"
}
],
"symlink_target": ""
}
|
from CIM15.CDPSM.Asset.IEC61968.Assets.AssetInfo import AssetInfo
class TransformerEndInfo(AssetInfo):
"""Transformer end data.
"""
def __init__(self, ratedU=0.0, endNumber=0, phaseAngleClock=0, emergencyS=0.0, ratedS=0.0, shortTermS=0.0, r=0.0, insulationU=0.0, connectionKind="Z", TransformerTankInfo=None, TransformerEnd=None, *args, **kw_args):
"""Initialises a new 'TransformerEndInfo' instance.
@param ratedU: Rated voltage: phase-phase for three-phase windings, and either phase-phase or phase-neutral for single-phase windings.
@param endNumber: Number for this transformer end, corresponding to the end's order in the PowerTransformer.vectorGroup attribute. Highest voltage winding should be 1.
@param phaseAngleClock: Winding phase angle where 360 degrees are represented with clock hours, so the valid values are {0, ..., 11}. For example, to express winding code 'Dyn11', set attributes as follows: 'connectionKind' = Yn and 'phaseAngleClock' = 11.
@param emergencyS: Apparent power that the winding can carry under emergency conditions (also called long-term emergency power).
@param ratedS: Normal apparent power rating.
@param shortTermS: Apparent power that this winding can carry for a short period of time (in emergency).
@param r: DC resistance.
@param insulationU: Basic insulation level voltage rating.
@param connectionKind: Kind of connection. Values are: "Z", "A", "Yn", "Y", "Zn", "D", "I"
@param TransformerTankInfo: Transformer tank data that this end description is part of.
@param TransformerEnd: All transformer ends described by this end data.
"""
#: Rated voltage: phase-phase for three-phase windings, and either phase-phase or phase-neutral for single-phase windings.
self.ratedU = ratedU
#: Number for this transformer end, corresponding to the end's order in the PowerTransformer.vectorGroup attribute. Highest voltage winding should be 1.
self.endNumber = endNumber
#: Winding phase angle where 360 degrees are represented with clock hours, so the valid values are {0, ..., 11}. For example, to express winding code 'Dyn11', set attributes as follows: 'connectionKind' = Yn and 'phaseAngleClock' = 11.
self.phaseAngleClock = phaseAngleClock
#: Apparent power that the winding can carry under emergency conditions (also called long-term emergency power).
self.emergencyS = emergencyS
#: Normal apparent power rating.
self.ratedS = ratedS
#: Apparent power that this winding can carry for a short period of time (in emergency).
self.shortTermS = shortTermS
#: DC resistance.
self.r = r
#: Basic insulation level voltage rating.
self.insulationU = insulationU
#: Kind of connection. Values are: "Z", "A", "Yn", "Y", "Zn", "D", "I"
self.connectionKind = connectionKind
self._TransformerTankInfo = None
self.TransformerTankInfo = TransformerTankInfo
self._TransformerEnd = []
self.TransformerEnd = [] if TransformerEnd is None else TransformerEnd
super(TransformerEndInfo, self).__init__(*args, **kw_args)
_attrs = ["ratedU", "endNumber", "phaseAngleClock", "emergencyS", "ratedS", "shortTermS", "r", "insulationU", "connectionKind"]
_attr_types = {"ratedU": float, "endNumber": int, "phaseAngleClock": int, "emergencyS": float, "ratedS": float, "shortTermS": float, "r": float, "insulationU": float, "connectionKind": str}
_defaults = {"ratedU": 0.0, "endNumber": 0, "phaseAngleClock": 0, "emergencyS": 0.0, "ratedS": 0.0, "shortTermS": 0.0, "r": 0.0, "insulationU": 0.0, "connectionKind": "Z"}
_enums = {"connectionKind": "WindingConnection"}
_refs = ["TransformerTankInfo", "TransformerEnd"]
_many_refs = ["TransformerEnd"]
def getTransformerTankInfo(self):
"""Transformer tank data that this end description is part of.
"""
return self._TransformerTankInfo
def setTransformerTankInfo(self, value):
if self._TransformerTankInfo is not None:
filtered = [x for x in self.TransformerTankInfo.TransformerEndInfos if x != self]
self._TransformerTankInfo._TransformerEndInfos = filtered
self._TransformerTankInfo = value
if self._TransformerTankInfo is not None:
if self not in self._TransformerTankInfo._TransformerEndInfos:
self._TransformerTankInfo._TransformerEndInfos.append(self)
TransformerTankInfo = property(getTransformerTankInfo, setTransformerTankInfo)
def getTransformerEnd(self):
"""All transformer ends described by this end data.
"""
return self._TransformerEnd
def setTransformerEnd(self, value):
for x in self._TransformerEnd:
x.TransformerEndInfo = None
for y in value:
y._TransformerEndInfo = self
self._TransformerEnd = value
TransformerEnd = property(getTransformerEnd, setTransformerEnd)
def addTransformerEnd(self, *TransformerEnd):
for obj in TransformerEnd:
obj.TransformerEndInfo = self
def removeTransformerEnd(self, *TransformerEnd):
for obj in TransformerEnd:
obj.TransformerEndInfo = None
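# Illustrative wiring sketch (assumes a ``TransformerTankInfo`` instance
# ``tank`` from the same CIM package):
#     end = TransformerEndInfo(ratedU=11000.0, endNumber=1, TransformerTankInfo=tank)
#     assert end in tank.TransformerEndInfos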
|
{
"content_hash": "10a40d0aeb3c004f582bf5ec9e080072",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 265,
"avg_line_length": 52.450980392156865,
"alnum_prop": 0.685607476635514,
"repo_name": "rwl/PyCIM",
"id": "2e8275128f347ba0c4dab0579e34e5bdb3f3e210",
"size": "6450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/CDPSM/Asset/IEC61968/AssetModels/TransformerEndInfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
from mopidy import config
def test_core_schema_has_cache_dir():
assert "cache_dir" in config._core_schema
assert isinstance(config._core_schema["cache_dir"], config.Path)
def test_core_schema_has_config_dir():
assert "config_dir" in config._core_schema
assert isinstance(config._core_schema["config_dir"], config.Path)
def test_core_schema_has_data_dir():
assert "data_dir" in config._core_schema
assert isinstance(config._core_schema["data_dir"], config.Path)
def test_core_schema_has_max_tracklist_length():
assert "max_tracklist_length" in config._core_schema
max_tracklist_length_schema = config._core_schema["max_tracklist_length"]
assert isinstance(max_tracklist_length_schema, config.Integer)
assert max_tracklist_length_schema._minimum == 1
|
{
"content_hash": "a00490f6e3baaed5cded512f18874be4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 34.65217391304348,
"alnum_prop": 0.7289836888331243,
"repo_name": "kingosticks/mopidy",
"id": "b34a9b6907c960c3ecc99ef031d41b48573eeb57",
"size": "797",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "tests/config/test_defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "610"
},
{
"name": "HTML",
"bytes": "805"
},
{
"name": "Python",
"bytes": "743402"
},
{
"name": "Roff",
"bytes": "573"
},
{
"name": "Shell",
"bytes": "741"
}
],
"symlink_target": ""
}
|
import mido
# Default sysex header for the Launchpad MK2.
# mido adds the F0/F7 framing bytes, so on the wire this becomes
# F0 00 20 29 02 18 <data...> F7
default_header = [0, 32, 41, 2, 24]
def note_on(midiout, note, velocity = 100):
message = mido.Message('note_on', note=note, velocity = velocity)
midiout.send(message)
def note_off(midiout, note):
message = mido.Message('note_off', note=note)
midiout.send(message)
def light_on_color_code(midiout, key, color):
# 10 = Set leds in color code mode
message = mido.Message('sysex', data = default_header + [10] + [key, color])
midiout.send(message)
def light_on(midiout, key, r=63, g=63, b=63):
# 11 = Set leds in RGB mode
message = mido.Message('sysex', data = default_header + [11] + [key, r, g, b])
midiout.send(message)
def light_off(midiout, key):
# 10 = Set leds in color code mode
message = mido.Message('sysex', data = default_header + [10] + [key, 0])
midiout.send(message)
def light_all(midiout, color):
# 14 = Set all leds
message = mido.Message('sysex', data = default_header + [14] + [color])
midiout.send(message)
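# Illustrative use (the port name is an assumption; it varies by OS/driver):
#     out = mido.open_output('Launchpad MK2 MIDI 1')
#     light_on(out, 11, r=63, g=0, b=0)  # bottom-left pad, full red
#     light_all(out, 0)                  # color code 0 switches everything off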
|
{
"content_hash": "4bbba2b1adafe77ea8fb7852b3fb7ddd",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 33.484848484848484,
"alnum_prop": 0.6506787330316742,
"repo_name": "danodic/pianopad",
"id": "b07f8acc1e46c0328fd067ea83cd3f65c93924b1",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/translator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52911"
}
],
"symlink_target": ""
}
|
"""Support for Toon van Eneco devices."""
import logging
from typing import Any, Dict
from functools import partial
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, CONF_SCAN_INTERVAL
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.dispatcher import dispatcher_send, async_dispatcher_connect
from . import config_flow  # noqa pylint: disable=unused-import
from .const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_DISPLAY,
CONF_TENANT,
DATA_TOON_CLIENT,
DATA_TOON_CONFIG,
DATA_TOON_UPDATED,
DATA_TOON,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
# Validation of the user's configuration
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Required(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): vol.All(cv.time_period, cv.positive_timedelta),
}
)
},
extra=vol.ALLOW_EXTRA,
)
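# Example configuration.yaml entry (illustrative values; keys follow the
# CONF_* constants above):
#
# toon:
#   client_id: YOUR_CLIENT_ID
#   client_secret: YOUR_CLIENT_SECRET
#   scan_interval: 300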
SERVICE_SCHEMA = vol.Schema({vol.Optional(CONF_DISPLAY): cv.string})
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the Toon components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
# Store config to be used during entry setup
hass.data[DATA_TOON_CONFIG] = conf
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigType) -> bool:
"""Set up Toon from a config entry."""
from toonapilib import Toon
conf = hass.data.get(DATA_TOON_CONFIG)
toon = await hass.async_add_executor_job(
partial(
Toon,
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
conf[CONF_CLIENT_ID],
conf[CONF_CLIENT_SECRET],
tenant_id=entry.data[CONF_TENANT],
display_common_name=entry.data[CONF_DISPLAY],
)
)
hass.data.setdefault(DATA_TOON_CLIENT, {})[entry.entry_id] = toon
toon_data = ToonData(hass, entry, toon)
hass.data.setdefault(DATA_TOON, {})[entry.entry_id] = toon_data
async_track_time_interval(hass, toon_data.update, conf[CONF_SCAN_INTERVAL])
# Register device for the Meter Adapter, since it will have no entities.
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, toon.agreement.id, "meter_adapter")},
manufacturer="Eneco",
name="Meter Adapter",
via_device=(DOMAIN, toon.agreement.id),
)
def update(call):
"""Service call to manually update the data."""
called_display = call.data.get(CONF_DISPLAY, None)
for toon_data in hass.data[DATA_TOON].values():
if (
called_display and called_display == toon_data.display_name
) or not called_display:
toon_data.update()
hass.services.async_register(DOMAIN, "update", update, schema=SERVICE_SCHEMA)
for component in "binary_sensor", "climate", "sensor":
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
class ToonData:
"""Communication class for interacting with toonapilib."""
def __init__(self, hass: HomeAssistantType, entry: ConfigType, toon):
"""Initialize the Toon data object."""
self._hass = hass
self._toon = toon
self._entry = entry
self.agreement = toon.agreement
self.gas = toon.gas
self.power = toon.power
self.solar = toon.solar
self.temperature = toon.temperature
self.thermostat = toon.thermostat
self.thermostat_info = toon.thermostat_info
self.thermostat_state = toon.thermostat_state
@property
def display_name(self):
"""Return the display connected to."""
return self._entry.data[CONF_DISPLAY]
def update(self, now=None):
"""Update all Toon data and notify entities."""
        # Ignore the TTL mechanism from the client library.
        # It causes a lot of issues, hence we take control of caching ourselves.
self._toon._clear_cache() # noqa pylint: disable=W0212
# Gather data from client library (single API call)
self.gas = self._toon.gas
self.power = self._toon.power
self.solar = self._toon.solar
self.temperature = self._toon.temperature
self.thermostat = self._toon.thermostat
self.thermostat_info = self._toon.thermostat_info
self.thermostat_state = self._toon.thermostat_state
# Notify all entities
dispatcher_send(self._hass, DATA_TOON_UPDATED, self._entry.data[CONF_DISPLAY])
class ToonEntity(Entity):
"""Defines a base Toon entity."""
def __init__(self, toon: ToonData, name: str, icon: str) -> None:
"""Initialize the Toon entity."""
self._name = name
self._state = None
self._icon = icon
self.toon = toon
self._unsub_dispatcher = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> str:
"""Return the mdi icon of the entity."""
return self._icon
@property
def should_poll(self) -> bool:
"""Return the polling requirement of the entity."""
return False
async def async_added_to_hass(self) -> None:
"""Connect to dispatcher listening for entity data notifications."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, DATA_TOON_UPDATED, self._schedule_immediate_update
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect from update signal."""
self._unsub_dispatcher()
@callback
def _schedule_immediate_update(self, display_name: str) -> None:
"""Schedule an immediate update of the entity."""
if display_name == self.toon.display_name:
self.async_schedule_update_ha_state(True)
class ToonDisplayDeviceEntity(ToonEntity):
"""Defines a Toon display device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this thermostat."""
agreement = self.toon.agreement
model = agreement.display_hardware_version.rpartition("/")[0]
sw_version = agreement.display_software_version.rpartition("/")[-1]
return {
"identifiers": {(DOMAIN, agreement.id)},
"name": "Toon Display",
"manufacturer": "Eneco",
"model": model,
"sw_version": sw_version,
}
class ToonElectricityMeterDeviceEntity(ToonEntity):
"""Defines a Electricity Meter device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
return {
"name": "Electricity Meter",
"identifiers": {(DOMAIN, self.toon.agreement.id, "electricity")},
"via_device": (DOMAIN, self.toon.agreement.id, "meter_adapter"),
}
class ToonGasMeterDeviceEntity(ToonEntity):
"""Defines a Gas Meter device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
via_device = "meter_adapter"
if self.toon.gas.is_smart:
via_device = "electricity"
return {
"name": "Gas Meter",
"identifiers": {(DOMAIN, self.toon.agreement.id, "gas")},
"via_device": (DOMAIN, self.toon.agreement.id, via_device),
}
class ToonSolarDeviceEntity(ToonEntity):
"""Defines a Solar Device device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
return {
"name": "Solar Panels",
"identifiers": {(DOMAIN, self.toon.agreement.id, "solar")},
"via_device": (DOMAIN, self.toon.agreement.id, "meter_adapter"),
}
class ToonBoilerModuleDeviceEntity(ToonEntity):
"""Defines a Boiler Module device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
return {
"name": "Boiler Module",
"manufacturer": "Eneco",
"identifiers": {(DOMAIN, self.toon.agreement.id, "boiler_module")},
"via_device": (DOMAIN, self.toon.agreement.id),
}
class ToonBoilerDeviceEntity(ToonEntity):
"""Defines a Boiler device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
return {
"name": "Boiler",
"identifiers": {(DOMAIN, self.toon.agreement.id, "boiler")},
"via_device": (DOMAIN, self.toon.agreement.id, "boiler_module"),
}
|
{
"content_hash": "eaa4a8d31d24a22e7c4d019499ded1eb",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 86,
"avg_line_length": 32.90526315789474,
"alnum_prop": 0.6242269140541693,
"repo_name": "fbradyirl/home-assistant",
"id": "4a3afb9b87b408106047bc3f2f1d93f1262409d4",
"size": "9378",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/toon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
from flask import (
abort, current_app, g, redirect, render_template, request, session,
url_for
)
from application.blueprint import admin as bp
from model import Admin
from server_global import db
import service
from application.admin.view import account, general, plan, stripe
@bp.before_request
def admin_check():
unchecked_endpoints = ('admin.login', 'admin.logout')
if 'admin username' not in session and \
request.endpoint not in unchecked_endpoints:
abort(404)
@bp.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
try:
query = db.session.query(Admin).\
filter(Admin.username==request.form.get('username'))
user = query.one()
if user.valid_password(request.form.get('password')):
session['admin username'] = user.username
return redirect(url_for('admin.home'))
        except Exception:
            # perform a dummy hash so failed lookups take roughly as long as a
            # real password check, avoiding timing clues about valid usernames
            service.feign_hash(15)
return render_template('admin/login.jade')
@bp.route('/logout/')
def logout():
if 'admin username' in session:
del session['admin username']
return redirect(url_for('admin.login'))
|
{
"content_hash": "8367578e516badb6a1aedc1bba495697",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 72,
"avg_line_length": 27.533333333333335,
"alnum_prop": 0.6335754640839386,
"repo_name": "glennyonemitsu/MarkupHiveServer",
"id": "3686ff46c4a15db39745b29dc709adb9aa3e6544",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/application/admin/view/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119550"
},
{
"name": "JavaScript",
"bytes": "558423"
},
{
"name": "PHP",
"bytes": "2278"
},
{
"name": "Python",
"bytes": "311722"
},
{
"name": "Ruby",
"bytes": "2284"
}
],
"symlink_target": ""
}
|
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
import os
includes = ['pypesq']
try:
import numpy as np
includes += [os.path.join(np.get_include(), 'numpy')]
except ImportError:
pass
extension = Extension("pesq_core",
sources=["pypesq/pesq.c", "pypesq/dsp.c", "pypesq/pesqdsp.c",
"pypesq/pesqio.c", "pypesq/pesqmain.c", "pypesq/pesqmod.c"],
include_dirs=includes,
language='c')
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
setup(name='pypesq',
version='1.2.4',
description="A package to compute pesq score.",
url='https://github.com/vBaiCai/python-pesq',
author_email='zhuroubaicai@gmail.com',
keywords=['pesq', 'speech', 'speech quality'],
license='MIT',
packages=find_packages(),
ext_modules=[extension],
cmdclass={'build_ext': build_ext},
setup_requires=['numpy'],
zip_safe=False,
install_requires=['numpy'],
python_requires='!=3.0.*, !=3.1.*, !=3.2.*, <4',
)
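# Build/usage sketch (the top-level ``pesq`` helper is assumed from the
# package's documentation, not defined in this file):
#     pip install .
#     >>> from pypesq import pesq
#     >>> score = pesq(ref_signal, deg_signal, fs)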
|
{
"content_hash": "a720ab032db4a654b4de3c77e5e8076a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 91,
"avg_line_length": 32.61904761904762,
"alnum_prop": 0.5678832116788322,
"repo_name": "vBaiCai/python-pesq",
"id": "71a1f31a45bb7a167df3ac7d5857218ee1e52ebb",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167005"
}
],
"symlink_target": ""
}
|
import sys
import os
# Modify our path so Python knows where to find verify_util and file_util modules.
sys.path.append(os.path.join(os.path.dirname(__file__), '../web2py/applications/cqg/question'))
import file_util
template_string = '''\
product_family = 'caesar'
question_type = 'caesar'
plaintext = $p
key = $k
hotspots = $h
'''
if len(sys.argv) != 3:
print 'Usage: caesar_generate.py template question_directory'
sys.exit(1)
try:
template = file_util.dynamic_import(sys.argv[1])
except ImportError:
    print 'Failure while importing', sys.argv[1]
sys.exit(2)
for group in template.question_groups:
prefix = group[0]
question_id = 0
    # We should generate the cipher using the algorithm rather than loading it in... later...
for (plaintext, key, hotspots) in group[1:]:
path = os.path.join(sys.argv[2], prefix + str(question_id))
if not os.path.exists(path):
os.mkdir(path)
config_string = template_string
config_string = config_string.replace('$p', "'" + plaintext + "'")
config_string = config_string.replace('$k', str(key))
config_string = config_string.replace('$h', str(hotspots))
file_util.write_string(os.path.join(path,'cqg_config.py'), config_string)
question_id += 1
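# Illustrative template module (structure inferred from the loop above; the
# values are examples):
#     question_groups = [
#         ('caesar_easy_', ('ATTACKATDAWN', 3, [0, 1]),
#                          ('HELLOWORLD', 5, [2, 3])),
#     ]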
|
{
"content_hash": "a808f7ae6551dbb8ca6bf05a7986449b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 95,
"avg_line_length": 26.57777777777778,
"alnum_prop": 0.6989966555183946,
"repo_name": "stryder199/RyarkAssignments",
"id": "056fe96237c558d11039461ff28a95006503ea3a",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment2/bin/caesar_generate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1754"
},
{
"name": "CSS",
"bytes": "126846"
},
{
"name": "JavaScript",
"bytes": "615432"
},
{
"name": "PHP",
"bytes": "14898"
},
{
"name": "Python",
"bytes": "3757772"
},
{
"name": "R",
"bytes": "413"
},
{
"name": "Shell",
"bytes": "38797"
},
{
"name": "VimL",
"bytes": "215"
}
],
"symlink_target": ""
}
|
""" tests for MetatasDataset """
# pylint: disable=missing-function-docstring,protected-access,unused-argument,too-many-arguments
import datetime
import logging
import os
import time
import pandas as pd
import pytest
import traitlets
from metatlas.datastructures import metatlas_dataset as mads
from metatlas.datastructures import metatlas_objects as metob
from metatlas.datastructures import object_helpers as metoh
from metatlas.io import metatlas_get_data_helper_fun as ma_data
def test_metatlas_dataset_build01(metatlas_dataset):
assert len(metatlas_dataset) == 1
assert len(metatlas_dataset[0]) == 1
assert metatlas_dataset[0][0]["identification"].compound[0].inchi_key == "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
assert metatlas_dataset[0][0]["data"]["ms1_summary"]["rt_peak"] == 2.2922415733
assert (
metatlas_dataset[0][0]["lcmsrun"].experiment
== "20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583"
)
@pytest.mark.xfail
def test_metatlas_dataset_build02(mocker, atlas, group_with_2_lcmsruns, df_container, analysis_ids):
# need to mock multiprocessing for this to work
mocker.patch(
"metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
)
metatlas_dataset = mads.MetatlasDataset(ids=analysis_ids, max_cpus=2)
assert len(metatlas_dataset) == 2
assert len(metatlas_dataset[0]) == 1
def test_filter_compounds_ms1_notes_remove01(mocker, metatlas_dataset_with_2_cids, compound):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
metatlas_dataset = metatlas_dataset_with_2_cids
metatlas_dataset.filter_compounds_ms1_notes_remove()
assert len(metatlas_dataset[0]) == 2
metatlas_dataset.set_note(1, "ms1_notes", "Remove")
metatlas_dataset.filter_compounds_ms1_notes_remove()
assert len(metatlas_dataset[0]) == 1
def test_filter_compounds01(metatlas_dataset_with_2_cids):
# mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
metatlas_dataset = metatlas_dataset_with_2_cids
metatlas_dataset.filter_compounds(remove_idxs=[])
assert len(metatlas_dataset[0]) == 2
assert len(metatlas_dataset.atlas.compound_identifications) == 2
assert metatlas_dataset.atlas_df.shape[0] == 2
metatlas_dataset.filter_compounds(keep_idxs=[0, 1])
assert len(metatlas_dataset[0]) == 2
assert len(metatlas_dataset.atlas.compound_identifications) == 2
assert metatlas_dataset.atlas_df.shape[0] == 2
metatlas_dataset.filter_compounds(keep_idxs=[])
assert len(metatlas_dataset.atlas.compound_identifications) == 0
assert metatlas_dataset.atlas_df.shape[0] == 0
with pytest.raises(ValueError):
metatlas_dataset.filter_compounds()
def test_filter_compounds02(mocker, metatlas_dataset_with_2_cids, compound):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
metatlas_dataset = metatlas_dataset_with_2_cids
with pytest.raises(ValueError):
metatlas_dataset.filter_compounds(keep_idxs=[0], remove_idxs=[1])
def test_filter_compounds03(mocker, metatlas_dataset, compound):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
with pytest.raises(IndexError):
metatlas_dataset.filter_compounds(keep_idxs=[999])
def test_filter_compounds04(mocker, metatlas_dataset, compound):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
with pytest.raises(IndexError):
metatlas_dataset.filter_compounds(remove_idxs=[999])
def test_filter_compounds05(metatlas_dataset_with_2_cids, username):
original_rt_min = metatlas_dataset_with_2_cids.rts[1].rt_min
updated_rt_min = 9.99
metatlas_dataset_with_2_cids.set_rt(1, "rt_min", updated_rt_min)
metatlas_dataset_with_2_cids.filter_compounds(remove_idxs=[0])
atlas = metob.retrieve("Atlas", name=metatlas_dataset_with_2_cids.atlas.name, username=username)[0]
assert atlas.compound_identifications[0].rt_references[0].rt_min != original_rt_min
assert atlas.compound_identifications[0].rt_references[0].rt_min == updated_rt_min
def test_filter_compounds06(metatlas_dataset):
metatlas_dataset.filter_compounds(keep_idxs=[])
assert len(metatlas_dataset) == 0
def test_filter_compounds07(metatlas_dataset_with_2_cids):
metatlas_dataset_with_2_cids.filter_compounds(remove_idxs=[0])
assert len(metatlas_dataset_with_2_cids[0]) == 1
def test_filter_hits_by_atlas01(mocker, metatlas_dataset_with_2_cids, hits, compound):
mocker.patch("metatlas.plots.dill2plots.get_msms_hits", return_value=hits)
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
hits = metatlas_dataset_with_2_cids.hits
start_num = len(hits)
metatlas_dataset_with_2_cids.filter_compounds(keep_idxs=[1])
assert start_num > len(metatlas_dataset_with_2_cids.hits)
metatlas_dataset_with_2_cids.filter_compounds(remove_idxs=[0])
assert len(metatlas_dataset_with_2_cids.hits) == 0
def test_filter_hits_by_atlas02(mocker, metatlas_dataset_with_2_cids, hits, compound):
mocker.patch("metatlas.plots.dill2plots.get_msms_hits", return_value=hits)
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
start_num = len(metatlas_dataset_with_2_cids.hits)
metatlas_dataset_with_2_cids.filter_hits_by_atlas()
assert start_num > len(metatlas_dataset_with_2_cids.hits)
def test_polarity(metatlas_dataset):
assert metatlas_dataset.polarity == "positive"
assert len(metatlas_dataset[0]) == 1
metatlas_dataset.filter_compounds(remove_idxs=[0])
assert metatlas_dataset.polarity == "positive"
def test_extra_time_setter(metatlas_dataset, hits, mocker):
mocker.patch("metatlas.plots.dill2plots.get_msms_hits", return_value=hits)
metatlas_dataset.hits # pylint: disable=pointless-statement
assert metatlas_dataset._hits is not None
metatlas_dataset.extra_time = 0.3
assert metatlas_dataset._hits is None
metatlas_dataset.hits # pylint: disable=pointless-statement
assert metatlas_dataset._hits is not None
def test_rts01(metatlas_dataset):
metatlas_dataset.set_rt(0, "rt_min", 9.99)
assert metatlas_dataset.rts[0].rt_min == 9.99
assert len(metatlas_dataset.rts) == 1
def test_rts02(metatlas_dataset):
metatlas_dataset._atlas_df = None
metatlas_dataset.set_rt(0, "rt_max", 9.99)
assert metatlas_dataset.rts[0].rt_max == 9.99
assert len(metatlas_dataset.rts) == 1
def test_rts03(metatlas_dataset, analysis_ids):
assert metatlas_dataset.rts[0].rt_max != 9.99
metatlas_dataset.set_rt(0, "rt_max", 9.99)
second_metatlas_dataset = mads.MetatlasDataset(ids=analysis_ids)
assert second_metatlas_dataset.rts[0].rt_max == 9.99
assert len(second_metatlas_dataset.rts) == 1
def test_rts04(analysis_ids, sqlite_with_atlas, mocker, lcmsrun, df_container):
mocker.patch(
"metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
)
mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
first = mads.MetatlasDataset(ids=analysis_ids)
first.set_rt(0, "rt_max", 1.11)
second = mads.MetatlasDataset(ids=analysis_ids)
assert second.rts[0].rt_max == 1.11
second.set_rt(0, "rt_max", 2.22)
third = mads.MetatlasDataset(ids=analysis_ids)
assert third.rts[0].rt_max == 2.22
def test_set_note01(metatlas_dataset, sqlite):
metatlas_dataset.set_note(0, "ms2_notes", "Foobar")
assert metatlas_dataset[0][0]["identification"].ms2_notes == "Foobar"
def test_set_note02(metatlas_dataset):
metatlas_dataset._atlas_df = None
metatlas_dataset.set_note(0, "ms1_notes", "keeper")
assert metatlas_dataset[0][0]["identification"].ms1_notes == "keeper"
def test_compound_indices_marked_remove01(sqlite, metatlas_dataset):
assert len(metatlas_dataset.compound_indices_marked_remove) == 0
metatlas_dataset.set_note(0, "ms1_notes", "REMOVE")
assert len(metatlas_dataset.compound_indices_marked_remove) == 1
def test_set_nested01():
with pytest.raises(ValueError):
mads._set_nested([], [], 0)
def test_set_nested02(atlas):
mads._set_nested(atlas, ["compound_identifications", 0, ("compound",), 0, ("inchi_key",)], "FOOBAR")
assert atlas.compound_identifications[0].compound[0].inchi_key == "FOOBAR"
def test_set_nested03(atlas):
mads._set_nested(atlas, ["name"], "My Atlas")
assert atlas.name == "My Atlas"
def test_set_nested04(atlas):
with pytest.raises(TypeError):
mads._set_nested(atlas, ["zoop"], None)
def test_set_nested05():
my_dict = {}
mads._set_nested(my_dict, ["zoop"], None)
assert my_dict["zoop"] is None
def test_error_if_bad_idxs():
data = pd.DataFrame(data={"a": [1, 2], "b": [3, 4]})
mads._error_if_bad_idxs(data, [0])
with pytest.raises(IndexError):
mads._error_if_bad_idxs(data, [2])
def test_is_remove():
assert not mads._is_remove([])
assert not mads._is_remove("foobar")
assert mads._is_remove("Remove")
assert mads._is_remove("REMOVE AND MORE")
def test_duration_since():
assert mads._duration_since(datetime.datetime.now()) == "0.00 seconds"
def test_filter_compounds_by_signal01(mocker, metatlas_dataset_with_2_cids, df_container, compound):
mocker.patch(
"metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
)
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[compound])
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids.filter_compounds_by_signal(73, 2.30e6)
assert len(metatlas_dataset_with_2_cids[0]) == 1
metatlas_dataset_with_2_cids.filter_compounds_by_signal(73, 2.36e6)
assert metatlas_dataset_with_2_cids[0].compounds == ()
def test_filter_compounds_by_signal02(mocker, metatlas_dataset_with_2_cids, df_container):
mocker.patch(
"metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
)
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids.filter_compounds_by_signal(74, 1e5)
assert metatlas_dataset_with_2_cids[0].compounds == ()
def test_export_atlas_to_csv01(metatlas_dataset, tmp_path):
out_file = tmp_path / "export.csv"
metatlas_dataset.export_atlas_to_csv(out_file)
in_df = pd.read_csv(out_file)
assert list(in_df.columns) == [
"Unnamed: 0",
"chebi_id",
"chebi_url",
"creation_time",
"description",
"formula",
"head_id",
"hmdb_id",
"hmdb_url",
"img_abc_id",
"inchi",
"inchi_key",
"iupac_name",
"kegg_id",
"kegg_url",
"last_modified",
"lipidmaps_id",
"lipidmaps_url",
"metacyc_id",
"mono_isotopic_molecular_weight",
"name",
"neutralized_2d_inchi",
"neutralized_2d_inchi_key",
"neutralized_inchi",
"neutralized_inchi_key",
"num_free_radicals",
"number_components",
"permanent_charge",
"prev_uid",
"pubchem_compound_id",
"pubchem_url",
"source",
"synonyms",
"unique_id",
"username",
"wikipedia_url",
"label",
"id_notes",
"ms1_notes",
"ms2_notes",
"identification_notes",
"rt_min",
"rt_max",
"rt_peak",
"mz",
"mz_tolerance",
"adduct",
"polarity",
]
assert len(in_df) == 1
assert in_df.loc[0, "inchi_key"] == "OLXZPDWKRNYJJZ-RRKCRQDMSA-N"
def test_atlas_setter01(metatlas_dataset, atlas_with_2_cids):
metatlas_dataset.data # pylint: disable=pointless-statement
metatlas_dataset.atlas = atlas_with_2_cids
assert metatlas_dataset._data is None
assert len(metatlas_dataset[0]) == 2
def test_atlas_setter02(metatlas_dataset):
with pytest.raises(traitlets.traitlets.TraitError):
metatlas_dataset.atlas = [1, 2]
def test_groups01(metatlas_dataset):
assert metatlas_dataset.ids.groups[0].short_name == "POS_Cone-S1"
def test_set_extra_mz_setter(metatlas_dataset, mocker, hits):
mocker.patch("metatlas.plots.dill2plots.get_msms_hits", return_value=hits)
metatlas_dataset.data # pylint: disable=pointless-statement
metatlas_dataset.hits # pylint: disable=pointless-statement
metatlas_dataset.extra_mz = 0.43
assert metatlas_dataset._all_data is None
assert metatlas_dataset._data is None
assert metatlas_dataset._hits is None
assert metatlas_dataset.extra_mz == 0.43
def test_set_keep_nonmatches_setter(metatlas_dataset, mocker, hits):
mocker.patch("metatlas.plots.dill2plots.get_msms_hits", return_value=hits)
metatlas_dataset.hits # pylint: disable=pointless-statement
metatlas_dataset.keep_nonmatches = False
assert metatlas_dataset._hits is None
assert not metatlas_dataset.keep_nonmatches
def test_store_atlas01(atlas, sqlite, username):
atlas.name = "test_store_atlas01"
atlas_list = metob.retrieve("Atlas", name=atlas.name, username=username)
assert len(atlas_list) == 0
metob.store(atlas)
second = metob.retrieve("Atlas", name=atlas.name, username=username)
assert len(second) == 1
def test_store_atlas02(metatlas_dataset, username):
atlas_list = metob.retrieve("Atlas", name=metatlas_dataset.ids.source_atlas, username=username)
assert len(atlas_list) == 1
second = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(second) == 1
metatlas_dataset.store_atlas(even_if_exists=True)
second = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(second) == 1
def test_store_atlas03(metatlas_dataset, atlas, sqlite, username):
metatlas_dataset.atlas.name = "test_store_atlas01"
atlas_list = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(atlas_list) == 0
metatlas_dataset.store_atlas()
second = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(second) == 1
def test_store_atlas04(metatlas_dataset, sqlite, username):
metatlas_dataset.atlas.name = "test_store_atlas01"
atlas_list = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(atlas_list) == 0
metatlas_dataset.store_atlas()
second = metob.retrieve("Atlas", name=metatlas_dataset.atlas.name, username=username)
assert len(second) == 1
metatlas_dataset.store_atlas(even_if_exists=True)
with pytest.raises(ValueError):
metatlas_dataset.store_atlas()
def test_store_atlas05(atlas, sqlite, username):
atlas.name = "test atlas"
metob.store(atlas)
second = metob.retrieve("Atlas", name=atlas.name, username=username)
assert len(second) == 1
def test_store_atlas06(atlas, sqlite_with_atlas, username):
atlas.name = "test atlas 06"
metob.store(atlas)
second = metob.retrieve("Atlas", name=atlas.name, username=username)
assert len(second) == 1
def test_store_atlas07(atlas, sqlite, username):
atlas.name = "test_store_atlas07"
metob.store(atlas)
metoh.Workspace.instance = None
atlases = metob.retrieve("Atlas", name=atlas.name, username=username)
assert len(atlases) == 1
def test_write_data_source_files01(metatlas_dataset, mocker, caplog):
mocker.patch("glob.glob", return_value=range(10))
metatlas_dataset.write_data_source_files()
assert "Data sources directory already populated" in caplog.text
def test_write_data_source_files02(metatlas_dataset, mocker, caplog):
mocker.patch("glob.glob", return_value=range(3))
mocker.patch("shutil.rmtree")
mocker.patch("metatlas.io.metatlas_get_data_helper_fun.make_data_sources_tables")
caplog.set_level(logging.INFO)
metatlas_dataset.write_data_source_files()
assert "Writing data source files to" in caplog.text
assert ma_data.make_data_sources_tables.called # pylint: disable=no-member
def test_get_atlas01(mocker, analysis_ids, df_container, lcmsrun, username):
mocker.patch(
"metatlas.io.metatlas_get_data_helper_fun.df_container_from_metatlas_file", return_value=df_container
)
mocker.patch("metatlas.plots.dill2plots.get_metatlas_files", return_value=[lcmsrun])
mocker.patch("glob.glob", return_value=range(10))
metatlas_dataset = mads.MetatlasDataset(ids=analysis_ids)
assert (
metatlas_dataset.atlas.name
== f"505892_OakGall_final_HILICz150_ANT20190824_TPL_EMA_Unlab_POS_{username}_0_0"
)
def test_get_atlas02(mocker, analysis_ids, caplog):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[])
caplog.set_level(logging.INFO)
with pytest.raises(ValueError):
mads.MetatlasDataset(ids=analysis_ids)
assert "Database does not contain an atlas" in caplog.text
def test_get_atlas03(mocker, analysis_ids, caplog, username):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[0, 0])
with pytest.raises(ValueError):
mads.MetatlasDataset(ids=analysis_ids)
atlas = f"505892_OakGall_final_HILICz150_ANT20190824_TPL_EMA_Unlab_POS_{username}_0_0"
assert f"2 atlases with name {atlas} and owned by {username} already exist." in caplog.text
def test_get_atlas04(metatlas_dataset, username):
atlases = metob.retrieve("Atlas", name="This_atlas_does_not_exists", username=username)
assert len(atlases) == 0
def test_existing_groups(mocker, metatlas_dataset):
"""This test has little value, but is needed for coverage"""
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[])
assert metatlas_dataset.ids.existing_groups == []
def test_lcmsruns_dataframe(metatlas_dataset):
assert metatlas_dataset.ids.lcmsruns_dataframe.shape == (1, 15)
def test_store_groups01(metatlas_dataset, mocker):
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[])
mocker.patch("metatlas.datastructures.metatlas_objects.store")
metatlas_dataset.ids.store_all_groups()
assert metob.store.called # pylint: disable=no-member
def test_store_groups02(metatlas_dataset, mocker, username):
def group():
pass
group.name = (
f"20201106_JGI-AK_PS-KM_505892_OakGall_final_QE-HF_HILICZ_USHXG01583_POS_MSMS_{username}_0_0_Cone-S1"
)
mocker.patch("metatlas.datastructures.metatlas_objects.retrieve", return_value=[group])
with pytest.raises(ValueError):
metatlas_dataset.ids.store_all_groups()
def test_short_polarity_inverse01(analysis_ids):
assert set(analysis_ids.short_polarity_inverse) == {"NEG", "FPS"}
def test_access_data_compound_name(metatlas_dataset):
assert metatlas_dataset.data[0][0]["identification"].name == "2'-deoxyadenosine"
def test_cid_type01(atlas):
assert isinstance(atlas.compound_identifications[0], metob.CompoundIdentification)
def test_load_atlas01(atlas, sqlite_with_atlas, username):
atlases = metob.retrieve("Atlas", name=atlas.name, username=username)
assert isinstance(atlases[0].compound_identifications[0], metob.CompoundIdentification)
def test_load_atlas02(atlas, sqlite_with_atlas, username):
results = metob.retrieve("Atlas", name=atlas.name, username=username)
assert isinstance(results[0].compound_identifications[0], metob.CompoundIdentification)
def test_load_atlas03(sqlite_with_atlas, atlas, username):
results = metob.retrieve("Atlas", name=atlas.name, username=username)
assert results[0].compound_identifications[0].rt_references[0].rt_peak == 2.1964640053707174
def test_invalidation01(analysis_ids):
_ = analysis_ids.groups
assert analysis_ids._groups is not None
analysis_ids.set_trait("exclude_lcmsruns", ["Cone-S1"])
assert analysis_ids._groups is None
def test_project01(analysis_ids):
assert analysis_ids.project == "505892"
def test_exclude_files01(analysis_ids):
analysis_ids.set_trait("exclude_lcmsruns", ["POS"])
    # Error because 0 LCMS runs remain after the exclusion
with pytest.raises(ValueError):
_ = analysis_ids.lcmsruns
def test_invalidate_groups_controlled_vocab01(analysis_ids):
_ = analysis_ids.lcmsruns
assert analysis_ids._lcmsruns is not None
analysis_ids.set_trait("groups_controlled_vocab", ["FOOBAR"])
assert analysis_ids._lcmsruns is None
def test_has_selection01():
assert mads._has_selection("foobar")
assert not mads._has_selection(None)
assert not mads._has_selection("")
assert not mads._has_selection("no selection")
assert not mads._has_selection("NO Selection")
def test_cache_dir(metatlas_dataset):
assert os.path.isdir(metatlas_dataset.ids.cache_dir)
def test_cache01(metatlas_dataset):
data = "foobar"
metadata = {"_variable_name": "test_var"}
metatlas_dataset._save_to_cache(data, metadata)
assert metatlas_dataset._query_cache(metadata) == data
def test_cache02(metatlas_dataset):
metadata = {"_variable_name": "test_var", "foo": "bar"}
metatlas_dataset._save_to_cache("", metadata)
metadata["new_key"] = ""
assert metatlas_dataset._query_cache(metadata) is None
del metadata["new_key"]
metadata["foo"] = "zoop"
assert metatlas_dataset._query_cache(metadata) is None
del metadata["foo"]
assert metatlas_dataset._query_cache(metadata) is None
def test_cache03(metatlas_dataset):
data = "first"
metadata = {"_variable_name": "test_var"}
metatlas_dataset._save_to_cache(data, metadata)
time.sleep(1)
data = "second"
metadata = {"_variable_name": "test_var"}
metatlas_dataset._save_to_cache(data, metadata)
assert metatlas_dataset._query_cache(metadata) == data
def test_save_to_cache(metatlas_dataset):
with pytest.raises(AssertionError):
metatlas_dataset._save_to_cache("", {})
def test_query_cache01(metatlas_dataset):
with pytest.raises(AssertionError):
metatlas_dataset._query_cache({})
def test_query_cache02(metatlas_dataset):
assert metatlas_dataset._query_cache({"_variable_name": "foobar"}) is None
def test_chromatography01(metatlas_dataset):
assert metatlas_dataset.ids.chromatography == "HILIC"
def test_chromatography02(metatlas_dataset):
metatlas_dataset.ids.lcmsruns[0].name = "_".join(["x"] * 7 + ["Ag683775-foobar"])
assert metatlas_dataset.ids.chromatography == "HILIC"
def test_chromatography03(metatlas_dataset):
metatlas_dataset.ids.lcmsruns[0].name = "_".join(["x"] * 7 + ["C18-DNAsip"])
assert metatlas_dataset.ids.chromatography == "C18"
def test_chromatography04(metatlas_dataset, caplog):
caplog.set_level(logging.INFO)
metatlas_dataset.ids.lcmsruns[0].name = "_".join(["x"] * 7 + ["foobar"])
chrom_type = metatlas_dataset.ids.chromatography
assert chrom_type == "foobar"
assert "Unknown chromatography field 'foobar'" in caplog.text
def test_rt_change_then_filter(metatlas_dataset_with_2_cids):
original_rt_min = metatlas_dataset_with_2_cids.rts[1].rt_min
updated_rt_min = 9.99
metatlas_dataset_with_2_cids.set_rt(1, "rt_min", updated_rt_min)
metatlas_dataset_with_2_cids.invalidate_data()
assert metatlas_dataset_with_2_cids.rts[1].rt_min != original_rt_min
assert metatlas_dataset_with_2_cids.rts[1].rt_min == updated_rt_min
def test_remove_compound_id01(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._remove_compound_id(0)
assert len(metatlas_dataset_with_2_cids[0]) == 1
assert len(metatlas_dataset_with_2_cids._all_data[0]) == 1
def test_remove_compound_id02(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._remove_compound_id(1)
assert len(metatlas_dataset_with_2_cids[0]) == 1
def test_filter_data01(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._filter_data([0])
assert len(metatlas_dataset_with_2_cids[0]) == 1
assert len(metatlas_dataset_with_2_cids._all_data[0]) == 1
def test_filter_data02(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._filter_data([1])
assert len(metatlas_dataset_with_2_cids[0]) == 1
def test_filter_data03(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._filter_data([])
assert len(metatlas_dataset_with_2_cids._data[0]) == 0
def test_filter_data04(metatlas_dataset_with_2_cids):
assert len(metatlas_dataset_with_2_cids[0]) == 2
metatlas_dataset_with_2_cids._filter_data([1, 0])
assert len(metatlas_dataset_with_2_cids[0]) == 2
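# Run a subset with, e.g. (path taken from this repo's layout):
#     pytest tests/unit/test_metatlas_dataset.py -k "filter_compounds"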
|
{
"content_hash": "fe83426d617323bbbe90755130f77143",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 109,
"avg_line_length": 37.55223880597015,
"alnum_prop": 0.7043720190779015,
"repo_name": "biorack/metatlas",
"id": "5c7f7fbae0d7d06c516acec0444ca8d2773e06c2",
"size": "25160",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/test_metatlas_dataset.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "4850"
},
{
"name": "Jupyter Notebook",
"bytes": "1233246"
},
{
"name": "Python",
"bytes": "1501450"
},
{
"name": "Shell",
"bytes": "66479"
},
{
"name": "wdl",
"bytes": "18796"
}
],
"symlink_target": ""
}
|
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__,'ifilter.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
|
{
"content_hash": "0ae3ac009e7015969d8d124f7eb54af6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 39.714285714285715,
"alnum_prop": 0.5971223021582733,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "e6811d66a173249f632dd71311731bbe4db88079",
"size": "278",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pywin32-219-py2.7-win32.egg/win32comext/ifilter/ifilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
}
|
from pathlib import Path
ADDON_ID = __package__
ADDON_DIR = Path(__file__).parent
CONFIG_DIR = ADDON_DIR / ".config"
if not CONFIG_DIR.exists():
import sys
if sys.platform == "win32":
CONFIG_DIR = Path.home() / "AppData" / "Roaming" / "Blender Foundation" / "Blender" / "JewelCraft"
elif sys.platform == "darwin":
CONFIG_DIR = Path.home() / "Library" / "Application Support" / "Blender" / "JewelCraft"
else:
CONFIG_DIR = Path.home() / ".config" / "blender" / "JewelCraft"
GEM_ASSET_DIR = ADDON_DIR / "assets" / "gems"
GEM_ASSET_FILEPATH = GEM_ASSET_DIR / "gems.blend"
ICONS_DIR = ADDON_DIR / "assets" / "icons"
HTML_DESIGN_REPORT_DIR = ADDON_DIR / "assets" / "templates" / "design_report"
WEIGHTING_LIB_BUILTIN_DIR = ADDON_DIR / "assets" / "weighting"
WEIGHTING_LIB_USER_DIR = CONFIG_DIR / "Weighting Library"
ASSET_LIBS_FILEPATH = CONFIG_DIR / "libraries.json"
ASSET_FAVS_FILEPATH = CONFIG_DIR / "favorites.json"
|
{
"content_hash": "22ab4a0249e89fbded68ba7731c6e2c2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 106,
"avg_line_length": 36.92307692307692,
"alnum_prop": 0.6552083333333333,
"repo_name": "mrachinskiy/blender-addon-jewelcraft",
"id": "478a1267ec8bc744359baa42998771883f4edcb3",
"size": "1046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "var.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46466"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from django.views.decorators.csrf import csrf_exempt
from oscar.core.application import Application
from accounts.api import views, decorators
class APIApplication(Application):
name = None
accounts_view = views.AccountsView
account_view = views.AccountView
account_redemptions_view = views.AccountRedemptionsView
account_refunds_view = views.AccountRefundsView
transfer_view = views.TransferView
transfer_reverse_view = views.TransferReverseView
transfer_refunds_view = views.TransferRefundsView
def get_urls(self):
urlpatterns = patterns('',
url(r'^accounts/$',
self.accounts_view.as_view(),
name='accounts'),
url(r'^accounts/(?P<code>[A-Z0-9]+)/$',
self.account_view.as_view(),
name='account'),
url(r'^accounts/(?P<code>[A-Z0-9]+)/redemptions/$',
self.account_redemptions_view.as_view(),
name='account-redemptions'),
url(r'^accounts/(?P<code>[A-Z0-9]+)/refunds/$',
self.account_refunds_view.as_view(),
name='account-refunds'),
url(r'^transfers/(?P<reference>[A-Z0-9]{32})/$',
self.transfer_view.as_view(),
name='transfer'),
url(r'^transfers/(?P<reference>[A-Z0-9]{32})/reverse/$',
self.transfer_reverse_view.as_view(),
name='transfer-reverse'),
url(r'^transfers/(?P<reference>[A-Z0-9]{32})/refunds/$',
self.transfer_refunds_view.as_view(),
name='transfer-refunds'),
)
return self.post_process_urls(urlpatterns)
def get_url_decorator(self, url_name):
return lambda x: csrf_exempt(decorators.basicauth(x))
application = APIApplication()
|
{
"content_hash": "8d565c26655501616357a17fad190497",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 68,
"avg_line_length": 36.68627450980392,
"alnum_prop": 0.5943345804382683,
"repo_name": "amsys/django-account-balances",
"id": "1cf52c842e20f8826e1532e98270fee595b5a585",
"size": "1871",
"binary": false,
"copies": "6",
"ref": "refs/heads/remove-oscar",
"path": "accounts/api/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "31075"
},
{
"name": "Makefile",
"bytes": "359"
},
{
"name": "Python",
"bytes": "149657"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_cluster
short_description: Manages host clusters on Apache CloudStack based clouds.
description:
- Create, update and remove clusters.
version_added: '2.1'
author: René Moser (@resmo)
options:
name:
description:
- name of the cluster.
type: str
required: true
zone:
description:
- Name of the zone in which the cluster belongs to.
- If not set, default zone is used.
type: str
pod:
description:
- Name of the pod in which the cluster belongs to.
type: str
cluster_type:
description:
- Type of the cluster.
- Required if I(state=present)
type: str
choices: [ CloudManaged, ExternalManaged ]
hypervisor:
description:
- Name the hypervisor to be used.
- Required if I(state=present).
- Possible values are C(KVM), C(VMware), C(BareMetal), C(XenServer), C(LXC), C(HyperV), C(UCS), C(OVM), C(Simulator).
type: str
url:
description:
- URL for the cluster
type: str
username:
description:
- Username for the cluster.
type: str
password:
description:
- Password for the cluster.
type: str
guest_vswitch_name:
description:
- Name of virtual switch used for guest traffic in the cluster.
- This would override zone wide traffic label setting.
type: str
guest_vswitch_type:
description:
- Type of virtual switch used for guest traffic in the cluster.
- Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
type: str
choices: [ vmwaresvs, vmwaredvs ]
public_vswitch_name:
description:
- Name of virtual switch used for public traffic in the cluster.
- This would override zone wide traffic label setting.
type: str
public_vswitch_type:
description:
- Type of virtual switch used for public traffic in the cluster.
- Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
type: str
choices: [ vmwaresvs, vmwaredvs ]
vms_ip_address:
description:
- IP address of the VSM associated with this cluster.
type: str
vms_username:
description:
- Username for the VSM associated with this cluster.
type: str
vms_password:
description:
- Password for the VSM associated with this cluster.
type: str
ovm3_cluster:
description:
- Ovm3 native OCFS2 clustering enabled for cluster.
type: str
ovm3_pool:
description:
- Ovm3 native pooling enabled for cluster.
type: str
ovm3_vip:
description:
- Ovm3 vip to use for pool (and cluster).
type: str
state:
description:
- State of the cluster.
type: str
choices: [ present, absent, disabled, enabled ]
default: present
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Ensure a cluster is present
cs_cluster:
name: kvm-cluster-01
zone: ch-zrh-ix-01
hypervisor: KVM
cluster_type: CloudManaged
delegate_to: localhost
- name: Ensure a cluster is disabled
cs_cluster:
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: disabled
delegate_to: localhost
- name: Ensure a cluster is enabled
cs_cluster:
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: enabled
delegate_to: localhost
- name: Ensure a cluster is absent
cs_cluster:
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: absent
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the cluster.
returned: success
type: str
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the cluster.
returned: success
type: str
sample: cluster01
allocation_state:
description: State of the cluster.
returned: success
type: str
sample: Enabled
cluster_type:
description: Type of the cluster.
returned: success
type: str
sample: ExternalManaged
cpu_overcommit_ratio:
description: The CPU overcommit ratio of the cluster.
returned: success
type: str
sample: 1.0
memory_overcommit_ratio:
description: The memory overcommit ratio of the cluster.
returned: success
type: str
sample: 1.0
managed_state:
description: Whether this cluster is managed by CloudStack.
returned: success
type: str
sample: Managed
ovm3_vip:
description: Ovm3 VIP to use for pooling and/or clustering
returned: success
type: str
sample: 10.10.10.101
hypervisor:
description: Hypervisor of the cluster
returned: success
type: str
sample: VMware
zone:
description: Name of zone the cluster is in.
returned: success
type: str
sample: ch-gva-2
pod:
description: Name of pod the cluster is in.
returned: success
type: str
sample: pod01
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackCluster(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackCluster, self).__init__(module)
self.returns = {
'allocationstate': 'allocation_state',
'hypervisortype': 'hypervisor',
'clustertype': 'cluster_type',
'podname': 'pod',
'managedstate': 'managed_state',
'memoryovercommitratio': 'memory_overcommit_ratio',
'cpuovercommitratio': 'cpu_overcommit_ratio',
'ovm3vip': 'ovm3_vip',
}
self.cluster = None
def _get_common_cluster_args(self):
args = {
'clustername': self.module.params.get('name'),
'hypervisor': self.module.params.get('hypervisor'),
'clustertype': self.module.params.get('cluster_type'),
}
state = self.module.params.get('state')
if state in ['enabled', 'disabled']:
args['allocationstate'] = state.capitalize()
return args
def get_pod(self, key=None):
args = {
'name': self.module.params.get('pod'),
'zoneid': self.get_zone(key='id'),
}
pods = self.query_api('listPods', **args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found in zone %s" % (self.module.params.get('pod'), self.get_zone(key='name')))
def get_cluster(self):
if not self.cluster:
args = {}
uuid = self.module.params.get('id')
if uuid:
args['id'] = uuid
clusters = self.query_api('listClusters', **args)
if clusters:
self.cluster = clusters['cluster'][0]
return self.cluster
args['name'] = self.module.params.get('name')
clusters = self.query_api('listClusters', **args)
if clusters:
self.cluster = clusters['cluster'][0]
                # normalize the result: the API returns different field names than the request arguments use
self.cluster['hypervisor'] = self.cluster['hypervisortype']
self.cluster['clustername'] = self.cluster['name']
return self.cluster
def present_cluster(self):
cluster = self.get_cluster()
if cluster:
cluster = self._update_cluster()
else:
cluster = self._create_cluster()
return cluster
def _create_cluster(self):
required_params = [
'cluster_type',
'hypervisor',
]
self.module.fail_on_missing_params(required_params=required_params)
args = self._get_common_cluster_args()
args['zoneid'] = self.get_zone(key='id')
args['podid'] = self.get_pod(key='id')
args['url'] = self.module.params.get('url')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
        args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
        args['vsmipaddress'] = self.module.params.get('vms_ip_address')
        args['vsmusername'] = self.module.params.get('vms_username')
        args['vsmpassword'] = self.module.params.get('vms_password')
args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
args['ovm3pool'] = self.module.params.get('ovm3_pool')
args['ovm3vip'] = self.module.params.get('ovm3_vip')
self.result['changed'] = True
cluster = None
if not self.module.check_mode:
res = self.query_api('addCluster', **args)
# API returns a list as result CLOUDSTACK-9205
if isinstance(res['cluster'], list):
cluster = res['cluster'][0]
else:
cluster = res['cluster']
return cluster
def _update_cluster(self):
cluster = self.get_cluster()
args = self._get_common_cluster_args()
args['id'] = cluster['id']
if self.has_changed(args, cluster):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateCluster', **args)
cluster = res['cluster']
return cluster
def absent_cluster(self):
cluster = self.get_cluster()
if cluster:
self.result['changed'] = True
args = {
'id': cluster['id'],
}
if not self.module.check_mode:
self.query_api('deleteCluster', **args)
return cluster
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
zone=dict(),
pod=dict(),
cluster_type=dict(choices=['CloudManaged', 'ExternalManaged']),
hypervisor=dict(),
state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
url=dict(),
username=dict(),
password=dict(no_log=True),
guest_vswitch_name=dict(),
guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs']),
public_vswitch_name=dict(),
public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs']),
vms_ip_address=dict(),
vms_username=dict(),
vms_password=dict(no_log=True),
ovm3_cluster=dict(),
ovm3_pool=dict(),
ovm3_vip=dict(),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_cluster = AnsibleCloudStackCluster(module)
state = module.params.get('state')
if state in ['absent']:
cluster = acs_cluster.absent_cluster()
else:
cluster = acs_cluster.present_cluster()
result = acs_cluster.get_result(cluster)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
{
"content_hash": "8bd3c20d761162108dd89a384153ff7c",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 125,
"avg_line_length": 29.425587467362924,
"alnum_prop": 0.6157054125998226,
"repo_name": "thaim/ansible",
"id": "f171fbe8461c62a2c0f1df9aaf17bbc09fa8bdf5",
"size": "11454",
"binary": false,
"copies": "25",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/cloudstack/cs_cluster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from positioning.entities import Sample
from positioning.links.base import Base
class ToFullSamples(Base):
def __init__(self, ap_data_dao, **kwargs):
self.ap_data_dao = ap_data_dao
def to_sample(self, stamp):
ap_datas = self.ap_data_dao.get_for_time_range(stamp.start_time, stamp.end_time, asc=False)
grouped = defaultdict(list)
for ap_data in ap_datas:
grouped[ap_data.router_mac.mac].append(ap_data)
return Sample(stamp, grouped)
def calculate(self, sample_stamps, **kwargs):
return {"samples": list(map(self.to_sample, sample_stamps))}
|
{
"content_hash": "9ab9dbe9a774c082fe6a616b7d4ffee8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 99,
"avg_line_length": 34.31578947368421,
"alnum_prop": 0.6733128834355828,
"repo_name": "maveron58/indiana",
"id": "dc61572c26d3cda914c9a9511bba2278fd62ea5d",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "positioning/links/transform/to_full_samples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1010"
},
{
"name": "HTML",
"bytes": "11543"
},
{
"name": "JavaScript",
"bytes": "34660"
},
{
"name": "Makefile",
"bytes": "560"
},
{
"name": "Python",
"bytes": "123694"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from traits.testing.unittest_tools import unittest, UnittestTools
from ..constant import CANCEL, NO, OK, YES
from ..gui import GUI
from ..toolkit import toolkit_object
from ..window import Window
ModalDialogTester = toolkit_object('util.modal_dialog_tester:ModalDialogTester')
no_modal_dialog_tester = (ModalDialogTester.__name__ == 'Unimplemented')
class TestWindow(unittest.TestCase, UnittestTools):
def setUp(self):
self.gui = GUI()
self.window = Window()
def test_destroy(self):
# test that destroy works even when no control
self.window.destroy()
def test_open_close(self):
# test that opening and closing works as expected
with self.assertTraitChanges(self.window, 'opening', count=1):
with self.assertTraitChanges(self.window, 'opened', count=1):
self.window.open()
self.gui.process_events()
with self.assertTraitChanges(self.window, 'closing', count=1):
with self.assertTraitChanges(self.window, 'closed', count=1):
self.window.close()
self.gui.process_events()
def test_show(self):
# test that showing works as expected
self.window._create()
self.window.show(True)
self.gui.process_events()
self.window.show(False)
self.gui.process_events()
self.window.destroy()
def test_activate(self):
# test that activation works as expected
self.window.open()
self.gui.process_events()
self.window.activate()
self.gui.process_events()
self.window.close()
def test_position(self):
# test that default position works as expected
self.window.position = (100, 100)
self.window.open()
self.gui.process_events()
self.window.close()
def test_reposition(self):
# test that changing position works as expected
self.window.open()
self.gui.process_events()
self.window.position = (100, 100)
self.gui.process_events()
self.window.close()
def test_size(self):
# test that default size works as expected
self.window.size = (100, 100)
self.window.open()
self.gui.process_events()
self.window.close()
def test_resize(self):
# test that changing size works as expected
self.window.open()
self.gui.process_events()
self.window.size = (100, 100)
self.gui.process_events()
self.window.close()
def test_title(self):
# test that default title works as expected
self.window.title = "Test Title"
self.window.open()
self.gui.process_events()
self.window.close()
def test_retitle(self):
# test that changing title works as expected
self.window.open()
self.gui.process_events()
self.window.title = "Test Title"
self.gui.process_events()
self.window.close()
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_confirm_reject(self):
# test that cancel works as expected
tester = ModalDialogTester(
lambda: self.window.confirm("message", cancel=True))
tester.open_and_run(when_opened=lambda x: x.close(accept=False))
self.assertEqual(tester.result, CANCEL)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_confirm_yes(self):
# test that yes works as expected
tester = ModalDialogTester(lambda: self.window.confirm("message"))
tester.open_and_wait(when_opened=lambda x: x.click_button(YES))
self.gui.process_events()
self.assertEqual(tester.result, YES)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_confirm_no(self):
# test that no works as expected
tester = ModalDialogTester(lambda: self.window.confirm("message"))
tester.open_and_wait(when_opened=lambda x: x.click_button(NO))
self.gui.process_events()
self.assertEqual(tester.result, NO)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_confirm_cancel(self):
        # test that cancel works as expected
tester = ModalDialogTester(
lambda: self.window.confirm("message", cancel=True))
tester.open_and_wait(when_opened=lambda x: x.click_button(CANCEL))
self.gui.process_events()
self.assertEqual(tester.result, CANCEL)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_information_accept(self):
# test that information works as expected
tester = ModalDialogTester(lambda: self.window.information("message"))
tester.open_and_run(when_opened=lambda x: x.close(accept=True))
self.assertIsNone(tester.result)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_information_ok(self):
# test that information works as expected
tester = ModalDialogTester(lambda: self.window.information("message"))
tester.open_and_wait(when_opened=lambda x: x.click_button(OK))
self.assertIsNone(tester.result)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_warning_accept(self):
# test that warning works as expected
tester = ModalDialogTester(lambda: self.window.warning("message"))
tester.open_and_run(when_opened=lambda x: x.close(accept=True))
self.assertIsNone(tester.result)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_warning_ok(self):
# test that warning works as expected
tester = ModalDialogTester(lambda: self.window.warning("message"))
tester.open_and_wait(when_opened=lambda x: x.click_button(OK))
self.assertIsNone(tester.result)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_error_accept(self):
# test that error works as expected
tester = ModalDialogTester(lambda: self.window.error("message"))
tester.open_and_run(when_opened=lambda x: x.close(accept=True))
self.assertIsNone(tester.result)
@unittest.skipIf(no_modal_dialog_tester, 'ModalDialogTester unavailable')
def test_error_ok(self):
# test that error works as expected
tester = ModalDialogTester(lambda: self.window.error("message"))
tester.open_and_wait(when_opened=lambda x: x.click_button(OK))
self.assertIsNone(tester.result)
|
{
"content_hash": "07dc26c56720902dd52af772f6d758e2",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 80,
"avg_line_length": 39.16470588235294,
"alnum_prop": 0.6631120456593572,
"repo_name": "geggo/pyface",
"id": "f322d396a81e49636ed24b44014c14f7833ae7c9",
"size": "6658",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyface/tests/test_window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2246684"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_shirt_field_09.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "666d61d00446ef05fd7126a4e3261aa8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 24.307692307692307,
"alnum_prop": 0.6993670886075949,
"repo_name": "obi-two/Rebelion",
"id": "cd5915edb1192150b55d7a551435ba579212eac0",
"size": "461",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/clothing/shared_clothing_shirt_field_09.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
import unittest
"""
Given a string, find the minimum number of characters to be inserted to convert it to palindrome.
Input: ab
Output: 1 (bab)
Input: aa
Output: 0
Input: abcd
Output: 3 (dcbabcd)
Input: abcda
Output: 2 (adcbcda) which is same as insertions for bcd.
"""
"""
Approach 1:
1. Let min_insertions(str, start, end) denote the minimum insertions for str[start...end]
2. This problem has the following optimal substructure:
   if str[start] == str[end]:
       min_insertions(str, start, end) = min_insertions(str, start+1, end-1)
   else:
       min_insertions(str, start, end) = 1 + min(min_insertions(str, start, end-1), min_insertions(str, start+1, end))
   with the base case min_insertions(str, start, end) = 0 when start >= end.
Approach 2:
1. Find the length of LCS between str and reverse(str).
2. Min insertions = N - length of LCS where N is number of characters in str.
"""
def min_insertions_to_make_palindrome(string):
n = len(string)
# table[i][j] denotes minimum insertions to make str[i...j] a palindrome.
# Final result is in table[0][n-1]
table = [[0] * n for _ in range(n)]
for L in range(2, n+1):
for i in range(n-L+1):
j = i+L-1
if string[i] != string[j]:
if L == 2:
table[i][j] = 1
else:
table[i][j] = 1 + min(table[i+1][j], table[i][j-1])
elif L > 2:
table[i][j] = table[i+1][j-1]
return table[0][n-1]
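# A minimal sketch of Approach 2 from the docstring: the answer is n minus
# the length of the LCS of the string and its reverse. Illustrative only;
# it mirrors the inputs of min_insertions_to_make_palindrome above.
def min_insertions_via_lcs(string):
    n = len(string)
    rev = string[::-1]
    # lcs[i][j] is the LCS length of string[:i] and rev[:j]
    lcs = [[0] * (n + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            if string[i - 1] == rev[j - 1]:
                lcs[i][j] = lcs[i - 1][j - 1] + 1
            else:
                lcs[i][j] = max(lcs[i - 1][j], lcs[i][j - 1])
    return n - lcs[n][n]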
class TestMinInsertions(unittest.TestCase):
def test_min_insertions(self):
string = 'geeks'
self.assertEqual(min_insertions_to_make_palindrome(string), 3)
string = 'abcde'
self.assertEqual(min_insertions_to_make_palindrome(string), 4)
|
{
"content_hash": "476fd95871033f928e14a71ae5f7f030",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 109,
"avg_line_length": 29.50877192982456,
"alnum_prop": 0.6135552913198573,
"repo_name": "prathamtandon/g4gproblems",
"id": "5e4d341292ec892c3fce990030d4df35a6a2bbc6",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DP/min_insertions_to_make_palindrome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "328776"
}
],
"symlink_target": ""
}
|
import os
import os.path
import tornado.escape
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
from handlers import base
from handlers import websockets
from ui import modules
from panoptes.utils import database
from panoptes.utils.config.client import get_config
from panoptes.utils.messaging import PanMessaging
tornado.options.define("port", default=8888, help="port", type=int)
tornado.options.define("debug", default=False, help="debug mode")
# tornado.options.define('log_file_prefix', default='/var/panoptes/logs/paws.log')
class WebAdmin(tornado.web.Application):
""" The main Application entry for our PANOPTES admin interface """
    def __init__(self, config=None):
        # avoid a shared mutable default argument; setdefault below mutates config
        config = config if config is not None else {}
db = database.PanDB()
msg_subscriber = PanMessaging.create_subscriber(6511, host='0.0.0.0')
cmd_publisher = PanMessaging.create_publisher(6500)
self._base_dir = '{}'.format(os.getenv('PAWS', default='/var/panoptes/PAWS'))
name = config.setdefault('name', 'PAWS')
server = config.setdefault('server_url', '0.0.0.0')
server_url = '{}:{}'.format(server, tornado.options.options.port)
app_handlers = [
(r"/", base.MainHandler),
(r"/observations/(.*)", base.ObservationsHistoryHandler),
(r"/ws/(.*)", websockets.PanWebSocket),
]
settings = dict(
cookie_secret="PANOPTES_SUPER_DOOPER_SECRET",
template_path=os.path.join(self._base_dir, "templates"),
static_path=os.path.join(self._base_dir, "static"),
xsrf_cookies=True,
db=db,
msg_subscriber=msg_subscriber,
cmd_publisher=cmd_publisher,
config=config,
name=name,
server_url=server_url,
site_title=name,
ui_modules=modules,
port=tornado.options.options.port,
compress_response=True,
debug=tornado.options.options.debug,
)
super().__init__(app_handlers, **settings)
if __name__ == '__main__':
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(WebAdmin(get_config(host='0.0.0.0')))
http_server.listen(tornado.options.options.port)
print("Starting PAWS on port {}".format(tornado.options.options.port))
tornado.ioloop.IOLoop.current().start()
|
{
"content_hash": "162fedf2d649c231f25b3208bc29ebe3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 85,
"avg_line_length": 34.042857142857144,
"alnum_prop": 0.6441460344104071,
"repo_name": "panoptes/PAWS",
"id": "9d609a2495bcaccd4781725e631373187067f3b1",
"size": "2383",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10474"
},
{
"name": "Dockerfile",
"bytes": "426"
},
{
"name": "HTML",
"bytes": "17318"
},
{
"name": "JavaScript",
"bytes": "88086"
},
{
"name": "Python",
"bytes": "8367"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('notifications', '0011_remove_gcmmessage_registrations_ids'),
]
operations = [
migrations.AddField(
model_name='gcmmessage',
name='registration_ids',
field=models.TextField(blank=True, help_text='The registration_ids that GCM says it delivered to'),
),
]
|
{
"content_hash": "94ecebc92151d81c3179998c615b71eb",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 111,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6413502109704642,
"repo_name": "tndatacommons/tndata_backend",
"id": "812e75867dda42f61e33a919eebc7a3e97d32cc1",
"size": "498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tndata_backend/notifications/migrations/0012_gcmmessage_registration_ids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29078"
},
{
"name": "HTML",
"bytes": "680433"
},
{
"name": "JavaScript",
"bytes": "186991"
},
{
"name": "Makefile",
"bytes": "393"
},
{
"name": "Python",
"bytes": "2023392"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
"""
oer_data_split.py
Using files from oer_processing and from RadTiltCorr, split files into daily,
weekly, or monthly files for easier analysis.
"""
#System Stack
import datetime
import argparse
import csv
#Science Stack
import numpy as np
import ephem
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 6, 7)
__modified__ = datetime.datetime(2016, 3, 7)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'mooring','csv','timeseries', 'dygraphs'
""" ---------------------------------- Data Read --------------------------------------"""
def CSV2Dic(filein):
reader = csv.DictReader(open(filein,'rU'))
result = {}
for row in reader:
for column, value in row.iteritems():
result.setdefault(column, []).append(value)
return result
"""------------------------------- MAIN ----------------------------------------"""
parser = argparse.ArgumentParser(description='OER Radiometric subsetting')
parser.add_argument('DataPath', metavar='DataPath', type=str, help='full path to file')
parser.add_argument('TimeBase', metavar='TimeBase', type=str, help='choose "daily", "weekly" or "monthly"')
args = parser.parse_args()
root_path = "/".join(args.DataPath.split('/')[:-1])
file_name = args.DataPath.split('/')[-1].split('.')[0]
in_dat = CSV2Dic(args.DataPath)
temp_date = [datetime.datetime.strptime(x,'%Y-%m-%d %H:%M:%S') for x in in_dat['Date']]
if args.TimeBase == "daily":
    # daily splitting is documented above but not implemented; no-op for now
    pass
elif args.TimeBase == "weekly":
for weeknum in range(1,53,1):
with open(root_path + '/' + file_name + 'w' + str(weeknum) + '.csv', 'w' ) as outfile:
print "Creating file for {0}".format(file_name + 'w' + str(weeknum) )
for ind, v in enumerate(temp_date):
if (v.isocalendar()[1] == weeknum):
#Date, GtDt, Heading, Pitch, Role, instzen, sunzen, sunaz, corr_sza, k_ratio, G_corr_factor, SPN Total
line = "{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12}\n".format(in_dat['Date'][ind],in_dat[' GtDt'][ind],in_dat[' Heading'][ind],in_dat[' Pitch'][ind],in_dat[' Role'][ind],\
in_dat[' instzen'][ind], in_dat[' instaz'][ind],in_dat[' sunzen'][ind],in_dat[' sunaz'][ind],in_dat[' corr_sza'][ind],in_dat[' k_ratio'][ind],in_dat[' G_corr_factor'][ind],in_dat[' SPN Total'][ind])
outfile.write(line)
elif args.TimeBase == "monthly":
for month in range(1,13,1):
with open(root_path + '/' + file_name + datetime.datetime.strptime(str(month),'%m').strftime('%m')+'m', 'w' ) as outfile:
print "Creating file for {0}".format(file_name + datetime.datetime.strptime(str(month),'%m').strftime('%m'))
for ind, v in enumerate(temp_date):
if (v.month == month):
#Date, GtDt, Heading, Pitch, Role, instzen, sunzen, sunaz, corr_sza, k_ratio, G_corr_factor, SPN Total
line = "{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12}\n".format(in_dat['Date'][ind],in_dat[' GtDt'][ind],in_dat[' Heading'][ind],in_dat[' Pitch'][ind],in_dat[' Role'][ind],\
in_dat[' instzen'][ind], in_dat[' instaz'][ind],in_dat[' sunzen'][ind],in_dat[' sunaz'][ind],in_dat[' corr_sza'][ind],in_dat[' k_ratio'][ind],in_dat[' G_corr_factor'][ind],in_dat[' SPN Total'][ind])
outfile.write(line)
else:
    print "Choose either daily, weekly or monthly for the TimeBase flag"
|
{
"content_hash": "720c1bf3d0c625b8961bd1c5045a638b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 218,
"avg_line_length": 46,
"alnum_prop": 0.5640732265446224,
"repo_name": "NOAA-PMEL/EcoFOCI_ENGR_Testing",
"id": "0726899b9b720144b37ac8e7346d0412ecf29480",
"size": "3519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oer_radiometer/oer_data_split.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "29840"
},
{
"name": "Python",
"bytes": "22400"
},
{
"name": "Shell",
"bytes": "1431"
}
],
"symlink_target": ""
}
|
"""
Usage: macresolve [options]
Options:
-h --help Show this help.
--version Show the version.
-a n --address n The MAC Address to get information on.
--update Check for an update to the database.
"""
__author__ = 'Cory Shay (cshay237@gmail.com)'
__copyright__ = 'Copyright (c) 2014 Cory Shay'
__version__ = '1.1.0'
from docopt import docopt
import cPickle as cp
import urllib2
import sys
def main(args):
print args
if args['--update']:
get_updates()
if args['--address']:
find_info(args['--address'])
def get_updates():
url_lines = []
try:
request = urllib2.urlopen('http://standards.ieee.org/develop/regauth'
'/oui/oui.txt')
url_lines = request.readlines()
    except urllib2.URLError:
print >> sys.stderr, 'Unable to connect to the url.'
if url_lines:
print len(url_lines)
def find_info(mac_address):
pass
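# Hedged sketch of how the downloaded oui.txt lines could be turned into a
# prefix -> vendor mapping; the '(hex)' row format is an assumption based on
# the public IEEE registry layout, not something this script guarantees.
def _parse_oui_lines(lines):
    mapping = {}
    for line in lines:
        # registry rows look roughly like: '00-00-00   (hex)\t\tXEROX CORPORATION'
        if '(hex)' in line:
            prefix, _, vendor = line.partition('(hex)')
            mapping[prefix.strip().replace('-', ':')] = vendor.strip()
    return mapping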
if __name__ == '__main__':
main(docopt(__doc__, version="Macresolve v%s"%(__version__)))
|
{
"content_hash": "03db2c10ee375207ca56e50ad746b9ba",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 26.1,
"alnum_prop": 0.578544061302682,
"repo_name": "ccsplit/Macresolve",
"id": "88b1337bff881ed6e45df76dc09d52e20f9213e6",
"size": "1090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/macresolve.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1090"
}
],
"symlink_target": ""
}
|
"""Support for the Italian train system using ViaggiaTreno API."""
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by ViaggiaTreno Data"
VIAGGIATRENO_ENDPOINT = ("http://www.viaggiatreno.it/viaggiatrenonew/"
"resteasy/viaggiatreno/andamentoTreno/"
"{station_id}/{train_id}")
REQUEST_TIMEOUT = 5 # seconds
ICON = 'mdi:train'
MONITORED_INFO = [
'categoria',
'compOrarioArrivoZeroEffettivo',
'compOrarioPartenzaZeroEffettivo',
'destinazione',
'numeroTreno',
'orarioArrivo',
'orarioPartenza',
'origine',
'subTitle',
]
DEFAULT_NAME = "Train {}"
CONF_NAME = 'train_name'
CONF_STATION_ID = 'station_id'
CONF_STATION_NAME = 'station_name'
CONF_TRAIN_ID = 'train_id'
ARRIVED_STRING = 'Arrived'
CANCELLED_STRING = 'Cancelled'
NOT_DEPARTED_STRING = "Not departed yet"
NO_INFORMATION_STRING = "No information for this train now"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TRAIN_ID): cv.string,
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME): cv.string,
})
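# Hedged example of a matching configuration.yaml entry; the train and
# station IDs below are placeholders, not real ViaggiaTreno identifiers:
#
#   sensor:
#     - platform: viaggiatreno
#       train_id: "1234"
#       station_id: "S01700"
#       train_name: "Milano-Roma"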
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the ViaggiaTreno platform."""
train_id = config.get(CONF_TRAIN_ID)
station_id = config.get(CONF_STATION_ID)
name = config.get(CONF_NAME)
if not name:
name = DEFAULT_NAME.format(train_id)
async_add_entities([ViaggiaTrenoSensor(train_id, station_id, name)])
async def async_http_request(hass, uri):
"""Perform actual request."""
try:
session = hass.helpers.aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await session.get(uri)
if req.status != 200:
return {'error': req.status}
json_response = await req.json()
return json_response
except (asyncio.TimeoutError, aiohttp.ClientError) as exc:
_LOGGER.error("Cannot connect to ViaggiaTreno API endpoint: %s", exc)
except ValueError:
_LOGGER.error("Received non-JSON data from ViaggiaTreno API endpoint")
class ViaggiaTrenoSensor(Entity):
"""Implementation of a ViaggiaTreno sensor."""
def __init__(self, train_id, station_id, name):
"""Initialize the sensor."""
self._state = None
self._attributes = {}
self._unit = ''
self._icon = ICON
self._station_id = station_id
self._name = name
self.uri = VIAGGIATRENO_ENDPOINT.format(
station_id=station_id, train_id=train_id)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def device_state_attributes(self):
"""Return extra attributes."""
self._attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return self._attributes
@staticmethod
def has_departed(data):
"""Check if the train has actually departed."""
try:
first_station = data['fermate'][0]
if data['oraUltimoRilevamento'] or first_station['effettiva']:
return True
        except (KeyError, IndexError):
            _LOGGER.error("Cannot fetch first station: %s", data)
return False
@staticmethod
def has_arrived(data):
"""Check if the train has already arrived."""
last_station = data['fermate'][-1]
if not last_station['effettiva']:
return False
return True
@staticmethod
def is_cancelled(data):
"""Check if the train is cancelled."""
if data['tipoTreno'] == 'ST' and data['provvedimento'] == 1:
return True
return False
async def async_update(self):
"""Update state."""
uri = self.uri
res = await async_http_request(self.hass, uri)
        if res is None:
            # the request failed and was already logged; keep the previous state
            return
        if res.get('error', ''):
if res['error'] == 204:
self._state = NO_INFORMATION_STRING
self._unit = ''
else:
self._state = "Error: {}".format(res['error'])
self._unit = ''
else:
for i in MONITORED_INFO:
self._attributes[i] = res[i]
if self.is_cancelled(res):
self._state = CANCELLED_STRING
self._icon = 'mdi:cancel'
self._unit = ''
elif not self.has_departed(res):
self._state = NOT_DEPARTED_STRING
self._unit = ''
elif self.has_arrived(res):
self._state = ARRIVED_STRING
self._unit = ''
else:
self._state = res.get('ritardo')
self._unit = 'min'
self._icon = ICON
|
{
"content_hash": "2d9a990c56eb12416f4ae583b1ef8404",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 78,
"avg_line_length": 30.570621468926554,
"alnum_prop": 0.5965625577527259,
"repo_name": "aequitas/home-assistant",
"id": "704cb77f5c8fc0688acc6e894a7a34bd04fe693d",
"size": "5411",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/viaggiatreno/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "15601734"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Decorators for the Task API.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
import logging
import pickle
from functools import wraps
from google.appengine.ext import db
from soc.tasks import responses as task_responses
def task(func):
"""Task decorator wrapper method
"""
@wraps(func)
def wrapper(request, *args, **kwargs):
"""Decorator wrapper method
"""
try:
return func(request, *args, **kwargs)
except task_responses.FatalTaskError, error:
logging.exception(error)
return task_responses.terminateTask()
except Exception, exception:
logging.exception(exception)
return task_responses.repeatTask()
return wrapper
def iterative_task(logic, **task_default):
"""Iterative wrapper method
Args:
logic: the Logic instance to get entities for
task_default: keyword arguments which can contain the following options:
fields: dictionary to filter the entities on
start_key: the default key where to start this iterative task
"""
def wrapper(func):
def iterative_wrapped(request, *args, **kwargs):
"""Decorator wrapper method
Args:
request: Django HTTP Request object
request.POST usage:
fields: a JSON dict for the properties that the entities should have.
This updates values from the task_default entry.
start_key: the key of the next entity to fetch
Returns:
Standard HTTP Django response
"""
post_dict = request.POST
fields = task_default.get('fields', {})
if 'fields' in post_dict:
fields.update(pickle.loads(str(post_dict['fields'])))
start_key = task_default.get('start_key', None)
if 'start_key' in post_dict:
# get the key where to start this iteration
start_key = post_dict['start_key']
if start_key:
start_key = db.Key(start_key)
# get the entities for this iteration
entities, next_start_key = logic.getBatchOfData(filter=fields,
start_key=start_key)
# copy the post_dict so that the wrapped function can edit what it needs
context = post_dict.copy()
try:
func(request, entities=entities, context=context, *args, **kwargs)
except task_responses.FatalTaskError, error:
logging.debug(post_dict)
logging.error(error)
return task_responses.terminateTask()
except Exception, exception:
logging.debug(post_dict)
logging.error(exception)
return task_responses.repeatTask()
if next_start_key:
# set the key to use for the next iteration
context.update({'start_key': next_start_key})
task_responses.startTask(url=request.path, context=context)
return task_responses.terminateTask()
return iterative_wrapped
return wrapper
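# Hedged usage sketch: 'student_logic' and the filter fields are placeholder
# names, not part of this module. A handler decorated with iterative_task
# receives the current batch of entities plus a mutable context dict that is
# reposted to schedule the next iteration.
#
# @iterative_task(student_logic, fields={'status': 'new'})
# def process_batch(request, entities=None, context=None, *args, **kwargs):
#     for entity in entities:
#         do_something(entity)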
|
{
"content_hash": "0615dea7f203b69f3b8d4ed88ef655de",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 27.49532710280374,
"alnum_prop": 0.6519374575118967,
"repo_name": "MatthewWilkes/mw4068-packaging",
"id": "06d241531c0f284b8f25dd5ec0cb345a7713f27c",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/melange/src/soc/tasks/helper/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "68827"
},
{
"name": "HTML",
"bytes": "586705"
},
{
"name": "JavaScript",
"bytes": "441502"
},
{
"name": "Python",
"bytes": "2136551"
},
{
"name": "Shell",
"bytes": "5667"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import survey
setup(
name='djaodjin-survey',
version=survey.__version__,
author='The DjaoDjin Team',
author_email='support@djaodjin.com',
packages=['survey',
'survey.api',
'survey.templatetags',
'survey.urls',
'survey.urls.api',
'survey.urls.api.sample',
'survey.urls.views',
'survey.views'],
package_data={'survey': [
'static/css/*',
'static/js/*',
'templates/survey/*',
'templates/survey/campaigns/*',
'templates/survey/matrix/*']},
url='https://github.com/djaodjin/djaodjin-survey/',
download_url='https://github.com/djaodjin/djaodjin-survey/tarball/%s' \
% survey.__version__,
license='BSD',
description='Survey Django app',
long_description=open('README.md').read(),
)
|
{
"content_hash": "2b0a83f6ef77e1845c6c5d298c021e2a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 75,
"avg_line_length": 30.96551724137931,
"alnum_prop": 0.5679287305122495,
"repo_name": "djaodjin/djaodjin-survey",
"id": "b76c24f2ef74ee0dd1817d0ab3fd99a6a4e2b40a",
"size": "2242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "296"
},
{
"name": "HTML",
"bytes": "29371"
},
{
"name": "JavaScript",
"bytes": "111241"
},
{
"name": "Makefile",
"bytes": "4583"
},
{
"name": "Python",
"bytes": "344027"
}
],
"symlink_target": ""
}
|
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = 'E07000109'
addresses_name = 'May 2017 - new data/Gravesham2polling_station_export-2017-04-06.csv'
stations_name = 'May 2017 - new data/Gravesham2polling_station_export-2017-04-06.csv'
elections = [
'local.kent.2017-05-04',
'parl.2017-06-08'
]
csv_encoding = 'latin-1'
|
{
"content_hash": "4d8d1e3ce293ca137b0f046adc9368d1",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 91,
"avg_line_length": 41.09090909090909,
"alnum_prop": 0.6814159292035398,
"repo_name": "chris48s/UK-Polling-Stations",
"id": "8df30b374a4af2d5a16c8e3f9b61142a9548c6a2",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_collection/management/commands/import_gravesham.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "347"
},
{
"name": "Gherkin",
"bytes": "3720"
},
{
"name": "HTML",
"bytes": "30715"
},
{
"name": "JavaScript",
"bytes": "3226"
},
{
"name": "Python",
"bytes": "589520"
}
],
"symlink_target": ""
}
|
# -*- coding: utf-8 -*-
from .grammar import SchemaGrammar
class SQLiteSchemaGrammar(SchemaGrammar):
_modifiers = ['unsigned', 'nullable', 'default', 'increment']
_serials = ['big_integer', 'integer']
def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
sql = []
# If foreign keys are on, we disable them
foreign_keys = self._connection.select('PRAGMA foreign_keys')
if foreign_keys:
foreign_keys = bool(foreign_keys[0])
if foreign_keys:
sql.append('PRAGMA foreign_keys = OFF')
sql += super().compile_rename_column(
blueprint, command, connection)
if foreign_keys:
sql.append('PRAGMA foreign_keys = ON')
return sql
def compile_change(self, blueprint, command, connection):
"""
Compile a change column command into a series of SQL statement.
:param blueprint: The blueprint
:type blueprint: orator.schema.Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
sql = []
# If foreign keys are on, we disable them
foreign_keys = self._connection.select('PRAGMA foreign_keys')
if foreign_keys:
foreign_keys = bool(foreign_keys[0])
if foreign_keys:
sql.append('PRAGMA foreign_keys = OFF')
sql += super(SQLiteSchemaGrammar,
self).compile_change(blueprint, command, connection)
if foreign_keys:
sql.append('PRAGMA foreign_keys = ON')
return sql
def compile_table_exists(self):
"""
Compile the query to determine if a table exists
:rtype: str
"""
result = ("SELECT * FROM sqlite_master WHERE type = 'table' "
"AND name = %(marker)s" % {'marker': self.get_marker()})
return result
def compile_column_exists(self, table):
"""
Compile the query to determine the list of columns.
"""
return 'PRAGMA table_info(%s)' % table.replace('.', '__')
def compile_create(self, blueprint, command, _):
"""
Compile a create table command.
"""
columns = ', '.join(self._get_columns(blueprint))
sql = 'CREATE TABLE %s (%s' % (self.wrap_table(blueprint), columns)
sql += self._add_foreign_keys(blueprint)
sql += self._add_primary_keys(blueprint)
return sql + ')'
def _add_foreign_keys(self, blueprint):
sql = ''
foreigns = self._get_commands_by_name(blueprint, 'foreign')
for foreign in foreigns:
sql += self._get_foreign_key(foreign)
if foreign.get('on_delete'):
sql += ' ON DELETE %s' % foreign.on_delete
if foreign.get('on_update'):
                sql += ' ON UPDATE %s' % foreign.on_update
return sql
def _get_foreign_key(self, foreign):
on = self.wrap_table(foreign.on)
columns = self.columnize(foreign.columns)
references = foreign.references
if not isinstance(references, list):
references = [references]
on_columns = self.columnize(references)
return ', FOREIGN KEY(%s) REFERENCES %s(%s)' % (
columns, on, on_columns)
def _add_primary_keys(self, blueprint):
primary = self._get_command_by_name(blueprint, 'primary')
if primary:
columns = self.columnize(primary.columns)
return ', PRIMARY KEY (%s)' % columns
return ''
def compile_add(self, blueprint, command, _):
table = self.wrap_table(blueprint)
columns = self.prefix_list('ADD COLUMN', self._get_columns(blueprint))
statements = []
for column in columns:
statements.append('ALTER TABLE %s %s' % (table, column))
return statements
def compile_unique(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'CREATE UNIQUE INDEX %s ON %s (%s)' % (
command.index, table, columns)
def compile_index(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'CREATE INDEX %s ON %s (%s)' % (command.index, table, columns)
def compile_foreign(self, blueprint, command, _):
pass
def compile_drop(self, blueprint, command, _):
return 'DROP TABLE %s' % self.wrap_table(blueprint)
def compile_drop_if_exists(self, blueprint, command, _):
return 'DROP TABLE IF EXISTS %s' % self.wrap_table(blueprint)
def compile_drop_column(self, blueprint, command, connection):
schema = connection.get_schema_manager()
table_diff = self._get_table_diff(blueprint, schema)
for name in command.columns:
column = connection.get_column(blueprint.get_table(), name)
table_diff.removed_columns[name] = column
return schema.get_database_platform().get_alter_table_sql(table_diff)
def compile_drop_unique(self, blueprint, command, _):
return 'DROP INDEX %s' % command.index
def compile_drop_index(self, blueprint, command, _):
return 'DROP INDEX %s' % command.index
def compile_rename(self, blueprint, command, _):
from_ = self.wrap_table(blueprint)
return 'ALTER TABLE %s RENAME TO %s' % (
from_, self.wrap_table(command.to))
    # Why embed comments in the SQLite schema? Several orator types map to
    # the same SQLite type, so when a SQLite type is read back the original
    # orator type would otherwise be unknown. For example, string(191) is
    # emitted as VARCHAR /*string(%s,191)*/, letting a dumper recover "string".
def _type_char(self, column):
return 'VARCHAR /*char(%%s,%s)*/' % column.length
def _type_string(self, column):
return 'VARCHAR /*string(%%s,%s)*/' % column.length
def _type_text(self, column):
return 'TEXT /*text(%s)*/'
def _type_medium_text(self, column):
return 'TEXT /*medium_text(%s)*/'
def _type_long_text(self, column):
return 'TEXT /*long_text(%s)*/'
def _type_integer(self, column):
return 'INTEGER /*integer(%s)*/'
def _type_big_integer(self, column):
return 'INTEGER /*big_integer(%s)*/'
def _type_medium_integer(self, column):
return 'INTEGER /*medium_integer(%s)*/'
def _type_tiny_integer(self, column):
return 'TINYINT /*tiny_integer(%s)*/'
def _type_small_integer(self, column):
return 'INTEGER /*small_integer(%s)*/'
def _type_float(self, column):
return 'FLOAT /*float(%s)*/'
def _type_double(self, column):
if column.total and column.places:
return 'FLOAT /*double(%%s,%s,%s)*/' % (
column.total, column.places)
return 'FLOAT /*double(%s)*/'
def _type_decimal(self, column):
return 'NUMERIC /*DECIMAL(%%s,%s,%s)*/' % (
column.total, column.places)
def _type_boolean(self, column):
return 'TINYINT /*boolean(%s)*/'
def _type_enum(self, column):
return 'VARCHAR /*enum(%%s,%s)*/' % column.allowed
def _type_json(self, column):
return 'TEXT /*json(%s)*/'
def _type_date(self, column):
return 'DATE /*date(%s)*/'
def _type_datetime(self, column):
return 'DATETIME /*datetime(%s)*/'
def _type_time(self, column):
return 'TIME /*time(%s)*/'
def _type_timestamp(self, column):
if column.use_current:
return 'DATETIME /*timestamp(%s)*/ DEFAULT CURRENT_TIMESTAMP'
return 'DATETIME /*timestamp(%s)*/'
def _type_binary(self, column):
return 'BLOB /*binary*/'
def _modify_nullable(self, blueprint, column):
if column.get('nullable'):
return ' NULL'
return ' NOT NULL'
def _modify_unsigned(self, blueprint, column):
# SQLite doesn't have unsigned
# but the schema dumper need this info
if column.get('unsigned', False):
return ' /*unsigned*/'
return ''
def _modify_default(self, blueprint, column):
if column.get('default') is not None:
return ' DEFAULT %s' % self._get_default_value(column.default)
return ''
def _modify_increment(self, blueprint, column):
if column.type in self._serials and column.auto_increment:
return ' PRIMARY KEY AUTOINCREMENT'
return ''
def _get_dbal_column_type(self, type_):
"""
Get the dbal column type.
:param type_: The fluent type
:type type_: str
:rtype: str
"""
type_ = type_.lower()
if type_ == 'enum':
return 'string'
return super()._get_dbal_column_type(type_)
def _list_tables(self):
sql = """\
SELECT name AS table_name
FROM sqlite_master
        WHERE type = 'table'
"""
return sql
def _list_columns(self, table):
sql = """\
PRAGMA table_info('{}');
""".format(table)
return sql
def _plain_sql(self, column):
sql = """\
SELECT sql
FROM sqlite_master
WHERE type = 'table'
AND name = '{}'
""".format(column)
return sql
def _list_indexes(self, table):
sql = """\
PRAGMA index_list('{}')
""".format(table)
return sql
def _show_index(self, index):
sql = """\
PRAGMA index_info('{}')
""".format(index)
return sql
def _list_foreign_keys(self, table):
sql = """\
PRAGMA foreign_key_list('{}')
""".format(table)
return sql
|
{
"content_hash": "ba1ea361b9c6aaf2b7a606229bfe5507",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 78,
"avg_line_length": 28.462184873949578,
"alnum_prop": 0.568743233933668,
"repo_name": "Hanaasagi/sorator",
"id": "8df91c6b34032c0d6f4c0950b4b4025e8db29746",
"size": "10161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orator/schema/grammars/sqlite_grammar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2023"
},
{
"name": "Python",
"bytes": "1070898"
}
],
"symlink_target": ""
}
|
from sys import stdout
from django.core.management.base import BaseCommand
from frano.quotes.models import Quote
from frano.quotes.models import refresh_price_history
class Command(BaseCommand):
help = 'Refreshes the price history for all quotes'
def handle(self, *args, **options):
quotes = Quote.objects.all()
stdout.write('Found %d quotes to refresh price history\nStarting...\n' % quotes.count())
for quote in quotes:
stdout.write('Refreshing price history for: %s\n' % quote.symbol)
refresh_price_history(quote)
stdout.write('Successfully refreshed priced history\n')
|
{
"content_hash": "9d584d42ab42a2460052f0ae5536aabe",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 92,
"avg_line_length": 32.578947368421055,
"alnum_prop": 0.7269789983844911,
"repo_name": "fxdemolisher/frano",
"id": "313f2dceea9850c1469f2a84b35885ac9b4ca631",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frano/management/commands/refresh_price_history.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "73231"
},
{
"name": "Python",
"bytes": "100848"
},
{
"name": "Shell",
"bytes": "297"
}
],
"symlink_target": ""
}
|
"""
This module holds functions for users, most notably
* creating users
* logging in as a user (and persisting state with sessions)
"""
import json
import requests
import sys
from constants import BASE_API_URL, BASE_URL, HEADERS
def create_user(user_info):
"""
Create a user from the provided parameters.
HOW:
POST the user payload (see code below)
FORM ENCODED (key/value) not JSON
TO /auth/signup
NOTES:
HEADERS are not required for this route.
FUNCTION PARAMS:
first_name: (string) new user's first name
last_name: (string) new user's last name
email: (string) new user's email address
password: (string) new user's password
phone: (string) new user's contact number
company: (string) your company's name
title: (string) new user's title
FUNCTION RETURNS:
User payload in JSON. We suggest you store the email or key to this user
to use in future calls.
"""
create_user_url = "/".join([BASE_URL, "auth/signup"])
user_payload = {
"company": user_info.get("company"),
"first_name": user_info.get("first_name"),
"last_name": user_info.get("last_name"),
"email": user_info.get("email"),
"password": user_info.get("password"),
"phone": user_info.get("phone"),
"title": user_info.get("title"),
"organization_name": user_info.get("company")
}
response = requests.post(create_user_url, data=user_payload)
if response.status_code != 200:
        print "[ERROR]: Could not create user. Response from server:"
        print "    {}".format(response.content)
        sys.exit(1)
    return json.loads(response.content)
def login_user(email, password):
"""
Login as the user to customize his/her profile.
HOW:
POST 'email' and 'password'
FORM ENCODED (key/value) not JSON
TO /api/v1/auth
NOTES:
HEADERS are required. Pass your API token as X-Client-Id.
Once logged in, we suggest you establish a cookie-based session for
making calls on behalf of this user. See 'session' in the code.
FUNCTION RETURNS
(requests.session) to store user session info for subsequent calls.
"""
login_url = "/".join([BASE_API_URL, "auth"])
login_payload = {
"email": email,
"password": password,
}
# This session holds the user's authentication and persists.
session = requests.Session()
response = session.post(login_url,
data=login_payload,
headers=HEADERS)
if response.status_code == 200:
return session, json.loads(response.content)["user_info"]
else:
print "[ERROR]: Could not login as user. Response from server:"
print " {}".format(response.content)
sys.exit(1)
def assign_user_photo(sess, user, photo_url):
"""
This will assign the user's profile photo.
HOW:
1. GET the user entity you want to update
AT /api/v1/users/<keystring>
2. Update that entity's "photo" field with a valid image URL.
3. PUT the user payload
JSON encoded
TO /api/v1/users/<keystring>
NOTES:
When you get the user it returns two non-editable fields:
(1) is_premium
(2) email_verified
You need to remove these fields before updating the user. If you don't
the server will reject your request.
"""
user_url = "/".join([BASE_API_URL, "users/{}".format(user.get("key"))])
# GET returns the true User model
response = sess.get(url=user_url, headers=HEADERS)
if response.status_code != 200:
print "[ERROR]: Could not assign user photo. Response from server: {}".format(response.content)
sys.exit(1)
user = json.loads(response.content)
del user["is_premium"] # read only
del user["email_verified"] # read only
user.update({"photo": photo_url})
response = sess.put(url=user_url,
data=json.dumps(user),
headers=HEADERS)
if response.status_code != 200:
print "[ERROR]: Could not assign user photo. Response from server: {}".format(response.content)
sys.exit(1)
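# Hedged end-to-end sketch chaining the three helpers; every value below
# (names, email, password, photo URL) is a placeholder, not a real account.
if __name__ == "__main__":
    create_user({
        "first_name": "Jane",
        "last_name": "Doe",
        "email": "jane.doe@example.com",
        "password": "example-password",
        "phone": "555-0100",
        "company": "Example Co",
        "title": "Broker",
    })
    session, user = login_user("jane.doe@example.com", "example-password")
    assign_user_photo(session, user, "https://example.com/jane.jpg")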
|
{
"content_hash": "b1b0c64214a653d8afed7aa41cffd61a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 103,
"avg_line_length": 30.91176470588235,
"alnum_prop": 0.6082302568981922,
"repo_name": "RealMassive/SimpleInventoryDemo",
"id": "be4e154ae6f7cc0f84000087b4060191644d0871",
"size": "4204",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19168"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('structure', '0014_remove_customer_type'),
('marketplace_checklist', '0006_answer_project_remove'),
]
operations = [
migrations.AddField(
model_name='checklist',
name='customers',
field=models.ManyToManyField(to='structure.Customer'),
),
]
|
{
"content_hash": "e144941bb4b09aaecc96a2e342136387",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 25.058823529411764,
"alnum_prop": 0.6032863849765259,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "3603dd1a790bef765fa3044cc8d4a53f64a4d882",
"size": "476",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_mastermind/marketplace_checklist/migrations/0007_checklist_customers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
}
|
import contextlib
from taskflow.persistence import backends
from taskflow.persistence.backends import impl_memory
from taskflow import test
from taskflow.tests.unit.persistence import base
class MemoryPersistenceTest(test.TestCase, base.PersistenceTestMixin):
def setUp(self):
super(MemoryPersistenceTest, self).setUp()
self._backend = impl_memory.MemoryBackend({})
def _get_connection(self):
return self._backend.get_connection()
def tearDown(self):
conn = self._get_connection()
conn.clear_all()
self._backend = None
super(MemoryPersistenceTest, self).tearDown()
def test_memory_persistence_entry_point(self):
conf = {'connection': 'memory:'}
with contextlib.closing(backends.fetch(conf)) as be:
self.assertIsInstance(be, impl_memory.MemoryBackend)
|
{
"content_hash": "76ec38dd2a0bcee446854bde5e2ac1c2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 32.96153846153846,
"alnum_prop": 0.7036172695449242,
"repo_name": "citrix-openstack-build/taskflow",
"id": "a4d02c44dfa2d82b00e4f61508fa5490738b9591",
"size": "1520",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taskflow/tests/unit/persistence/test_memory_persistence.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "702993"
},
{
"name": "Shell",
"bytes": "1988"
}
],
"symlink_target": ""
}
|