| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
from djblets.db.query import LocalDataQuerySet
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import webapi_request_fields
from djblets.webapi.fields import IntFieldType, StringFieldType
from djblets.webapi.responses import WebAPIResponsePaginated
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import (webapi_check_local_site,
webapi_check_login_required)
from reviewboard.webapi.resources import resources
class RemoteRepositoryResponsePaginated(WebAPIResponsePaginated):
"""Provides paginated responses for lists of RemoteRepository objects.
This is a specialization of WebAPIResponsePaginated designed to
return lists of RemoteRepository objects and to handle pagination in
a way that's compatible with the pagination models of HostingService.
"""
def __init__(self, request, queryset, *args, **kwargs):
self.paginator = queryset[0]
super(RemoteRepositoryResponsePaginated, self).__init__(
request, queryset=None, *args, **kwargs)
def has_prev(self):
return self.paginator.has_prev
def has_next(self):
return self.paginator.has_next
def get_prev_index(self):
return max(self.start - 1, 0)
def get_next_index(self):
return self.start + 1
def get_results(self):
return self.paginator.page_data
def get_total_results(self):
return None
def build_pagination_url(self, full_path, start, max_results,
query_parameters):
return '%s?start=%s%s' % (full_path, start, query_parameters)
class RemoteRepositoryResource(WebAPIResource):
"""Returns information on remote repositories on a hosting service.
This can be used to look up the information needed to connect to a
remote repository or to add a repository to Review Board. Only remote
repositories that are accessible to the linked hosting service account
(i.e., that of the parent resource) will be provided by this resource.
"""
added_in = '2.5'
name = 'remote_repository'
name_plural = 'remote_repositories'
model = RemoteRepository
model_object_key = 'id'
model_parent_key = 'hosting_service_account'
uri_object_key = 'repository_id'
uri_object_key_regex = r'[A-Za-z0-9_./-]+'
paginated_cls = RemoteRepositoryResponsePaginated
fields = {
'id': {
'type': StringFieldType,
'description': 'The unique ID for this repository on the '
'hosting service.',
},
'name': {
'type': StringFieldType,
'description': 'The name of the repository.',
},
'owner': {
'type': StringFieldType,
'description': 'The owner of the repository, which may be a user '
'account or an organization, depending on the '
'service.',
},
'scm_type': {
'type': StringFieldType,
'description': 'The type of repository, mapping to registered '
'SCMTools on Review Board.',
},
'path': {
'type': StringFieldType,
'description': 'The repository path as recommended by the hosting '
'service.',
},
'mirror_path': {
'type': StringFieldType,
'description': 'A secondary path that can be used to reach the '
'repository.',
},
}
autogenerate_etags = True
allowed_methods = ('GET',)
def has_list_access_permissions(self, request, *args, **kwargs):
account = resources.hosting_service_account.get_object(
request, *args, **kwargs)
# Only allow administrators or those with the ability to modify the
# account to see what repositories are listed.
return account.is_mutable_by(request.user)
def has_access_permissions(self, request, remote_repository,
*args, **kwargs):
# Only allow administrators or those with the ability to modify the
# account to see what repositories are listed.
return remote_repository.hosting_service_account.is_mutable_by(
request.user)
def get_queryset(self, request, start=None, is_list=False,
repository_id=None, *args, **kwargs):
account = resources.hosting_service_account.get_object(
request, *args, **kwargs)
if is_list:
# Wrap the paginator in a LocalDataQuerySet, so that we can get
# to it later in RemoteRepositoryResponsePaginated.
lookup_kwargs = {}
for name in ('owner', 'owner-type', 'filter-type'):
if kwargs.get(name):
arg = name.replace('-', '_')
lookup_kwargs[arg] = kwargs[name]
result = account.service.get_remote_repositories(start=start,
**lookup_kwargs)
else:
result = account.service.get_remote_repository(repository_id)
return LocalDataQuerySet([result])
def get_serializer_for_object(self, obj):
if isinstance(obj, RemoteRepository):
return self
return super(RemoteRepositoryResource, self).get_serializer_for_object(
obj)
# NOTE: We're not augmenting from any resources, because we don't want to
# include ?counts-only= or ?max-results=, and we have a different
# ?start=. Because of this, we need to be careful to apply our own
# decorators.
@webapi_check_login_required
@webapi_check_local_site
@webapi_request_fields(
optional={
'owner': {
'type': StringFieldType,
'description': 'The owner (user account or organization) '
'to look up repositories for. Defaults to '
'the owner of the hosting service account.',
},
'owner-type': {
'type': StringFieldType,
'description': 'Indicates what sort of account the owner '
'represents. This may be required by some '
'services, and the values are dependent on '
'that service.',
},
'filter-type': {
'type': StringFieldType,
'description': 'Filters the list of results. Allowed values '
'are dependent on the hosting service. '
'Unexpected values will be ignored.',
},
'start': {
'type': IntFieldType,
'description': 'The 0-based index of the first page of '
'results to fetch.',
},
},
allow_unknown=True
)
def get_list(self, request, *args, **kwargs):
"""Returns the list of remote repositories on the hosting service.
Different hosting service backends have different criteria for
performing the lookups. Some hosting services have multiple types of
owners, specified by passing ``owner-type``. Filtering may also be
possible by passing ``filter-type``. Performing lookups requires
knowing the possible values for the service ahead of time and passing
the proper parameters in the query string.
Pagination works a bit differently for this resource than most.
Instead of ``?start=`` taking an index into the number of results,
this resource's ``?start=`` takes a 0-based index of the page of
results.
``?max-results=`` and ``?counts-only=`` are not supported, as they're
not compatible with all hosting services.
Callers should always use the ``next`` and ``prev`` links for
navigation, and should not build page indexes themselves.
"""
return super(RemoteRepositoryResource, self).get_list(
request, *args, **kwargs)
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Provides information on a particular remote repository.
If the remote repository exists and is accessible by the linked
hosting service account (that of the parent resource), then the
details of that repository will be returned in the payload.
The ID expected for the lookup in the URL is specific to the type
of hosting service.
"""
pass
remote_repository_resource = RemoteRepositoryResource()
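# Illustrative client-side sketch (not part of this module; the host name and
# exact payload shape are assumptions): paging through the list resource by
# following the 'next' links, as the get_list docstring above recommends.
#
#   import requests
#
#   url = ('https://reviews.example.com/api/hosting-service-accounts/1/'
#          'remote-repositories/')
#   while url:
#       payload = requests.get(url).json()
#       for repo in payload['remote_repositories']:
#           print(repo['name'], repo['path'])
#       url = payload['links'].get('next', {}).get('href')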
|
{
"content_hash": "554d000a0eefe02e69e2ea0fa52289f9",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 79,
"avg_line_length": 39.48444444444444,
"alnum_prop": 0.6020936515083296,
"repo_name": "reviewboard/reviewboard",
"id": "8b992b439985c7a80502d968db13d67f639f93c2",
"size": "8884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/webapi/resources/remote_repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
# D. Given a list of numbers, return a list where all adjacent == elements
# have been reduced to a single element. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  adjacent = []
  for num in nums:
    # Compare only against the last kept element, so non-adjacent duplicates
    # survive (e.g. [1, 2, 2, 3, 1] -> [1, 2, 3, 1]).
    if not adjacent or num != adjacent[-1]:
      adjacent.append(num)
  return adjacent
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  # Repeatedly take the smaller head element. pop(0) keeps this short, but
  # is O(n) per call -- see the note below.
  result = []
  while list1 and list2:
    result.append(list1.pop(0) if list1[0] <= list2[0] else list2.pop(0))
  result.extend(list1 or list2)
  return result
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
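# That alternate approach, written out (an illustrative addition, not one of
# the original exercise solutions):
def linear_merge_backwards(list1, list2):
  result = []
  # Pop the larger of the two end elements (constant time), building the
  # merged list in reverse.
  while list1 and list2:
    result.append(list1.pop() if list1[-1] >= list2[-1] else list2.pop())
  # The leftover tail is already sorted; append it reversed, then flip the
  # whole result back into increasing order.
  result.extend(reversed(list1 or list2))
  result.reverse()
  return result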
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
print 'remove_adjacent'
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
test(remove_adjacent([]), [])
print
print 'linear_merge'
test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
['aa', 'bb', 'cc', 'xx', 'zz'])
test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
|
{
"content_hash": "5d4469318eec56b9e442c67e8ac35fee",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 32.425925925925924,
"alnum_prop": 0.6407766990291263,
"repo_name": "rudy750/Google_Class",
"id": "0627c545af968dfabf13cf05c34dfd74ba354a49",
"size": "2228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic/list2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
""" Functions for visualizing lists of iGraph objects as SVG images
Filenames for SVGs:
    When saving single graphs (one graph per SVG):
        [fout]_c[count]_i[index].svg
    Where fout is a function parameter (str), count is retrieved from the
    list, and index is the graph's rank after sorting
    When saving a grid (square) of graphs:
combined_[fout].svg
Where fout is a function parameter (str)
"""
import os
from operator import itemgetter
from igraph import Graph
import svgutils.transform as sg # Needed for combining SVG files
def visualize_separate(fout, graphs, top=True, n=None):
""" Save the graphs in a list of graphs as separate SVG files
Args:
fout (str): Filename prefix to save SVG to (appended by count info)
graphs (list[tuple[Graph,int,float]]):
List of graphs in format of (graph,count,score) tuples
Technically only graph and count are needed
        top (bool): True to sort graphs by descending score before saving
        n (int): The number of patterns to save (save the top-n)
"""
if not n:
n = len(graphs)
if (n > len(graphs)):
raise ValueError("N (%d) is greater than list size (%d)"
% (n, len(graphs)))
graphs = sorted(graphs, key=itemgetter(2), reverse=top) # sort first
for i in range(n):
g, c, s = graphs[i]
fname = "{}_c{}_i{}.svg".format(fout, c, i)
print("Saving pattern #{} to {}..".format(i+1, fname), end='\r')
g.write_svg(fname, labels="label")
def visualize_grid(fout, graphs, top=True, n=None, delete_singles=True):
""" Visualize a list of graphs in a square grid format SVG file
Args:
fout (str): Filename to save SVG to
graphs (list[tuple[Graph,int,float]]):
List of graphs in format of (graph,count,score) tuples
Technically only graph and score are needed
n (int): The dimensions of the grid (nXn square)
If no 'n' is provided then the smallest square (that can
fit all graphs in the list) is used
delete_singles (bool): True to delete intermediate single SVG files
Warning: Function creates temporary files, which are cleaned up on exit
    Note: the final SVG can be easily converted to PDF or PNG, e.g. with
    inkscape via
inkscape --export-pdf=fig_final.pdf fig_final.svg
inkscape --export-png=fig_final.png fig_final.svg
"""
if len(graphs) == 0:
return
# find a square size that captures all items in the list
if not n:
n = 1
while(n*n < len(graphs)):
n = n+1
# grid dimensions are: n x n, so we have n*n total elements
N = n*n
# generate the separate SVGs before we combine them
# we need to generate N SVGs
n_graphs = min(len(graphs), N) # number of graphs on the grid - blank spaces
visualize_separate(fout, graphs, top, n_graphs)
# Load the saved figures into memory
graphs = sorted(graphs, key=itemgetter(2), reverse=top) # sort first
fnames = ["{}_c{}_i{}.svg".format(
fout, c, i) for i, (g, c, s) in enumerate(graphs[:N])]
figs = [sg.fromfile(f) for f in fnames]
# get the plot objects
plots = [fig.getroot() for fig in figs]
# create the captions
caps = ["Appeared {} times".format(c) for g, c, s in graphs[:N]]
width, height = figs[0].get_size()
width, height = int(width[:-2]), int(height[:-2]) # strip 'px' from '400px'
padding = 100
# organize into n x n grid
i = 0
for r in range(n):
for c in range(n):
# leave a blank space in the grid if we're out of graphs
if i >= len(graphs):
continue
x = padding + r*(height+padding)
y = padding + c*(width+padding)
plots[i].moveto(x, y)
# add text labels
caps[i] = sg.TextElement(x, y+height+(padding/2),
caps[i], size=25, weight="bold")
i += 1
# create new combined SVG figure
fig = sg.SVGFigure("{}px".format(padding + n*(height+padding)),
"{}px".format(padding + n*(width+padding)))
# append plots and labels to figure
fig.append(plots)
fig.append(caps)
fname = "combined_{}.svg".format(fout)
print("Saving combined SVG to {}...".format(fname))
fig.save(fname)
# clear the independent SVG files, leaving only the combined file
if delete_singles:
print("Clearing leftover SVG files...")
for f in fnames:
os.remove(f)
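# Usage sketch (illustrative, made-up data): each graph needs a 'label'
# vertex attribute because write_svg() above is called with labels="label".
#
#   from igraph import Graph
#
#   g = Graph.Ring(4)
#   g.vs['label'] = list(range(g.vcount()))
#   graphs = [(g, 12, 0.9), (g, 5, 0.4)]   # (graph, count, score) tuples
#   visualize_separate("patterns", graphs, top=True)
#   visualize_grid("patterns", graphs)     # writes combined_patterns.svg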
|
{
"content_hash": "29089455d7d77fdad70bb67c79c6c6a7",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 81,
"avg_line_length": 34.921875,
"alnum_prop": 0.6006711409395973,
"repo_name": "cpacker/GraphZip",
"id": "803e95ac7d0fef0cf5e7c98949505f6161f58618",
"size": "4470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compressor/visualize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60810"
}
],
"symlink_target": ""
}
|
from django.db import models
from bitfield_manager.models import ParentBitfieldModelMixin, ChildBitfieldModelMixin
from bitfield import BitField
class BaseTestModel(models.Model):
"""
Base for test models that sets app_label, so they play nicely.
"""
class Meta:
app_label = 'tests'
abstract = True
class ParentTestModel(ParentBitfieldModelMixin, BaseTestModel):
STATUS_CHILD1 = 0
STATUS_CHILD2 = 1
STATUS_CHILD3 = 2
STATUS_CHILD_CHILD = 3
STATUS_CHILD_M2M = 4
name = models.CharField(max_length=255)
status = models.BigIntegerField()
secondary_status = models.BigIntegerField()
bitfield_status = BitField(flags=(
('status_child1', 'Status Child 1'),
('status_child2', 'Status Child 2'),
('status_child3', 'Status Child 3'),
('status_child_child', 'Status Child Child'),
('status_child_m2m', 'Status Child M2M')
))
def __str__(self):
return "name: %s status: %i" % (self.name, self.status)
class ChildTestModel1(ChildBitfieldModelMixin, BaseTestModel):
parent = models.ForeignKey('ParentTestModel', related_name='childtestmodels1', null=True)
class BitfieldMeta:
parent_models = [('parent.status', ParentTestModel.STATUS_CHILD1),
('parent.bitfield_status', ParentTestModel.bitfield_status.status_child1)]
class ChildTestModel2(ChildBitfieldModelMixin, BaseTestModel):
parent = models.OneToOneField('ParentTestModel', related_name='childtestmodels2')
class BitfieldMeta:
parent_models = [('parent.status', ParentTestModel.STATUS_CHILD2),
('parent.bitfield_status', ParentTestModel.bitfield_status.status_child2)]
class ChildTestModel3(ChildBitfieldModelMixin, BaseTestModel):
parent = models.ForeignKey('ParentTestModel', related_name='childtestmodels3')
class BitfieldMeta:
parent_models = [('parent.status', ParentTestModel.STATUS_CHILD3),
('parent.secondary_status', ParentTestModel.STATUS_CHILD3),
('parent.bitfield_status', ParentTestModel.bitfield_status.status_child3)]
class ChildChildTestModel(ChildBitfieldModelMixin, BaseTestModel):
child = models.ForeignKey('ChildTestModel1', related_name='childchildtestmodels')
class BitfieldMeta:
parent_models = [('child.parent.status', ParentTestModel.STATUS_CHILD_CHILD),
('child.parent.bitfield_status', ParentTestModel.bitfield_status.status_child_child)]
class ChildManyToManyTestModel(ChildBitfieldModelMixin, BaseTestModel):
parent = models.ManyToManyField('ParentTestModel', related_name='childmanytomanytestmodels')
class BitfieldMeta:
parent_models = [('parent.status', ParentTestModel.STATUS_CHILD_M2M),
('parent.bitfield_status', ParentTestModel.bitfield_status.status_child_m2m)]
class BrokenChildTestModel(ChildBitfieldModelMixin, BaseTestModel):
class BitfieldMeta:
parent_models = [('badparent.status', 0)]
class Unrelated(ChildBitfieldModelMixin, BaseTestModel):
parent = models.ForeignKey('ParentTestModel', related_name='unrelatedmodels3')
|
{
"content_hash": "38864fb5cad4e30924f405558cda4d5a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 110,
"avg_line_length": 37.694117647058825,
"alnum_prop": 0.7000624219725343,
"repo_name": "goodmase/django-bitfield-manager",
"id": "656fc678ec141a88a8aee8ce69c7e324008645f4",
"size": "3204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2882"
},
{
"name": "Makefile",
"bytes": "1611"
},
{
"name": "Python",
"bytes": "39209"
}
],
"symlink_target": ""
}
|
import sys
import rsync4python.rsync
def rdiff_signature(basefilename, signature_filename):
with open(basefilename, 'rb') as base:
with open(signature_filename, 'wb') as signature:
rsync4python.rsync.rsync.signature(base, signature)
def rdiff_patch(basefilename, deltafilename, finalfilename):
with open(basefilename, 'rb') as base:
with open(deltafilename, 'rb') as delta:
with open(finalfilename, 'wb') as final:
rsync4python.rsync.rsync.patch(base, delta, final)
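# Usage sketch (hypothetical filenames): compute a signature for a base file,
# then apply a previously computed delta to rebuild the target.
#
#   rdiff_signature('base.bin', 'base.sig')
#   rdiff_patch('base.bin', 'delta.bin', 'rebuilt.bin')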
|
{
"content_hash": "970d4af1108df5c03452e4141ee01f0c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.6852886405959032,
"repo_name": "BenjamenMeyer/rsync4python",
"id": "45cee2a261b75506c212850de1111c03120d706b",
"size": "537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rsync4python/rdiff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9438"
},
{
"name": "Shell",
"bytes": "1220"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import time
import sys
print("Running slow program")
sys.stdout.flush() # Make sure the print gets flushed so it appears in lit output.
time.sleep(6)
|
{
"content_hash": "06d812ab30fd9276dc7369b863882e24",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 82,
"avg_line_length": 23.75,
"alnum_prop": 0.7578947368421053,
"repo_name": "silverneko/HWs",
"id": "2dccd6331360b46d4e29350be871e83b6eb9bd91",
"size": "210",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "compiler-hw-2-master/hw3/llvm-3.8.0.src/utils/lit/tests/Inputs/shtest-timeout/slow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "11314045"
},
{
"name": "Batchfile",
"bytes": "16268"
},
{
"name": "C",
"bytes": "20037807"
},
{
"name": "C++",
"bytes": "14668867"
},
{
"name": "CMake",
"bytes": "259362"
},
{
"name": "CSS",
"bytes": "3339"
},
{
"name": "Cuda",
"bytes": "26137"
},
{
"name": "Emacs Lisp",
"bytes": "9440"
},
{
"name": "Go",
"bytes": "133381"
},
{
"name": "Haxe",
"bytes": "140982"
},
{
"name": "LLVM",
"bytes": "42152280"
},
{
"name": "Lex",
"bytes": "17360"
},
{
"name": "M4",
"bytes": "102755"
},
{
"name": "Makefile",
"bytes": "289041"
},
{
"name": "Matlab",
"bytes": "3289"
},
{
"name": "OCaml",
"bytes": "392667"
},
{
"name": "Objective-C",
"bytes": "145449"
},
{
"name": "Perl",
"bytes": "43307"
},
{
"name": "Python",
"bytes": "596622"
},
{
"name": "Roff",
"bytes": "18916"
},
{
"name": "Scilab",
"bytes": "105758"
},
{
"name": "Shell",
"bytes": "211792"
},
{
"name": "Vim script",
"bytes": "16523"
},
{
"name": "Yacc",
"bytes": "102681"
}
],
"symlink_target": ""
}
|
import sys, os
delete = os.path.join('src','tools')
root_path = os.getcwd().replace(delete,'')
sys.path.append(root_path)
|
{
"content_hash": "46a1de2fcfd1f949718b8cfb1e769a4f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 24.6,
"alnum_prop": 0.6910569105691057,
"repo_name": "golsun/GPS",
"id": "0a656adfd219c15c28e96647c4cf6b191321ead4",
"size": "123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/add_root_path.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362740"
}
],
"symlink_target": ""
}
|
'''
Installer for tratihubis.
Developer cheat sheet
---------------------
Create the installer archive::
$ python setup.py sdist --formats=zip
Upload release to PyPI::
$ pep8 -r --ignore=E501 *.py test/*.py
$ python test/test_tratihubis.py
$ python setup.py sdist --formats=zip upload
Tag a release::
$ git tag -a -m 'Tagged version 1.x.' v1.x
$ git push --tags
'''
# Copyright (c) 2012-2013, Thomas Aglassinger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Thomas Aglassinger nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup
import tratihubis
setup(
name='tratihubis',
version=tratihubis.__version__,
py_modules=['tratihubis'],
description='convert Trac tickets to Github issues',
keywords='trac github ticket issue convert migrate',
author='Thomas Aglassinger',
author_email='roskakori@users.sourceforge.net',
url='http://pypi.python.org/pypi/tratihubis/',
license='BSD License',
long_description=tratihubis.__doc__, # @UndefinedVariable
install_requires=['PyGithub>=1.8', 'setuptools'],
entry_points={
"console_scripts": [
"tratihubis = tratihubis:_mainEntryPoint",
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Bug Tracking',
]
)
|
{
"content_hash": "858e8fefe670d25a97822c2c2b97d568",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 79,
"avg_line_length": 37.8125,
"alnum_prop": 0.7094214876033058,
"repo_name": "i-rabot/tractogithub",
"id": "a5c721dd4cdd117f11fa9759aab3ac286df6c4ad",
"size": "3025",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "72236"
},
{
"name": "HTML",
"bytes": "293190"
},
{
"name": "JavaScript",
"bytes": "82208"
},
{
"name": "Python",
"bytes": "2018196"
}
],
"symlink_target": ""
}
|
import numpy
class LearningRate(object):
def __init__(self):
'''constructor'''
def get_rate(self):
pass
def get_next_rate(self, current_error):
pass
class LearningRateConstant(LearningRate):
def __init__(self, learning_rate = 0.08, epoch_num = 20):
self.learning_rate = learning_rate
self.epoch = 1
self.epoch_num = epoch_num
self.rate = learning_rate
def get_rate(self):
return self.rate
def get_next_rate(self, current_error):
if ( self.epoch >= self.epoch_num):
self.rate = 0.0
else:
self.rate = self.learning_rate
self.epoch += 1
return self.rate
class LearningRateExpDecay(LearningRate):
def __init__(self, start_rate = 0.08, scale_by = 0.5,
min_derror_decay_start = 0.05, min_derror_stop = 0.05, init_error = 100,
decay=False, min_epoch_decay_start=15, zero_rate = 0.0):
self.start_rate = start_rate
self.init_error = init_error
self.rate = start_rate
self.scale_by = scale_by
self.min_derror_decay_start = min_derror_decay_start
self.min_derror_stop = min_derror_stop
self.lowest_error = init_error
self.epoch = 1
self.decay = decay
self.zero_rate = zero_rate
self.min_epoch_decay_start = min_epoch_decay_start
def get_rate(self):
return self.rate
def get_next_rate(self, current_error):
diff_error = 0.0
diff_error = self.lowest_error - current_error
if (current_error < self.lowest_error):
self.lowest_error = current_error
if (self.decay):
if (diff_error < self.min_derror_stop):
self.rate = 0.0
else:
self.rate *= self.scale_by
else:
if ((diff_error < self.min_derror_decay_start) and (self.epoch > self.min_epoch_decay_start)):
self.decay = True
self.rate *= self.scale_by
self.epoch += 1
return self.rate
class LearningMinLrate(LearningRate):
def __init__(self, start_rate = 0.08, scale_by = 0.5,
min_lrate_stop = 0.0002, init_error = 100,
decay=False, min_epoch_decay_start=15):
self.start_rate = start_rate
self.init_error = init_error
self.rate = start_rate
self.scale_by = scale_by
self.min_lrate_stop = min_lrate_stop
self.lowest_error = init_error
self.epoch = 1
self.decay = decay
self.min_epoch_decay_start = min_epoch_decay_start
def get_rate(self):
return self.rate
def get_next_rate(self, current_error):
diff_error = 0.0
diff_error = self.lowest_error - current_error
if (current_error < self.lowest_error):
self.lowest_error = current_error
if (self.decay):
if (self.rate < self.min_lrate_stop):
self.rate = 0.0
else:
self.rate *= self.scale_by
else:
if (self.epoch >= self.min_epoch_decay_start):
self.decay = True
self.rate *= self.scale_by
self.epoch += 1
return self.rate
class ExpDecreaseLearningRate(object):
def __init__(self, start_rate = 0.02, end_rate = 0.001, maximum_epoch = 5):
self.start_rate = start_rate
self.end_rate = end_rate
self.maximum_epoch = maximum_epoch
self.rate_diff = self.start_rate - self.end_rate
self.decrease_ratio = numpy.zeros((1, maximum_epoch+1))
for i in xrange(maximum_epoch):
self.decrease_ratio[0, i+1] = maximum_epoch - i
self.decrease_ratio = numpy.exp(self.decrease_ratio)
self.decrease_ratio /= numpy.sum(self.decrease_ratio)
self.decrease_ratio[0, 0] = 1.0
def get_rate(self, epoch):
if epoch < 0:
epoch = 0
current_rate = self.end_rate
if epoch <= self.maximum_epoch:
current_rate = self.end_rate + self.decrease_ratio[0, epoch] * self.rate_diff
return float(current_rate)
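# Usage sketch (illustrative; train_one_epoch is a user-supplied stand-in):
# drive a training loop with the exponential-decay schedule, feeding the
# validation error back after each epoch.
#
#   lr_schedule = LearningRateExpDecay(start_rate=0.08, scale_by=0.5)
#   while lr_schedule.get_rate() > 0.0:
#       error = train_one_epoch(rate=lr_schedule.get_rate())
#       lr_schedule.get_next_rate(current_error=error)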
|
{
"content_hash": "67ed91dafea8bcbe057519241fa5cbac",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 106,
"avg_line_length": 28.75657894736842,
"alnum_prop": 0.5470144131777626,
"repo_name": "ronanki/merlin",
"id": "287ce2972f91ed6942675d7eedc8b53f4d36e1aa",
"size": "6749",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/utils/learn_rates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "10290"
},
{
"name": "Python",
"bytes": "1433129"
},
{
"name": "Scheme",
"bytes": "5478"
},
{
"name": "Shell",
"bytes": "58521"
}
],
"symlink_target": ""
}
|
"""
nozama-cloudsearch
"""
import pkg_resources
from pyramid.view import view_config
@view_config(route_name='home', request_method='GET', renderer='json')
@view_config(route_name='ping', request_method='GET', renderer='json')
def status(request):
"""This is used to 'ping' the web service to check if its running.
:returns: a status dict which the configured view will return as JSON.
The dict has the form::
dict(
status="ok",
name="<project name>",
version="<egg version of nozama.cloudsearch.service>"
)
"""
pkg = pkg_resources.get_distribution('nozama-cloudsearch')
return dict(
status="ok",
name="nozama-cloudsearch",
version=pkg.version,
)
|
{
"content_hash": "11bf8d367e7d615364931ff5eab19e47",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 23.75,
"alnum_prop": 0.6289473684210526,
"repo_name": "oisinmulvihill/nozama-cloudsearch",
"id": "1870266a02e759049b70e9f8aceae62f29f37c37",
"size": "784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nozama/cloudsearch/service/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "623"
},
{
"name": "HTML",
"bytes": "661"
},
{
"name": "Makefile",
"bytes": "2031"
},
{
"name": "Python",
"bytes": "48644"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import functools
import inspect
import os
import shutil
import sys
import tempfile
import re
from errno import ENOENT
from contextlib import contextmanager
from importlib import import_module
from numbers import Integral
from threading import Lock
import uuid
from weakref import WeakValueDictionary
from .compatibility import (get_named_args, getargspec, PY3, unicode,
bind_method, Iterator)
from .core import get_deps
from .optimization import key_split # noqa: F401
system_encoding = sys.getdefaultencoding()
if system_encoding == 'ascii':
system_encoding = 'utf-8'
def deepmap(func, *seqs):
""" Apply function inside nested lists
>>> inc = lambda x: x + 1
>>> deepmap(inc, [[1, 2], [3, 4]])
[[2, 3], [4, 5]]
>>> add = lambda x, y: x + y
>>> deepmap(add, [[1, 2], [3, 4]], [[10, 20], [30, 40]])
[[11, 22], [33, 44]]
"""
if isinstance(seqs[0], (list, Iterator)):
return [deepmap(func, *items) for items in zip(*seqs)]
else:
return func(*seqs)
def homogeneous_deepmap(func, seq):
if not seq:
return seq
n = 0
tmp = seq
while isinstance(tmp, list):
n += 1
tmp = tmp[0]
return ndeepmap(n, func, seq)
def ndeepmap(n, func, seq):
""" Call a function on every element within a nested container
>>> def inc(x):
... return x + 1
>>> L = [[1, 2], [3, 4, 5]]
>>> ndeepmap(2, inc, L)
[[2, 3], [4, 5, 6]]
"""
if n == 1:
return [func(item) for item in seq]
elif n > 1:
return [ndeepmap(n - 1, func, item) for item in seq]
elif isinstance(seq, list):
return func(seq[0])
else:
return func(seq)
@contextmanager
def ignoring(*exceptions):
try:
yield
except exceptions:
pass
def import_required(mod_name, error_msg):
"""Attempt to import a required dependency.
Raises a RuntimeError if the requested module is not available.
"""
try:
return import_module(mod_name)
except ImportError:
raise RuntimeError(error_msg)
@contextmanager
def tmpfile(extension='', dir=None):
extension = '.' + extension.lstrip('.')
handle, filename = tempfile.mkstemp(extension, dir=dir)
os.close(handle)
os.remove(filename)
try:
yield filename
finally:
if os.path.exists(filename):
if os.path.isdir(filename):
shutil.rmtree(filename)
else:
with ignoring(OSError):
os.remove(filename)
@contextmanager
def tmpdir(dir=None):
dirname = tempfile.mkdtemp(dir=dir)
try:
yield dirname
finally:
if os.path.exists(dirname):
if os.path.isdir(dirname):
with ignoring(OSError):
shutil.rmtree(dirname)
else:
with ignoring(OSError):
os.remove(dirname)
@contextmanager
def filetext(text, extension='', open=open, mode='w'):
with tmpfile(extension=extension) as filename:
f = open(filename, mode=mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield filename
@contextmanager
def changed_cwd(new_cwd):
old_cwd = os.getcwd()
os.chdir(new_cwd)
try:
yield
finally:
os.chdir(old_cwd)
@contextmanager
def tmp_cwd(dir=None):
with tmpdir(dir) as dirname:
with changed_cwd(dirname):
yield dirname
@contextmanager
def noop_context():
yield
class IndexCallable(object):
""" Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = 'fn',
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode='t', use_tmpdir=True):
""" Dumps a number of textfiles to disk
d - dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with (tmp_cwd() if use_tmpdir else noop_context()):
for filename, text in d.items():
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
f = open(filename, 'w' + mode)
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with ignoring(OSError):
os.remove(filename)
def concrete(seq):
""" Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def pseudorandom(n, p, random_state=None):
""" Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype='i1')
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n, random_state=None):
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
Number of arrays to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not all(hasattr(random_state, attr) for attr in ['normal', 'beta', 'bytes', 'uniform']):
random_state = np.random.RandomState(random_state)
random_data = random_state.bytes(624 * n * 4) # `n * 624` 32-bit integers
l = list(np.frombuffer(random_data, dtype=np.uint32).reshape((n, -1)))
assert len(l) == n
return l
def is_integer(i):
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
return isinstance(i, Integral) or (isinstance(i, float) and i.is_integer())
ONE_ARITY_BUILTINS = set([abs, all, any, bool, bytearray, bytes, callable, chr,
classmethod, complex, dict, dir, enumerate, eval,
float, format, frozenset, hash, hex, id, int, iter,
len, list, max, min, next, oct, open, ord, range,
repr, reversed, round, set, slice, sorted,
staticmethod, str, sum, tuple,
type, vars, zip, memoryview])
if PY3:
ONE_ARITY_BUILTINS.add(ascii) # noqa: F821
MULTI_ARITY_BUILTINS = set([compile, delattr, divmod, filter, getattr, hasattr,
isinstance, issubclass, map, pow, setattr])
def takes_multiple_arguments(func, varargs=True):
""" Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing(object):
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except Exception:
return False
try:
is_constructor = spec.args[0] == 'self' and isinstance(func, type)
except Exception:
is_constructor = False
if varargs and spec.varargs:
return True
ndefaults = 0 if spec.defaults is None else len(spec.defaults)
return len(spec.args) - ndefaults - is_constructor > 1
class Dispatch(object):
"""Simple single dispatch."""
def __init__(self, name=None):
self._lookup = {}
self._lazy = {}
if name:
self.__name__ = name
def register(self, type, func=None):
"""Register dispatch of `func` on arguments of type `type`"""
def wrapper(func):
if isinstance(type, tuple):
for t in type:
self.register(t, func)
else:
self._lookup[type] = func
return func
return wrapper(func) if func is not None else wrapper
def register_lazy(self, toplevel, func=None):
"""
Register a registration function which will be called if the
*toplevel* module (e.g. 'pandas') is ever loaded.
"""
def wrapper(func):
self._lazy[toplevel] = func
return func
return wrapper(func) if func is not None else wrapper
def dispatch(self, cls):
"""Return the function implementation for the given ``cls``"""
# Fast path with direct lookup on cls
lk = self._lookup
try:
impl = lk[cls]
except KeyError:
pass
else:
return impl
# Is a lazy registration function present?
toplevel, _, _ = cls.__module__.partition('.')
try:
register = self._lazy.pop(toplevel)
except KeyError:
pass
else:
register()
return self.dispatch(cls) # recurse
# Walk the MRO and cache the lookup result
for cls2 in inspect.getmro(cls)[1:]:
if cls2 in lk:
lk[cls] = lk[cls2]
return lk[cls2]
raise TypeError("No dispatch for {0}".format(cls))
def __call__(self, arg, *args, **kwargs):
"""
Call the corresponding method based on type of argument.
"""
meth = self.dispatch(type(arg))
return meth(arg, *args, **kwargs)
@property
def __doc__(self):
try:
func = self.dispatch(object)
return func.__doc__
except TypeError:
return "Single Dispatch for %s" % self.__name__
def ensure_not_exists(filename):
"""
Ensure that a file does not exist.
"""
try:
os.unlink(filename)
except OSError as e:
if e.errno != ENOENT:
raise
def _skip_doctest(line):
# NumPy docstring contains cursor and comment only example
stripped = line.strip()
if stripped == '>>>' or stripped.startswith('>>> #'):
return stripped
elif '>>>' in stripped and '+SKIP' not in stripped:
if '# doctest:' in line:
return line + ', +SKIP'
else:
return line + ' # doctest: +SKIP'
else:
return line
def skip_doctest(doc):
if doc is None:
return ''
return '\n'.join([_skip_doctest(line) for line in doc.split('\n')])
def extra_titles(doc):
lines = doc.split('\n')
titles = {i: lines[i].strip() for i in range(len(lines) - 1)
if lines[i + 1] and all(c == '-' for c in lines[i + 1].strip())}
seen = set()
for i, title in sorted(titles.items()):
if title in seen:
new_title = 'Extra ' + title
lines[i] = lines[i].replace(title, new_title)
lines[i + 1] = lines[i + 1].replace('-' * len(title),
'-' * len(new_title))
else:
seen.add(title)
return '\n'.join(lines)
def ignore_warning(doc, cls, name):
l1 = "This docstring was copied from %s.%s.%s\n" % (cls.__module__, cls.__name__, name)
l2 = "Some inconsistencies with the Dask version may exist."
i = doc.find('\n\n')
if i == -1:
# No blank lines found
# Add our warning to the end with no blank line after
doc = '\n\n'.join([doc, '\n\n', l1, l2])
else:
# Insert our warning
head = doc[:i + 2]
tail = doc[i + 2:]
# Indentation of next line
indent = re.match(r'\s*', tail).group(0)
# Insert the warning, indented, with a blank line before and after
doc = ''.join([
head,
indent, l1,
indent, l2, '\n\n',
tail
])
return doc
def unsupported_arguments(doc, args):
""" Mark unsupported arguments with a disclaimer """
lines = doc.split('\n')
for arg in args:
subset = [(i, line) for i, line in enumerate(lines) if re.match(r'^\s*' + arg + ' ?:', line)]
if len(subset) == 1:
[(i, line)] = subset
lines[i] = line + " (Not supported in Dask)"
return '\n'.join(lines)
def _derived_from(cls, method, ua_args=[]):
""" Helper function for derived_from to ease testing """
# do not use wraps here, as it hides keyword arguments displayed
# in the doc
original_method = getattr(cls, method.__name__)
doc = original_method.__doc__
if doc is None:
doc = ''
# Insert disclaimer that this is a copied docstring
if doc:
doc = ignore_warning(doc, cls, method.__name__)
# Mark unsupported arguments
try:
method_args = get_named_args(method)
original_args = get_named_args(original_method)
not_supported = [m for m in original_args if m not in method_args]
except ValueError:
not_supported = []
if len(ua_args) > 0:
not_supported.extend(ua_args)
if len(not_supported) > 0:
doc = unsupported_arguments(doc, not_supported)
doc = skip_doctest(doc)
doc = extra_titles(doc)
return doc
def derived_from(original_klass, version=None, ua_args=[]):
"""Decorator to attach original class's docstring to the wrapped method.
Parameters
----------
original_klass: type
Original class which the method is derived from
version : str
Original package version which supports the wrapped method
ua_args : list
List of keywords which Dask doesn't support. Keywords existing in
original but not in Dask will automatically be added.
"""
def wrapper(method):
try:
method.__doc__ = _derived_from(original_klass, method, ua_args=ua_args)
return method
except AttributeError:
module_name = original_klass.__module__.split('.')[0]
@functools.wraps(method)
def wrapped(*args, **kwargs):
msg = "Base package doesn't support '{0}'.".format(method.__name__)
if version is not None:
msg2 = " Use {0} {1} or later to use this method."
msg += msg2.format(module_name, version)
raise NotImplementedError(msg)
return wrapped
return wrapper
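# Example (a sketch with stand-in classes): copying a docstring from a base
# implementation onto a dask-style reimplementation.
#
#   class Base(object):
#       def head(self, n=5, compute=True):
#           """Return the first n rows.
#
#           Parameters
#           ----------
#           n : int
#           compute : bool
#           """
#
#   class Wrapped(object):
#       @derived_from(Base)
#       def head(self, n=5):
#           pass
#
#   # Wrapped.head.__doc__ now carries Base.head's docstring plus a
#   # disclaimer, with ``compute`` marked as not supported.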
def funcname(func):
"""Get the name of a function."""
# functools.partial
if isinstance(func, functools.partial):
return funcname(func.func)
# methodcaller
if isinstance(func, methodcaller):
return func.method
module_name = getattr(func, '__module__', None) or ''
type_name = getattr(type(func), '__name__', None) or ''
# toolz.curry
if 'toolz' in module_name and 'curry' == type_name:
return func.func_name
# multipledispatch objects
if 'multipledispatch' in module_name and 'Dispatcher' == type_name:
return func.name
# All other callables
try:
name = func.__name__
if name == '<lambda>':
return 'lambda'
return name
except AttributeError:
return str(func)
def typename(typ):
"""
Return the name of a type
Examples
--------
>>> typename(int)
'int'
>>> from dask.core import literal
>>> typename(literal)
'dask.core.literal'
"""
if not typ.__module__ or typ.__module__ == 'builtins':
return typ.__name__
else:
return typ.__module__ + '.' + typ.__name__
def ensure_bytes(s):
""" Turn string or bytes to bytes
>>> ensure_bytes(u'123')
b'123'
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
if hasattr(s, 'encode'):
return s.encode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def ensure_unicode(s):
""" Turn string or bytes to bytes
>>> ensure_unicode(u'123')
'123'
>>> ensure_unicode('123')
'123'
>>> ensure_unicode(b'123')
'123'
"""
if isinstance(s, unicode):
return s
if hasattr(s, 'decode'):
return s.decode()
msg = "Object %s is neither a bytes object nor has an encode method"
raise TypeError(msg % s)
def digit(n, k, base):
"""
>>> digit(1234, 0, 10)
4
>>> digit(1234, 1, 10)
3
>>> digit(1234, 2, 10)
2
>>> digit(1234, 3, 10)
1
"""
return n // base**k % base
def insert(tup, loc, val):
"""
>>> insert(('a', 'b', 'c'), 0, 'x')
('x', 'b', 'c')
"""
L = list(tup)
L[loc] = val
return tuple(L)
def dependency_depth(dsk):
import toolz
deps, _ = get_deps(dsk)
@toolz.memoize
def max_depth_by_deps(key):
if not deps[key]:
return 1
d = 1 + max(max_depth_by_deps(dep_key) for dep_key in deps[key])
return d
return max(max_depth_by_deps(dep_key) for dep_key in deps.keys())
def memory_repr(num):
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f %s" % (num, x)
num /= 1024.0
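# For instance (illustrative values): memory_repr(1234567) -> '1.2 MB' and
# memory_repr(512) -> '512.0 bytes'.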
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c))
for x, c in zip(zip(*rows), columns))
row_template = ('|' + (' %%-%ds |' * len(columns))) % widths
header = row_template % tuple(columns)
bar = '+%s+' % '+'.join('-' * (w + 2) for w in widths)
data = '\n'.join(row_template % r for r in rows)
return '\n'.join([bar, header, bar, data, bar])
def put_lines(buf, lines):
if any(not isinstance(x, unicode) for x in lines):
lines = [unicode(x) for x in lines]
buf.write('\n'.join(lines))
_method_cache = {}
class methodcaller(object):
"""
Return a callable object that calls the given method on its operand.
Unlike the builtin `operator.methodcaller`, instances of this class are
serializable
"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class itemgetter(object):
"""
Return a callable object that gets an item from the operand
Unlike the builtin `operator.itemgetter`, instances of this class are
serializable
"""
__slots__ = ('index',)
def __init__(self, index):
self.index = index
def __call__(self, x):
return x[self.index]
def __reduce__(self):
return (itemgetter, (self.index,))
def __eq__(self, other):
return type(self) is type(other) and self.index == other.index
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
class SerializableLock(object):
_locks = WeakValueDictionary()
""" A Serializable per-process Lock
This wraps a normal ``threading.Lock`` object and satisfies the same
interface. However, this lock can also be serialized and sent to different
processes. It will not block concurrent operations between processes (for
    this you should look at ``multiprocessing.Lock`` or ``locket.lock_file``)
but will consistently deserialize into the same lock.
So if we make a lock in one process::
lock = SerializableLock()
And then send it over to another process multiple times::
bytes = pickle.dumps(lock)
a = pickle.loads(bytes)
b = pickle.loads(bytes)
Then the deserialized objects will operate as though they were the same
lock, and collide as appropriate.
This is useful for consistently protecting resources on a per-process
level.
The creation of locks is itself not threadsafe.
"""
def __init__(self, token=None):
self.token = token or str(uuid.uuid4())
if self.token in SerializableLock._locks:
self.lock = SerializableLock._locks[self.token]
else:
self.lock = Lock()
SerializableLock._locks[self.token] = self.lock
def acquire(self, *args, **kwargs):
return self.lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
return self.lock.release(*args, **kwargs)
def __enter__(self):
self.lock.__enter__()
def __exit__(self, *args):
self.lock.__exit__(*args)
def locked(self):
return self.lock.locked()
def __getstate__(self):
return self.token
def __setstate__(self, token):
self.__init__(token)
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.token)
__repr__ = __str__
def get_scheduler_lock(collection=None, scheduler=None):
"""Get an instance of the appropriate lock for a certain situation based on
scheduler used."""
from . import multiprocessing
from .base import get_scheduler
actual_get = get_scheduler(collections=[collection],
scheduler=scheduler)
if actual_get == multiprocessing.get:
return multiprocessing.get_context().Manager().Lock()
return SerializableLock()
def ensure_dict(d):
if type(d) is dict:
return d
elif hasattr(d, 'dicts'):
result = {}
for dd in d.dicts.values():
result.update(dd)
return result
return dict(d)
class OperatorMethodMixin(object):
"""A mixin for dynamically implementing operators"""
@classmethod
def _bind_operator(cls, op):
""" bind operator to this class """
name = op.__name__
if name.endswith('_'):
# for and_ and or_
name = name[:-1]
elif name == 'inv':
name = 'invert'
meth = '__{0}__'.format(name)
if name in ('abs', 'invert', 'neg', 'pos'):
bind_method(cls, meth, cls._get_unary_operator(op))
else:
bind_method(cls, meth, cls._get_binary_operator(op))
if name in ('eq', 'gt', 'ge', 'lt', 'le', 'ne', 'getitem'):
return
rmeth = '__r{0}__'.format(name)
bind_method(cls, rmeth, cls._get_binary_operator(op, inv=True))
@classmethod
def _get_unary_operator(cls, op):
""" Must return a method used by unary operator """
raise NotImplementedError
@classmethod
def _get_binary_operator(cls, op, inv=False):
""" Must return a method used by binary operator """
raise NotImplementedError
def partial_by_order(*args, **kwargs):
"""
>>> from operator import add
>>> partial_by_order(5, function=add, other=[(1, 10)])
15
"""
function = kwargs.pop('function')
other = kwargs.pop('other')
args2 = list(args)
for i, arg in other:
args2.insert(i, arg)
return function(*args2, **kwargs)
def is_arraylike(x):
""" Is this object a numpy array or something similar?
Examples
--------
>>> import numpy as np
>>> x = np.ones(5)
>>> is_arraylike(x)
True
>>> is_arraylike(5)
False
>>> is_arraylike('cat')
False
"""
from .base import is_dask_collection
return (
hasattr(x, 'shape') and x.shape and
hasattr(x, 'dtype') and
not any(is_dask_collection(n) for n in x.shape)
)
def natural_sort_key(s):
"""
Sorting `key` function for performing a natural sort on a collection of
strings
See https://en.wikipedia.org/wiki/Natural_sort_order
Parameters
----------
s : str
A string that is an element of the collection being sorted
Returns
-------
tuple[str or int]
Tuple of the parts of the input string where each part is either a
string or an integer
Examples
--------
>>> a = ['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']
>>> sorted(a)
['f0', 'f1', 'f10', 'f11', 'f19', 'f2', 'f20', 'f21', 'f8', 'f9']
>>> sorted(a, key=natural_sort_key)
['f0', 'f1', 'f2', 'f8', 'f9', 'f10', 'f11', 'f19', 'f20', 'f21']
"""
return [int(part) if part.isdigit() else part
for part in re.split(r'(\d+)', s)]
def factors(n):
""" Return the factors of an integer
https://stackoverflow.com/a/6800214/616616
"""
seq = ([i, n // i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)
return set(functools.reduce(list.__add__, seq))
def parse_bytes(s):
""" Parse byte string to numbers
>>> parse_bytes('100')
100
>>> parse_bytes('100 MB')
100000000
>>> parse_bytes('100M')
100000000
>>> parse_bytes('5kB')
5000
>>> parse_bytes('5.4 kB')
5400
>>> parse_bytes('1kiB')
1024
>>> parse_bytes('1e6')
1000000
>>> parse_bytes('1e6 kB')
1000000000
>>> parse_bytes('MB')
1000000
>>> parse_bytes('5 foos') # doctest: +SKIP
ValueError: Could not interpret 'foos' as a byte unit
"""
s = s.replace(' ', '')
if not s[0].isdigit():
s = '1' + s
for i in range(len(s) - 1, -1, -1):
if not s[i].isalpha():
break
index = i + 1
prefix = s[:index]
suffix = s[index:]
try:
n = float(prefix)
except ValueError:
raise ValueError("Could not interpret '%s' as a number" % prefix)
try:
multiplier = byte_sizes[suffix.lower()]
except KeyError:
raise ValueError("Could not interpret '%s' as a byte unit" % suffix)
result = n * multiplier
return int(result)
byte_sizes = {
'kB': 10**3,
'MB': 10**6,
'GB': 10**9,
'TB': 10**12,
'PB': 10**15,
'KiB': 2**10,
'MiB': 2**20,
'GiB': 2**30,
'TiB': 2**40,
'PiB': 2**50,
'B': 1,
'': 1,
}
byte_sizes = {k.lower(): v for k, v in byte_sizes.items()}
byte_sizes.update({k[0]: v for k, v in byte_sizes.items() if k and 'i' not in k})
byte_sizes.update({k[:-1]: v for k, v in byte_sizes.items() if k and 'i' in k})
def has_keyword(func, keyword):
try:
if PY3:
return keyword in inspect.signature(func).parameters
else:
if isinstance(func, functools.partial):
return keyword in inspect.getargspec(func.func).args
else:
return keyword in inspect.getargspec(func).args
except Exception:
return False
def ndimlist(seq):
if not isinstance(seq, (list, tuple)):
return 0
elif not seq:
return 1
else:
return 1 + ndimlist(seq[0])
|
{
"content_hash": "4adefe5db0c493d45757a4fcf62e0938",
"timestamp": "",
"source": "github",
"line_count": 1102,
"max_line_length": 101,
"avg_line_length": 26.058983666061707,
"alnum_prop": 0.5593550858376571,
"repo_name": "jcrist/dask",
"id": "87bec9d95ab89dd0548df5026b7621ebafd92bf1",
"size": "28717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5228"
},
{
"name": "Dockerfile",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "2855507"
},
{
"name": "Shell",
"bytes": "6460"
}
],
"symlink_target": ""
}
|
import tempfile
import pytest
# this is required near the top to do setup of the test suite
# from counterpartylib.test import conftest
from counterpartylib.test.util_test import CURR_DIR as CPLIB_TESTDIR
from picopayments_hub import lib
from picopayments_hub import api
from pycoin.key.validate import is_address_valid
from micropayment_core.keys import address_from_wif
from counterpartylib.test.fixtures.params import DP
from micropayment_core import scripts
FIXTURE_SQL_FILE = CPLIB_TESTDIR + '/fixtures/scenarios/unittest_fixture.sql'
FIXTURE_DB = tempfile.gettempdir() + '/fixtures.unittest_fixture.db'
FUNDING_WIF = DP["addresses"][0][2] # XTC: 91950000000, BTC: 199909140
FUNDING_ADDRESS = address_from_wif(FUNDING_WIF)
def get_tx(txid):
return api.getrawtransaction(tx_hash=txid)
@pytest.mark.usefixtures("picopayments_server")
def test_get_funding_addresses():
assets = ["XCP"]
result = lib.get_funding_addresses(assets)
assert(assets == list(result.keys()))
assert(all([
is_address_valid(a, allowable_netcodes=["XTN"])
for a in result.values()
]))
@pytest.mark.usefixtures("picopayments_server")
def test_validate_read_unknown_asset():
terms = lib.get_terms(["deadbeef"])
assert(terms == {})
@pytest.mark.usefixtures("picopayments_server")
def test_no_keys_with_sufficient_asset():
# create asset
unsigned_rawtx = api.create_issuance(
source=FUNDING_ADDRESS,
asset="A7736697071037023001",
quantity=100000000
)
signed_rawtx = scripts.sign_deposit(get_tx, FUNDING_WIF,
unsigned_rawtx)
api.sendrawtransaction(tx_hex=signed_rawtx)
# fund server
for i in range(3):
addresses = lib.get_funding_addresses(["XCP", "A7736697071037023001"])
for asset, address in addresses.items():
unsigned_rawtx = api.create_send(**{
'source': FUNDING_ADDRESS,
'destination': address,
'asset': asset,
'quantity': 1000000,
'regular_dust_size': 1000000
})
signed_rawtx = scripts.sign_deposit(get_tx, FUNDING_WIF,
unsigned_rawtx)
api.sendrawtransaction(tx_hex=signed_rawtx)
key = lib.find_key_with_funds("XCP", 1000001, 1000000)
assert key is None
@pytest.mark.usefixtures("picopayments_server")
def test_no_keys_with_sufficient_btc():
# create asset
unsigned_rawtx = api.create_issuance(
source=FUNDING_ADDRESS,
asset="A7736697071037023001",
quantity=100000000
)
signed_rawtx = scripts.sign_deposit(get_tx, FUNDING_WIF, unsigned_rawtx)
api.sendrawtransaction(tx_hex=signed_rawtx)
# fund server
for i in range(3):
addresses = lib.get_funding_addresses(["XCP", "A7736697071037023001"])
for asset, address in addresses.items():
unsigned_rawtx = api.create_send(**{
'source': FUNDING_ADDRESS,
'destination': address,
'asset': asset,
'quantity': 1000000,
'regular_dust_size': 1000000
})
signed_rawtx = scripts.sign_deposit(get_tx, FUNDING_WIF,
unsigned_rawtx)
api.sendrawtransaction(tx_hex=signed_rawtx)
key = lib.find_key_with_funds("XCP", 1000000, 1000001)
assert key is None
|
{
"content_hash": "72de62c777fc7e69dd59779f5bc5546a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 33.679611650485434,
"alnum_prop": 0.6344767944652637,
"repo_name": "StorjRND/picopayments",
"id": "38adce4debace3794abd5e6e51047dae5665e693",
"size": "3469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "PLpgSQL",
"bytes": "14584"
},
{
"name": "Python",
"bytes": "116730"
}
],
"symlink_target": ""
}
|
import os
import unittest
import json
import tempfile
from nose.tools import assert_equals, assert_true
from numpy import arange, array, array_equal, mod
from numpy import dtype as dtypeFunc
from test_utils import PySparkTestCaseWithOutputDir
from thunder import ThunderContext
_have_image = False
try:
from PIL import Image
_have_image = True
except ImportError:
# PIL not available; skip tests that require it
Image = None
class TestContextLoading(PySparkTestCaseWithOutputDir):
def setUp(self):
super(TestContextLoading, self).setUp()
self.tsc = ThunderContext(self.sc)
@staticmethod
def _findTestResourcesDir(resourcesDirName="resources"):
testDirPath = os.path.dirname(os.path.realpath(__file__))
testResourcesDirPath = os.path.join(testDirPath, resourcesDirName)
if not os.path.isdir(testResourcesDirPath):
raise IOError("Test resources directory "+testResourcesDirPath+" not found")
return testResourcesDirPath
def test_loadStacksAsSeriesWithShuffle(self):
rangeAry = arange(64*128, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary.stack")
rangeAry.tofile(filePath)
expectedAry = rangeAry.reshape((128, 64), order='F')
rangeSeries = self.tsc.loadImagesAsSeries(filePath, dims=(128, 64))
assert_equals('float32', rangeSeries._dtype) # check before any potential first() calls update this val
rangeSeriesAry = rangeSeries.pack()
assert_equals((128, 64), rangeSeries.dims.count)
assert_equals((128, 64), rangeSeriesAry.shape)
assert_equals('float32', str(rangeSeriesAry.dtype))
assert_true(array_equal(expectedAry, rangeSeriesAry))
def test_load3dStackAsSeriesWithShuffle(self):
rangeAry = arange(32*64*4, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary.stack")
rangeAry.tofile(filePath)
expectedAry = rangeAry.reshape((32, 64, 4), order='F')
rangeSeries = self.tsc.loadImagesAsSeries(filePath, dims=(32, 64, 4))
assert_equals('float32', rangeSeries._dtype)
rangeSeriesAry = rangeSeries.pack()
assert_equals((32, 64, 4), rangeSeries.dims.count)
assert_equals((32, 64, 4), rangeSeriesAry.shape)
assert_equals('float32', str(rangeSeriesAry.dtype))
assert_true(array_equal(expectedAry, rangeSeriesAry))
def __run_loadMultipleStacksAsSeries(self):
rangeAry = arange(64*128, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary01.bin")
rangeAry.tofile(filePath)
expectedAry = rangeAry.reshape((128, 64), order='F')
rangeAry2 = arange(64*128, 2*64*128, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary02.bin")
rangeAry2.tofile(filePath)
expectedAry2 = rangeAry2.reshape((128, 64), order='F')
rangeSeries = self.tsc.loadImagesAsSeries(self.outputdir, dims=(128, 64))
assert_equals('float32', rangeSeries._dtype)
rangeSeriesAry = rangeSeries.pack()
rangeSeriesAry_xpose = rangeSeries.pack(transpose=True)
assert_equals((128, 64), rangeSeries.dims.count)
assert_equals((2, 128, 64), rangeSeriesAry.shape)
assert_equals((2, 64, 128), rangeSeriesAry_xpose.shape)
assert_equals('float32', str(rangeSeriesAry.dtype))
assert_true(array_equal(expectedAry, rangeSeriesAry[0]))
assert_true(array_equal(expectedAry2, rangeSeriesAry[1]))
assert_true(array_equal(expectedAry.T, rangeSeriesAry_xpose[0]))
assert_true(array_equal(expectedAry2.T, rangeSeriesAry_xpose[1]))
def test_loadMultipleMultipointStacksAsSeries(self):
rangeAry = arange(64*128, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary01.bin")
rangeAry.tofile(filePath)
expectedAry = rangeAry.reshape((32, 32, 8), order='F')
rangeAry2 = arange(64*128, 2*64*128, dtype=dtypeFunc('int16'))
filePath = os.path.join(self.outputdir, "rangeary02.bin")
rangeAry2.tofile(filePath)
expectedAry2 = rangeAry2.reshape((32, 32, 8), order='F')
rangeSeries = self.tsc.loadImagesAsSeries(self.outputdir, dims=(32, 32, 8), nplanes=2)
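        # nplanes=2 splits each 8-plane (32, 32, 8) volume into four 2-plane
        # records; with two input files we expect 8 records in total below.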
assert_equals('float32', rangeSeries._dtype)
rangeSeriesAry = rangeSeries.pack()
assert_equals((32, 32, 2), rangeSeries.dims.count)
assert_equals((8, 32, 32, 2), rangeSeriesAry.shape)
assert_equals('float32', str(rangeSeriesAry.dtype))
assert_true(array_equal(expectedAry[:, :, :2], rangeSeriesAry[0]))
assert_true(array_equal(expectedAry[:, :, 2:4], rangeSeriesAry[1]))
assert_true(array_equal(expectedAry[:, :, 4:6], rangeSeriesAry[2]))
assert_true(array_equal(expectedAry[:, :, 6:], rangeSeriesAry[3]))
assert_true(array_equal(expectedAry2[:, :, :2], rangeSeriesAry[4]))
assert_true(array_equal(expectedAry2[:, :, 2:4], rangeSeriesAry[5]))
assert_true(array_equal(expectedAry2[:, :, 4:6], rangeSeriesAry[6]))
assert_true(array_equal(expectedAry2[:, :, 6:], rangeSeriesAry[7]))
@unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
def __run_loadTifAsSeries(self):
tmpAry = arange(60*120, dtype=dtypeFunc('uint16'))
rangeAry = mod(tmpAry, 255).astype('uint8').reshape((60, 120))
pilImg = Image.fromarray(rangeAry)
filePath = os.path.join(self.outputdir, "rangetif01.tif")
pilImg.save(filePath)
del pilImg, tmpAry
rangeSeries = self.tsc.loadImagesAsSeries(self.outputdir, inputFormat="tif-stack")
assert_equals('float16', rangeSeries._dtype) # check before any potential first() calls update this val
rangeSeriesAry = rangeSeries.pack()
assert_equals((60, 120), rangeSeries.dims.count) # 2d tif now loaded as 2d image; was 3d with singleton z dim
assert_equals((60, 120), rangeSeriesAry.shape)
assert_equals('float16', str(rangeSeriesAry.dtype))
assert_true(array_equal(rangeAry, rangeSeriesAry))
@unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
def test_loadTestTifAsSeriesWithShuffle(self):
testResourcesDir = TestContextLoading._findTestResourcesDir()
imagePath = os.path.join(testResourcesDir, "multilayer_tif", "dotdotdot_lzw.tif")
testimg_pil = Image.open(imagePath)
testimg_arys = list()
testimg_arys.append(array(testimg_pil)) # original shape 70, 75
testimg_pil.seek(1)
testimg_arys.append(array(testimg_pil))
testimg_pil.seek(2)
testimg_arys.append(array(testimg_pil))
rangeSeries = self.tsc.loadImagesAsSeries(imagePath, inputFormat="tif-stack")
assert_true(rangeSeries._dtype.startswith("float"))
rangeSeriesAry = rangeSeries.pack()
rangeSeriesAry_xpose = rangeSeries.pack(transpose=True)
assert_equals((70, 75, 3), rangeSeries.dims.count)
assert_equals((70, 75, 3), rangeSeriesAry.shape)
assert_equals((3, 75, 70), rangeSeriesAry_xpose.shape)
assert_true(rangeSeriesAry.dtype.kind == "f")
assert_true(array_equal(testimg_arys[0], rangeSeriesAry[:, :, 0]))
assert_true(array_equal(testimg_arys[1], rangeSeriesAry[:, :, 1]))
assert_true(array_equal(testimg_arys[2], rangeSeriesAry[:, :, 2]))
assert_true(array_equal(testimg_arys[0].T, rangeSeriesAry_xpose[0]))
assert_true(array_equal(testimg_arys[1].T, rangeSeriesAry_xpose[1]))
assert_true(array_equal(testimg_arys[2].T, rangeSeriesAry_xpose[2]))
@unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
def test_loadMultipleTifsAsSeriesWithShuffle(self):
tmpAry = arange(60*120, dtype=dtypeFunc('uint16'))
rangeAry = mod(tmpAry, 255).astype('uint8').reshape((60, 120))
pilImg = Image.fromarray(rangeAry)
filePath = os.path.join(self.outputdir, "rangetif01.tif")
pilImg.save(filePath)
tmpAry = arange(60*120, 2*60*120, dtype=dtypeFunc('uint16'))
rangeAry2 = mod(tmpAry, 255).astype('uint8').reshape((60, 120))
pilImg = Image.fromarray(rangeAry2)
filePath = os.path.join(self.outputdir, "rangetif02.tif")
pilImg.save(filePath)
del pilImg, tmpAry
rangeSeries = self.tsc.loadImagesAsSeries(self.outputdir, inputFormat="tif-stack")
assert_equals('float16', rangeSeries._dtype)
rangeSeriesAry = rangeSeries.pack()
rangeSeriesAry_xpose = rangeSeries.pack(transpose=True)
assert_equals((60, 120), rangeSeries.dims.count) # 2d tif now loaded as 2d image; was 3d with singleton z dim
assert_equals((2, 60, 120), rangeSeriesAry.shape)
assert_equals((2, 120, 60), rangeSeriesAry_xpose.shape)
assert_equals('float16', str(rangeSeriesAry.dtype))
assert_true(array_equal(rangeAry, rangeSeriesAry[0]))
assert_true(array_equal(rangeAry2, rangeSeriesAry[1]))
assert_true(array_equal(rangeAry.T, rangeSeriesAry_xpose[0]))
assert_true(array_equal(rangeAry2.T, rangeSeriesAry_xpose[1]))
@unittest.skipIf(not _have_image, "PIL/pillow not installed or not functional")
def test_loadMultipleMultipointTifsAsSeries(self):
testResourcesDir = TestContextLoading._findTestResourcesDir()
imagesPath = os.path.join(testResourcesDir, "multilayer_tif", "dotdotdot_lzw*.tif")
# load only one file, second is a copy of this one
testimg_pil = Image.open(os.path.join(testResourcesDir, "multilayer_tif", "dotdotdot_lzw.tif"))
testimg_arys = [array(testimg_pil)]
for idx in xrange(1, 3):
testimg_pil.seek(idx)
testimg_arys.append(array(testimg_pil))
rangeSeries = self.tsc.loadImagesAsSeries(imagesPath, inputFormat="tif-stack", nplanes=1)
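        # nplanes=1 splits each 3-page tif into three single-plane records;
        # the glob matches two files (original plus a copy), giving 6 records.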
assert_equals((70, 75), rangeSeries.dims.count)
rangeSeriesAry = rangeSeries.pack()
assert_equals((6, 70, 75), rangeSeriesAry.shape)
for idx in xrange(6):
assert_true(array_equal(testimg_arys[idx % 3], rangeSeriesAry[idx]))
@staticmethod
def _tempFileWithPaths(f, blob):
f.write(blob)
f.flush()
return f.name
def test_loadParams(self):
params = json.dumps({"name": "test1", "value": [1, 2, 3]})
f = tempfile.NamedTemporaryFile()
path = TestContextLoading._tempFileWithPaths(f, params)
d = self.tsc.loadParams(path)
assert(d.names() == ["test1"])
assert(array_equal(d.values(), [1, 2, 3]))
params = json.dumps([{"name": "test0", "value": [1, 2, 3]},
{"name": "test1", "value": [4, 5, 6]}])
f = tempfile.NamedTemporaryFile()
path = TestContextLoading._tempFileWithPaths(f, params)
d = self.tsc.loadParams(path)
assert(d.names() == ["test0", "test1"])
assert(array_equal(d.values(), [[1, 2, 3], [4, 5, 6]]))
assert(array_equal(d.values("test0"), [1, 2, 3]))
def test_loadSeriesFromArray(self):
target = array([[0, 1], [0, 2]])
d1 = self.tsc.loadSeriesFromArray([[0, 1], [0, 2]])
d2 = self.tsc.loadSeriesFromArray(array([[0, 1], [0, 2]]))
assert(array_equal(d1.collectValuesAsArray(), target))
        assert(d1.keys().collect() == [(0,), (1,)])
        assert(array_equal(d2.collectValuesAsArray(), target))
        assert(d2.keys().collect() == [(0,), (1,)])
target = array([[0, 1]])
d1 = self.tsc.loadSeriesFromArray([0, 1])
d2 = self.tsc.loadSeriesFromArray(array([0, 1]))
assert(array_equal(d1.collectValuesAsArray(), target))
        assert(d1.keys().collect() == [(0,)])
        assert(array_equal(d2.collectValuesAsArray(), target))
        assert(d2.keys().collect() == [(0,)])
def test_loadImagesFromArray(self):
target = array([[[0, 1], [0, 2]]])
d1 = self.tsc.loadImagesFromArray([[0, 1], [0, 2]])
d2 = self.tsc.loadImagesFromArray(array([[0, 1], [0, 2]]))
assert(array_equal(d1.collectValuesAsArray(), target))
assert(d1.keys().collect() == [0])
assert(array_equal(d2.collectValuesAsArray(), target))
assert(d2.keys().collect() == [0])
target = array([[[0, 1], [0, 2]], [[0, 1], [0, 2]]])
d1 = self.tsc.loadImagesFromArray([[[0, 1], [0, 2]], [[0, 1], [0, 2]]])
d2 = self.tsc.loadImagesFromArray(array([[[0, 1], [0, 2]], [[0, 1], [0, 2]]]))
assert(array_equal(d1.collectValuesAsArray(), target))
assert(d1.keys().collect() == [0, 1])
assert(array_equal(d2.collectValuesAsArray(), target))
assert(d2.keys().collect() == [0, 1])
class TestContextWriting(PySparkTestCaseWithOutputDir):
def setUp(self):
super(TestContextWriting, self).setUp()
self.tsc = ThunderContext(self.sc)
def test_export_npy(self):
from numpy import load
a = array([[1, 2], [2, 3]])
filename = self.outputdir + "/test.npy"
self.tsc.export(a, filename)
aa = load(filename)
assert(array_equal(aa, a))
filename = self.outputdir + "/test"
self.tsc.export(a, filename, outputFormat="npy", overwrite=True)
aa = load(filename + ".npy")
assert(array_equal(aa, a))
def test_export_mat(self):
from scipy.io import loadmat
a = array([[1, 2], [2, 3]])
filename = self.outputdir + "/test.mat"
self.tsc.export(a, filename)
aa = loadmat(filename)
assert(array_equal(aa['test'], a))
filename = self.outputdir + "/test"
self.tsc.export(a, filename, outputFormat="mat", overwrite=True)
aa = loadmat(filename + ".mat")
assert(array_equal(aa['test'], a))
filename = self.outputdir + "/test"
self.tsc.export(a, filename, outputFormat="mat", varname="tmp", overwrite=True)
aa = loadmat(filename + ".mat")
assert(array_equal(aa['tmp'], a))
def test_export_txt(self):
from numpy import loadtxt
a = array([[1, 2], [2, 3]])
filename = self.outputdir + "/test.txt"
self.tsc.export(a, filename)
aa = loadtxt(filename)
assert(array_equal(aa, a))
filename = self.outputdir + "/test"
self.tsc.export(a, filename, outputFormat="txt", overwrite=True)
aa = loadtxt(filename + ".txt")
assert(array_equal(aa, a))
class TestLoadIrregularImages(PySparkTestCaseWithOutputDir):
def setUp(self):
super(TestLoadIrregularImages, self).setUp()
self.tsc = ThunderContext(self.sc)
def _generate_array(self, dtype):
self.ary = arange(256, dtype=dtypeFunc(dtype)).reshape((16, 4, 4)) # 16 pages of 4x4 images
def _write_tiffs(self):
import thunder.rdds.fileio.tifffile as tifffile
writer1 = tifffile.TiffWriter(os.path.join(self.outputdir, "tif01.tif"))
writer1.save(self.ary[:8].transpose((0, 2, 1)), photometric="minisblack") # write out 8 pages
writer1.close()
del writer1
writer2 = tifffile.TiffWriter(os.path.join(self.outputdir, "tif02.tif"))
writer2.save(self.ary.transpose((0, 2, 1)), photometric="minisblack") # write out all 16 pages
writer2.close()
del writer2
def _write_stacks(self):
        with open(os.path.join(self.outputdir, "stack01.bin"), "wb") as f:
            self.ary[:8].tofile(f)
        with open(os.path.join(self.outputdir, "stack02.bin"), "wb") as f:
            self.ary.tofile(f)
def _run_tst(self, imgType, dtype):
self._generate_array(dtype)
if imgType.lower().startswith('tif'):
self._write_tiffs()
inputFormat, ext, dims = "tif", "tif", None
elif imgType.lower().startswith("stack"):
self._write_stacks()
inputFormat, ext, dims = "stack", "bin", (16, 4, 4)
else:
raise ValueError("Unknown imgType: %s" % imgType)
# with nplanes=2, this should yield a 12 record Images object, which after converting to
# a series and packing should be a 12 x 4 x 4 x 2 array.
# renumber=True is required in this case in order to ensure sensible results.
series = self.tsc.loadImagesAsSeries(self.outputdir, inputFormat=inputFormat, ext=ext,
blockSize=(2, 1, 1), blockSizeUnits="pixels",
nplanes=2, dims=dims, renumber=True)
packedAry = series.pack()
assert_equals((12, 4, 4, 2), packedAry.shape)
assert_true(array_equal(self.ary[0:2], packedAry[0].T))
assert_true(array_equal(self.ary[2:4], packedAry[1].T))
assert_true(array_equal(self.ary[4:6], packedAry[2].T))
assert_true(array_equal(self.ary[6:8], packedAry[3].T)) # first image was only 4 2-plane records
assert_true(array_equal(self.ary[0:2], packedAry[4].T))
assert_true(array_equal(self.ary[2:4], packedAry[5].T))
assert_true(array_equal(self.ary[4:6], packedAry[6].T))
assert_true(array_equal(self.ary[6:8], packedAry[7].T))
assert_true(array_equal(self.ary[8:10], packedAry[8].T))
assert_true(array_equal(self.ary[10:12], packedAry[9].T))
assert_true(array_equal(self.ary[12:14], packedAry[10].T))
assert_true(array_equal(self.ary[14:16], packedAry[11].T))
def test_loadMultipleSignedIntTifsAsSeries(self):
self._run_tst('tif', 'int16')
def test_loadMultipleUnsignedIntTifsAsSeries(self):
self._run_tst('tif', 'uint16')
# can't currently have binary stack files of different sizes, since we have
# fixed `dims` for all stacks. leaving in place b/c it seems like something
# to support soon.
# def test_loadMultipleBinaryStacksAsSeries(self):
# self._run_tst('stack', 'uint16')
|
{
"content_hash": "8e0f02ea8ad73becdff04066847baff7",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 118,
"avg_line_length": 44.223587223587224,
"alnum_prop": 0.6368131562864604,
"repo_name": "zhwa/thunder",
"id": "bc2043ca7c7ba53d55380f17f257c1cca4098d9b",
"size": "17999",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "test/test_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "958319"
},
{
"name": "Shell",
"bytes": "2439"
}
],
"symlink_target": ""
}
|
import os
import unittest
import pytest
from pyspark.sql.functions import split
from sparknlp.annotator import *
from sparknlp.base import *
from test.util import SparkSessionForTest
@pytest.mark.slow
class MultiClassifierDLTestSpec(unittest.TestCase):
def setUp(self):
self.data = SparkSessionForTest.spark.read.option("header", "true") \
.csv(path="file:///" + os.getcwd() + "/../src/test/resources/classifier/e2e.csv") \
.withColumn("labels", split("mr", ", ")) \
.drop("mr")
def runTest(self):
document_assembler = DocumentAssembler() \
.setInputCol("ref") \
.setOutputCol("document")
sentence_embeddings = BertSentenceEmbeddings.pretrained("sent_small_bert_L2_128") \
.setInputCols("document") \
.setOutputCol("sentence_embeddings")
multi_classifier = MultiClassifierDLApproach() \
.setInputCols("sentence_embeddings") \
.setOutputCol("category") \
.setLabelColumn("labels") \
.setBatchSize(64) \
.setMaxEpochs(20) \
.setLr(0.001) \
.setThreshold(0.5) \
.setRandomSeed(44)
pipeline = Pipeline(stages=[
document_assembler,
sentence_embeddings,
multi_classifier
])
model = pipeline.fit(self.data)
model.stages[-1].write().overwrite().save('./tmp_multiClassifierDL_model')
        multi_classifierdl_model = MultiClassifierDLModel.load("./tmp_multiClassifierDL_model") \
            .setInputCols(["sentence_embeddings"]) \
            .setOutputCol("class")
        print(multi_classifierdl_model.getClasses())
|
{
"content_hash": "53fa37635ba5ca1a9cd41bae89ed6a19",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 98,
"avg_line_length": 32.37735849056604,
"alnum_prop": 0.6083916083916084,
"repo_name": "JohnSnowLabs/spark-nlp",
"id": "c09712cfbf121fd8dcb0056029d6506522c8377f",
"size": "2308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/annotator/classifier_dl/multi_classifier_dl_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14452"
},
{
"name": "Java",
"bytes": "223289"
},
{
"name": "Makefile",
"bytes": "819"
},
{
"name": "Python",
"bytes": "1694517"
},
{
"name": "Scala",
"bytes": "4116435"
},
{
"name": "Shell",
"bytes": "5286"
}
],
"symlink_target": ""
}
|
from six import iteritems
class Annotations(object):
"""Handles and stores (key,value) annotations for formulae"""
def __init__(self, initial_annotations=None):
if initial_annotations is not None:
self._annotations = initial_annotations
else:
self._annotations = {}
def add(self, formula, annotation, value=None):
"""Adds an annotation for the given formula, possibly with the
specified value"""
term_annotations = self._annotations.setdefault(formula, {})
values = term_annotations.setdefault(annotation, set())
if value is not None:
values.add(value)
def remove(self, formula):
"""Removes all the annotations for the given formula"""
if formula in self._annotations:
del self._annotations[formula]
def remove_annotation(self, formula, annotation):
"""Removes the given annotation for the given formula"""
if formula in self._annotations:
if annotation in self._annotations[formula]:
del self._annotations[formula][annotation]
def remove_value(self, formula, annotation, value):
"""Removes the given annotation for the given formula"""
if formula in self._annotations:
if annotation in self._annotations[formula]:
d = self._annotations[formula][annotation]
if value in d:
d.remove(value)
def has_annotation(self, formula, annotation, value=None):
"""Returns True iff the given formula has the given annotation. If
Value is specified, True is returned only if the value is
matching.
"""
if formula in self._annotations:
if annotation in self._annotations[formula]:
if value is None:
return True
else:
return (value in self._annotations[formula][annotation])
return False
def annotations(self, formula):
"""Returns a dictionary containing all the annotations for the given
formula as keys and the respective values. None is returned if
formula has no annotations.
"""
try:
return self._annotations[formula]
except KeyError:
return None
def all_annotated_formulae(self, annotation, value=None):
"""Returns the set of all the formulae having the given annotation
key. If Value is specified, only the formula having the
specified value are returned.
"""
res = []
for f,amap in iteritems(self._annotations):
if annotation in amap:
if value is None:
res.append(f)
else:
if value in amap[annotation]:
res.append(f)
return set(res)
def __contains__(self, formula):
"""Checks if formula has at least one annotation"""
return formula in self._annotations
def __str__(self):
res = ["Annotations: {"]
for t, m in iteritems(self._annotations):
res.append(str(t) + " -> ")
for a, lst in iteritems(m):
res.append(":" + str(a) + "{")
for v in lst:
res.append(str(v) + ", ")
res.append("} ")
return "".join(res + ["}"])
def __getitem__(self, formula):
return self.annotations(formula)
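# Hedged usage sketch (added for illustration; real keys are pysmt formula
# objects, plain strings stand in for them here):
if __name__ == "__main__":
    ann = Annotations()
    ann.add("f1", "named", "goal")
    ann.add("f1", "trusted")  # annotation with no value
    assert ann.has_annotation("f1", "named", "goal")
    assert ann.all_annotated_formulae("named") == {"f1"}
    ann.remove_value("f1", "named", "goal")
    assert ann.has_annotation("f1", "named")  # key survives value removal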
|
{
"content_hash": "ad4c0afb1ebd7cc3acbf1970a68435f1",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 76,
"avg_line_length": 34.53465346534654,
"alnum_prop": 0.5739678899082569,
"repo_name": "chubbymaggie/pysmt",
"id": "aa9b8f675e9c28db21d5e9eb891747f848cb798d",
"size": "4139",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysmt/smtlib/annotations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "741089"
},
{
"name": "SMT",
"bytes": "4905619"
},
{
"name": "Shell",
"bytes": "3945"
}
],
"symlink_target": ""
}
|
from pytest import mark, param
from .conftest import read_json_sample
@mark.parametrize(
"query_string,expected_show_inactive",
[param("", False), param("show_inactive=yes", True)],
)
def test_ratings_ad(service, query_string: str, expected_show_inactive: bool):
uri = "/ratings/ad/"
uri += "?" + query_string if query_string else ""
resp = service.get(uri)
context = resp.context
assert "request" in context
assert "current_page" in context
assert "response" in context
assert "gametype" in context
assert context["gametype"] == "ad"
assert context["current_page"] == 0
assert context["show_inactive"] == expected_show_inactive
assert context["response"] == read_json_sample("ratings_ad")
def test_ratings_ad_json(service):
resp = service.get("/ratings/ad/0.json")
assert resp.json()["response"] == read_json_sample("ratings_ad")
|
{
"content_hash": "3d3df408259cf5be26b0fcaa84a48968",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 30.1,
"alnum_prop": 0.6722037652270211,
"repo_name": "em92/pickup-rating",
"id": "ef7322f6f1ab786e7f6fe3cd21c7cb106db09d6a",
"size": "903",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ratings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1311"
},
{
"name": "HTML",
"bytes": "1106"
},
{
"name": "JavaScript",
"bytes": "22578"
},
{
"name": "Python",
"bytes": "6412"
}
],
"symlink_target": ""
}
|
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
from collections.abc import Sequence
# First, the top-level packages:
# TODO: This list is a duplicate of the dependencies in setup.cfg "all", but
# some of the package names are different from the pip-install name (e.g.,
# beautifulsoup4 -> bs4).
_optional_deps = ['h5py', 'sympy', 'tqdm', 'twobody']
_deps = {k.upper(): k for k in _optional_deps}
# Any subpackages that have different import behavior:
_deps['MATPLOTLIB'] = ('matplotlib', 'matplotlib.pyplot')
_deps['GALPY'] = ('galpy', 'galpy.orbit', 'galpy.potential')
__all__ = [f"HAS_{pkg}" for pkg in _deps]
def __getattr__(name):
if name in __all__:
module_name = name[4:]
modules = _deps[module_name]
if not isinstance(modules, Sequence) or isinstance(modules, str):
modules = [modules]
for module in modules:
try:
importlib.import_module(module)
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}.")
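# Hedged usage sketch (added for illustration): accessing any HAS_<PKG> name
# triggers the module-level __getattr__ above (PEP 562), which probes the
# import and returns a bool.
if __name__ == "__main__":
    import sys
    _this_module = sys.modules[__name__]
    for _flag in __all__:
        print(_flag, getattr(_this_module, _flag))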
|
{
"content_hash": "5e0aff4bd59b006cb8c008e0234d0640",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 33.611111111111114,
"alnum_prop": 0.6396694214876033,
"repo_name": "adrn/gala",
"id": "2be4015767e311b4fe4b535619a97ac79d1625cc",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "gala/tests/optional_deps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "176465"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Cython",
"bytes": "154003"
},
{
"name": "Python",
"bytes": "714200"
},
{
"name": "TeX",
"bytes": "3702"
}
],
"symlink_target": ""
}
|
"""Generates various info tables from SPIR-V JSON grammar."""
import errno
import json
import os.path
import re
# Prefix for all C variables generated by this script.
PYGEN_VARIABLE_PREFIX = 'pygen_variable'
# Extensions to recognize, but which don't necessarily come from the SPIR-V
# core or KHR grammar files. Get this list from the SPIR-V registry web page.
# NOTE: Only put things on this list if it is not in those grammar files.
EXTENSIONS_FROM_SPIRV_REGISTRY_AND_NOT_FROM_GRAMMARS = """
SPV_AMD_gcn_shader
SPV_AMD_gpu_shader_half_float
SPV_AMD_gpu_shader_int16
SPV_AMD_shader_trinary_minmax
SPV_KHR_non_semantic_info
"""
OUTPUT_LANGUAGE = 'c'
def make_path_to_file(f):
"""Makes all ancestor directories to the given file, if they don't yet
exist.
Arguments:
f: The file whose ancestor directories are to be created.
"""
dir = os.path.dirname(os.path.abspath(f))
try:
os.makedirs(dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(dir):
pass
else:
raise
def convert_min_required_version(version):
"""Converts the minimal required SPIR-V version encoded in the grammar to
the symbol in SPIRV-Tools."""
if version is None:
return 'SPV_SPIRV_VERSION_WORD(1, 0)'
if version == 'None':
return '0xffffffffu'
return 'SPV_SPIRV_VERSION_WORD({})'.format(version.replace('.', ','))
def convert_max_required_version(version):
"""Converts the maximum required SPIR-V version encoded in the grammar to
the symbol in SPIRV-Tools."""
if version is None:
return '0xffffffffu'
return 'SPV_SPIRV_VERSION_WORD({})'.format(version.replace('.', ','))
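# Hedged examples (added for illustration) of how grammar version fields map
# to SPIRV-Tools symbols:
assert convert_min_required_version(None) == 'SPV_SPIRV_VERSION_WORD(1, 0)'
assert convert_min_required_version('None') == '0xffffffffu'
assert convert_max_required_version('1.3') == 'SPV_SPIRV_VERSION_WORD(1,3)'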
def compose_capability_list(caps):
"""Returns a string containing a braced list of capabilities as enums.
Arguments:
- caps: a sequence of capability names
Returns:
a string containing the braced list of SpvCapability* or spv::Capability:: enums named by caps.
"""
base_string = 'SpvCapability'
global OUTPUT_LANGUAGE
if OUTPUT_LANGUAGE == 'c++':
base_string = 'spv::Capability::'
return '{' + ', '.join([(base_string + '{}').format(c) for c in caps]) + '}'
def get_capability_array_name(caps):
"""Returns the name of the array containing all the given capabilities.
Args:
- caps: a sequence of capability names
"""
if not caps:
return 'nullptr'
return '{}_caps_{}'.format(PYGEN_VARIABLE_PREFIX, ''.join(caps))
def generate_capability_arrays(caps):
"""Returns the arrays of capabilities.
Arguments:
    - caps: a sequence of sequences of capability names
"""
caps = sorted(set([tuple(c) for c in caps if c]))
cap_str = 'SpvCapability'
global OUTPUT_LANGUAGE
if OUTPUT_LANGUAGE == 'c++':
cap_str = 'spv::Capability'
arrays = [
'static const ' + cap_str + ' {}[] = {};'.format(
get_capability_array_name(c), compose_capability_list(c))
for c in caps]
return '\n'.join(arrays)
def compose_extension_list(exts):
"""Returns a string containing a braced list of extensions as enums.
Arguments:
- exts: a sequence of extension names
Returns:
a string containing the braced list of extensions named by exts.
"""
return '{' + ', '.join(
['spvtools::Extension::k{}'.format(e) for e in exts]) + '}'
def get_extension_array_name(extensions):
"""Returns the name of the array containing all the given extensions.
Args:
- extensions: a sequence of extension names
"""
if not extensions:
return 'nullptr'
else:
return '{}_exts_{}'.format(
PYGEN_VARIABLE_PREFIX, ''.join(extensions))
def generate_extension_arrays(extensions):
"""Returns the arrays of extensions.
Arguments:
    - extensions: a sequence of sequences of extension names
"""
extensions = sorted(set([tuple(e) for e in extensions if e]))
arrays = [
'static const spvtools::Extension {}[] = {};'.format(
get_extension_array_name(e), compose_extension_list(e))
for e in extensions]
return '\n'.join(arrays)
def convert_operand_kind(operand_tuple):
"""Returns the corresponding operand type used in spirv-tools for the given
operand kind and quantifier used in the JSON grammar.
Arguments:
- operand_tuple: a tuple of two elements:
- operand kind: used in the JSON grammar
- quantifier: '', '?', or '*'
Returns:
a string of the enumerant name in spv_operand_type_t
"""
kind, quantifier = operand_tuple
# The following cases are where we differ between the JSON grammar and
# spirv-tools.
if kind == 'IdResultType':
kind = 'TypeId'
elif kind == 'IdResult':
kind = 'ResultId'
elif kind == 'IdMemorySemantics' or kind == 'MemorySemantics':
kind = 'MemorySemanticsId'
elif kind == 'IdScope' or kind == 'Scope':
kind = 'ScopeId'
elif kind == 'IdRef':
kind = 'Id'
elif kind == 'ImageOperands':
kind = 'Image'
elif kind == 'Dim':
kind = 'Dimensionality'
elif kind == 'ImageFormat':
kind = 'SamplerImageFormat'
elif kind == 'KernelEnqueueFlags':
kind = 'KernelEnqFlags'
elif kind == 'LiteralExtInstInteger':
kind = 'ExtensionInstructionNumber'
elif kind == 'LiteralSpecConstantOpInteger':
kind = 'SpecConstantOpNumber'
elif kind == 'LiteralContextDependentNumber':
kind = 'TypedLiteralNumber'
elif kind == 'PairLiteralIntegerIdRef':
kind = 'LiteralIntegerId'
elif kind == 'PairIdRefLiteralInteger':
kind = 'IdLiteralInteger'
elif kind == 'PairIdRefIdRef': # Used by OpPhi in the grammar
kind = 'Id'
if kind == 'FPRoundingMode':
kind = 'FpRoundingMode'
elif kind == 'FPFastMathMode':
kind = 'FpFastMathMode'
if quantifier == '?':
kind = 'Optional{}'.format(kind)
elif quantifier == '*':
kind = 'Variable{}'.format(kind)
return 'SPV_OPERAND_TYPE_{}'.format(
re.sub(r'([a-z])([A-Z])', r'\1_\2', kind).upper())
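# Hedged worked examples (added for illustration) of the mapping above:
assert convert_operand_kind(('IdRef', '?')) == 'SPV_OPERAND_TYPE_OPTIONAL_ID'
assert convert_operand_kind(('IdResultType', '')) == 'SPV_OPERAND_TYPE_TYPE_ID'
assert convert_operand_kind(('ImageOperands', '')) == 'SPV_OPERAND_TYPE_IMAGE'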
class InstInitializer(object):
"""Instances holds a SPIR-V instruction suitable for printing as the
initializer for spv_opcode_desc_t."""
def __init__(self, opname, caps, exts, operands, version, lastVersion):
"""Initialization.
Arguments:
- opname: opcode name (with the 'Op' prefix)
- caps: a sequence of capability names required by this opcode
- exts: a sequence of names of extensions enabling this enumerant
- operands: a sequence of (operand-kind, operand-quantifier) tuples
- version: minimal SPIR-V version required for this opcode
- lastVersion: last version of SPIR-V that includes this opcode
"""
assert opname.startswith('Op')
self.opname = opname[2:] # Remove the "Op" prefix.
self.num_caps = len(caps)
self.caps_mask = get_capability_array_name(caps)
self.num_exts = len(exts)
self.exts = get_extension_array_name(exts)
self.operands = [convert_operand_kind(o) for o in operands]
self.fix_syntax()
operands = [o[0] for o in operands]
self.ref_type_id = 'IdResultType' in operands
self.def_result_id = 'IdResult' in operands
self.version = convert_min_required_version(version)
self.lastVersion = convert_max_required_version(lastVersion)
def fix_syntax(self):
"""Fix an instruction's syntax, adjusting for differences between the
officially released grammar and how SPIRV-Tools uses the grammar.
Fixes:
- ExtInst should not end with SPV_OPERAND_VARIABLE_ID.
https://github.com/KhronosGroup/SPIRV-Tools/issues/233
"""
if (self.opname == 'ExtInst'
and self.operands[-1] == 'SPV_OPERAND_TYPE_VARIABLE_ID'):
self.operands.pop()
def __str__(self):
global OUTPUT_LANGUAGE
base_str = 'SpvOp'
if OUTPUT_LANGUAGE == 'c++':
base_str = 'spv::Op::Op'
template = ['{{"{opname}"', base_str + '{opname}',
'{num_caps}', '{caps_mask}',
'{num_operands}', '{{{operands}}}',
'{def_result_id}', '{ref_type_id}',
'{num_exts}', '{exts}',
'{min_version}', '{max_version}}}']
return ', '.join(template).format(
opname=self.opname,
num_caps=self.num_caps,
caps_mask=self.caps_mask,
num_operands=len(self.operands),
operands=', '.join(self.operands),
def_result_id=(1 if self.def_result_id else 0),
ref_type_id=(1 if self.ref_type_id else 0),
num_exts=self.num_exts,
exts=self.exts,
min_version=self.version,
max_version=self.lastVersion)
class ExtInstInitializer(object):
"""Instances holds a SPIR-V extended instruction suitable for printing as
the initializer for spv_ext_inst_desc_t."""
def __init__(self, opname, opcode, caps, operands):
"""Initialization.
Arguments:
- opname: opcode name
- opcode: enumerant value for this opcode
- caps: a sequence of capability names required by this opcode
- operands: a sequence of (operand-kind, operand-quantifier) tuples
"""
self.opname = opname
self.opcode = opcode
self.num_caps = len(caps)
self.caps_mask = get_capability_array_name(caps)
self.operands = [convert_operand_kind(o) for o in operands]
self.operands.append('SPV_OPERAND_TYPE_NONE')
def __str__(self):
template = ['{{"{opname}"', '{opcode}', '{num_caps}', '{caps_mask}',
'{{{operands}}}}}']
return ', '.join(template).format(
opname=self.opname,
opcode=self.opcode,
num_caps=self.num_caps,
caps_mask=self.caps_mask,
operands=', '.join(self.operands))
def generate_instruction(inst, is_ext_inst):
"""Returns the C initializer for the given SPIR-V instruction.
Arguments:
- inst: a dict containing information about a SPIR-V instruction
- is_ext_inst: a bool indicating whether |inst| is an extended
instruction.
Returns:
a string containing the C initializer for spv_opcode_desc_t or
spv_ext_inst_desc_t
"""
opname = inst.get('opname')
opcode = inst.get('opcode')
caps = inst.get('capabilities', [])
exts = inst.get('extensions', [])
operands = inst.get('operands', {})
operands = [(o['kind'], o.get('quantifier', '')) for o in operands]
min_version = inst.get('version', None)
max_version = inst.get('lastVersion', None)
assert opname is not None
if is_ext_inst:
return str(ExtInstInitializer(opname, opcode, caps, operands))
else:
return str(InstInitializer(opname, caps, exts, operands, min_version, max_version))
def generate_instruction_table(inst_table):
"""Returns the info table containing all SPIR-V instructions, sorted by
opcode, and prefixed by capability arrays.
Note:
- the built-in sorted() function is guaranteed to be stable.
https://docs.python.org/3/library/functions.html#sorted
Arguments:
- inst_table: a list containing all SPIR-V instructions.
"""
inst_table = sorted(inst_table, key=lambda k: (k['opcode'], k['opname']))
caps_arrays = generate_capability_arrays(
[inst.get('capabilities', []) for inst in inst_table])
exts_arrays = generate_extension_arrays(
[inst.get('extensions', []) for inst in inst_table])
insts = [generate_instruction(inst, False) for inst in inst_table]
insts = ['static const spv_opcode_desc_t kOpcodeTableEntries[] = {{\n'
' {}\n}};'.format(',\n '.join(insts))]
return '{}\n\n{}\n\n{}'.format(caps_arrays, exts_arrays, '\n'.join(insts))
def generate_extended_instruction_table(json_grammar, set_name, operand_kind_prefix=""):
"""Returns the info table containing all SPIR-V extended instructions,
sorted by opcode, and prefixed by capability arrays.
Arguments:
    - json_grammar: the JSON grammar for the extended instruction set,
      including its instructions.
- set_name: the name of the extended instruction set.
- operand_kind_prefix: the prefix, if any, to add to the front
of operand kind names.
"""
if operand_kind_prefix:
prefix_operand_kind_names(operand_kind_prefix, json_grammar)
inst_table = json_grammar["instructions"]
set_name = set_name.replace(".", "_")
inst_table = sorted(inst_table, key=lambda k: k['opcode'])
caps = [inst.get('capabilities', []) for inst in inst_table]
caps_arrays = generate_capability_arrays(caps)
insts = [generate_instruction(inst, True) for inst in inst_table]
insts = ['static const spv_ext_inst_desc_t {}_entries[] = {{\n'
' {}\n}};'.format(set_name, ',\n '.join(insts))]
return '{}\n\n{}'.format(caps_arrays, '\n'.join(insts))
class EnumerantInitializer(object):
"""Prints an enumerant as the initializer for spv_operand_desc_t."""
def __init__(self, enumerant, value, caps, exts, parameters, version, lastVersion):
"""Initialization.
Arguments:
- enumerant: enumerant name
- value: enumerant value
- caps: a sequence of capability names required by this enumerant
- exts: a sequence of names of extensions enabling this enumerant
- parameters: a sequence of (operand-kind, operand-quantifier) tuples
- version: minimal SPIR-V version required for this opcode
    - lastVersion: last SPIR-V version in which this opcode appears
"""
self.enumerant = enumerant
self.value = value
self.num_caps = len(caps)
self.caps = get_capability_array_name(caps)
self.num_exts = len(exts)
self.exts = get_extension_array_name(exts)
self.parameters = [convert_operand_kind(p) for p in parameters]
self.version = convert_min_required_version(version)
self.lastVersion = convert_max_required_version(lastVersion)
def __str__(self):
template = ['{{"{enumerant}"', '{value}', '{num_caps}',
'{caps}', '{num_exts}', '{exts}',
'{{{parameters}}}', '{min_version}',
'{max_version}}}']
return ', '.join(template).format(
enumerant=self.enumerant,
value=self.value,
num_caps=self.num_caps,
caps=self.caps,
num_exts=self.num_exts,
exts=self.exts,
parameters=', '.join(self.parameters),
min_version=self.version,
max_version=self.lastVersion)
def generate_enum_operand_kind_entry(entry, extension_map):
"""Returns the C initializer for the given operand enum entry.
Arguments:
- entry: a dict containing information about an enum entry
- extension_map: a dict mapping enum value to list of extensions
Returns:
a string containing the C initializer for spv_operand_desc_t
"""
enumerant = entry.get('enumerant')
value = entry.get('value')
caps = entry.get('capabilities', [])
if value in extension_map:
exts = extension_map[value]
else:
exts = []
params = entry.get('parameters', [])
params = [p.get('kind') for p in params]
params = zip(params, [''] * len(params))
version = entry.get('version', None)
max_version = entry.get('lastVersion', None)
assert enumerant is not None
assert value is not None
return str(EnumerantInitializer(
enumerant, value, caps, exts, params, version, max_version))
def generate_enum_operand_kind(enum, synthetic_exts_list):
"""Returns the C definition for the given operand kind.
It's a static const named array of spv_operand_desc_t.
Also appends to |synthetic_exts_list| a list of extension lists
used.
"""
kind = enum.get('kind')
assert kind is not None
    # Sort all enumerants according to their values, but otherwise
    # preserve their order so that the first name listed in the grammar
    # is used as the preferred name for disassembly.
if enum.get('category') == 'ValueEnum':
def functor(k): return (k['value'])
else:
def functor(k): return (int(k['value'], 16))
entries = sorted(enum.get('enumerants', []), key=functor)
# SubgroupEqMask and SubgroupEqMaskKHR are the same number with
# same semantics, but one has no extension list while the other
# does. Both should have the extension list.
# So create a mapping from enum value to the union of the extensions
# across all those grammar entries. Preserve order.
extension_map = {}
for e in entries:
value = e.get('value')
extension_map[value] = []
for e in entries:
value = e.get('value')
exts = e.get('extensions', [])
for ext in exts:
if ext not in extension_map[value]:
extension_map[value].append(ext)
synthetic_exts_list.extend(extension_map.values())
name = '{}_{}Entries'.format(PYGEN_VARIABLE_PREFIX, kind)
entries = [' {}'.format(generate_enum_operand_kind_entry(e, extension_map))
for e in entries]
template = ['static const spv_operand_desc_t {name}[] = {{',
'{entries}', '}};']
entries = '\n'.join(template).format(
name=name,
entries=',\n'.join(entries))
return kind, name, entries
def generate_operand_kind_table(enums):
"""Returns the info table containing all SPIR-V operand kinds."""
# We only need to output info tables for those operand kinds that are enums.
enums = [e for e in enums if e.get('category') in ['ValueEnum', 'BitEnum']]
caps = [entry.get('capabilities', [])
for enum in enums
for entry in enum.get('enumerants', [])]
caps_arrays = generate_capability_arrays(caps)
exts = [entry.get('extensions', [])
for enum in enums
for entry in enum.get('enumerants', [])]
enums = [generate_enum_operand_kind(e, exts) for e in enums]
exts_arrays = generate_extension_arrays(exts)
# We have a few operand kinds that require their optional counterpart to
# exist in the operand info table.
optional_enums = ['ImageOperands', 'AccessQualifier', 'MemoryAccess', 'PackedVectorFormat']
optional_enums = [e for e in enums if e[0] in optional_enums]
enums.extend(optional_enums)
enum_kinds, enum_names, enum_entries = zip(*enums)
# Mark the last few as optional ones.
enum_quantifiers = [''] * (len(enums) - len(optional_enums)) + ['?'] * len(optional_enums)
# And we don't want redefinition of them.
enum_entries = enum_entries[:-len(optional_enums)]
enum_kinds = [convert_operand_kind(e)
for e in zip(enum_kinds, enum_quantifiers)]
table_entries = zip(enum_kinds, enum_names, enum_names)
table_entries = [' {{{}, ARRAY_SIZE({}), {}}}'.format(*e)
for e in table_entries]
template = [
'static const spv_operand_desc_group_t {p}_OperandInfoTable[] = {{',
'{enums}', '}};']
table = '\n'.join(template).format(
p=PYGEN_VARIABLE_PREFIX, enums=',\n'.join(table_entries))
return '\n\n'.join((caps_arrays,) + (exts_arrays,) + enum_entries + (table,))
def get_extension_list(instructions, operand_kinds):
"""Returns extensions as an alphabetically sorted list of strings."""
    things_with_an_extensions_field = list(instructions)
enumerants = sum([item.get('enumerants', [])
for item in operand_kinds], [])
things_with_an_extensions_field.extend(enumerants)
extensions = sum([item.get('extensions', [])
for item in things_with_an_extensions_field
if item.get('extensions')], [])
for item in EXTENSIONS_FROM_SPIRV_REGISTRY_AND_NOT_FROM_GRAMMARS.split():
# If it's already listed in a grammar, then don't put it in the
# special exceptions list.
assert item not in extensions, 'Extension %s is already in a grammar file' % item
extensions.extend(
EXTENSIONS_FROM_SPIRV_REGISTRY_AND_NOT_FROM_GRAMMARS.split())
    # This extension makes the validator ignore the type-declaration
    # uniqueness check. It should only be used for legacy autogenerated test
    # files containing multiple instances of the same type declaration, when
    # fixing the test by other means is too difficult. It shouldn't be used
    # for any other reason.
extensions.append('SPV_VALIDATOR_ignore_type_decl_unique')
return sorted(set(extensions))
def get_capabilities(operand_kinds):
"""Returns capabilities as a list of JSON objects, in order of
appearance."""
enumerants = sum([item.get('enumerants', []) for item in operand_kinds
if item.get('kind') in ['Capability']], [])
return enumerants
def generate_extension_enum(extensions):
"""Returns enumeration containing extensions declared in the grammar."""
return ',\n'.join(['k' + extension for extension in extensions])
def generate_extension_to_string_mapping(extensions):
"""Returns mapping function from extensions to corresponding strings."""
function = 'const char* ExtensionToString(Extension extension) {\n'
function += ' switch (extension) {\n'
template = ' case Extension::k{extension}:\n' \
' return "{extension}";\n'
function += ''.join([template.format(extension=extension)
for extension in extensions])
function += ' }\n\n return "";\n}'
return function
def generate_string_to_extension_mapping(extensions):
"""Returns mapping function from strings to corresponding extensions."""
function = '''
bool GetExtensionFromString(const char* str, Extension* extension) {{
static const char* known_ext_strs[] = {{ {strs} }};
static const Extension known_ext_ids[] = {{ {ids} }};
const auto b = std::begin(known_ext_strs);
const auto e = std::end(known_ext_strs);
const auto found = std::equal_range(
b, e, str, [](const char* str1, const char* str2) {{
return std::strcmp(str1, str2) < 0;
}});
if (found.first == e || found.first == found.second) return false;
*extension = known_ext_ids[found.first - b];
return true;
}}
'''.format(strs=', '.join(['"{}"'.format(e) for e in extensions]),
ids=', '.join(['Extension::k{}'.format(e) for e in extensions]))
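    # Note: the generated std::equal_range binary search requires
    # known_ext_strs to be sorted; get_extension_list() returns a sorted
    # list, so tables built from it satisfy this precondition.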
return function
def generate_capability_to_string_mapping(operand_kinds):
"""Returns mapping function from capabilities to corresponding strings.
We take care to avoid emitting duplicate values.
"""
cap_str = 'SpvCapability'
cap_join = ''
global OUTPUT_LANGUAGE
if OUTPUT_LANGUAGE == 'c++':
cap_str = 'spv::Capability'
cap_join = '::'
function = 'const char* CapabilityToString(' + cap_str + ' capability) {\n'
function += ' switch (capability) {\n'
template = ' case ' + cap_str + cap_join + '{capability}:\n' \
' return "{capability}";\n'
emitted = set() # The values of capabilities we already have emitted
for capability in get_capabilities(operand_kinds):
value = capability.get('value')
if value not in emitted:
emitted.add(value)
function += template.format(capability=capability.get('enumerant'))
function += ' case ' + cap_str + cap_join + 'Max:\n' \
' assert(0 && "Attempting to convert ' + cap_str + cap_join + 'Max to string");\n' \
' return "";\n'
function += ' }\n\n return "";\n}'
return function
def generate_all_string_enum_mappings(extensions, operand_kinds):
"""Returns all string-to-enum / enum-to-string mapping tables."""
tables = []
tables.append(generate_extension_to_string_mapping(extensions))
tables.append(generate_string_to_extension_mapping(extensions))
tables.append(generate_capability_to_string_mapping(operand_kinds))
return '\n\n'.join(tables)
def precondition_operand_kinds(operand_kinds):
"""For operand kinds that have the same number, make sure they all have the
same extension list."""
# Map operand kind and value to list of the union of extensions
# for same-valued enumerants.
exts = {}
for kind_entry in operand_kinds:
kind = kind_entry.get('kind')
for enum_entry in kind_entry.get('enumerants', []):
value = enum_entry.get('value')
key = kind + '.' + str(value)
if key in exts:
exts[key].extend(enum_entry.get('extensions', []))
else:
exts[key] = enum_entry.get('extensions', [])
exts[key] = sorted(set(exts[key]))
# Now make each entry the same list.
for kind_entry in operand_kinds:
kind = kind_entry.get('kind')
for enum_entry in kind_entry.get('enumerants', []):
value = enum_entry.get('value')
key = kind + '.' + str(value)
if len(exts[key]) > 0:
enum_entry['extensions'] = exts[key]
return operand_kinds
def prefix_operand_kind_names(prefix, json_dict):
"""Modifies json_dict, by prefixing all the operand kind names
with the given prefix. Also modifies their uses in the instructions
to match.
"""
old_to_new = {}
for operand_kind in json_dict["operand_kinds"]:
old_name = operand_kind["kind"]
new_name = prefix + old_name
operand_kind["kind"] = new_name
old_to_new[old_name] = new_name
for instruction in json_dict["instructions"]:
for operand in instruction.get("operands", []):
replacement = old_to_new.get(operand["kind"])
if replacement is not None:
operand["kind"] = replacement
def main():
import argparse
parser = argparse.ArgumentParser(description='Generate SPIR-V info tables')
parser.add_argument('--spirv-core-grammar', metavar='<path>',
type=str, required=False,
help='input JSON grammar file for core SPIR-V '
'instructions')
parser.add_argument('--extinst-debuginfo-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for DebugInfo extended '
'instruction set')
parser.add_argument('--extinst-cldebuginfo100-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for OpenCL.DebugInfo.100 '
'extended instruction set')
parser.add_argument('--extinst-glsl-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for GLSL extended '
'instruction set')
parser.add_argument('--extinst-opencl-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for OpenCL extended '
'instruction set')
parser.add_argument('--output-language',
type=str, required=False, default='c',
choices=['c','c++'],
help='specify output language type')
parser.add_argument('--core-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for core SPIR-V instructions')
parser.add_argument('--glsl-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for GLSL extended instruction set')
parser.add_argument('--opencl-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for OpenCL extended instruction set')
parser.add_argument('--operand-kinds-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for operand kinds')
parser.add_argument('--extension-enum-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for extension enumeration')
parser.add_argument('--enum-string-mapping-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for enum-string mappings')
parser.add_argument('--extinst-vendor-grammar', metavar='<path>',
type=str, required=False, default=None,
help='input JSON grammar file for vendor extended '
                             'instruction set')
parser.add_argument('--vendor-insts-output', metavar='<path>',
type=str, required=False, default=None,
help='output file for vendor extended instruction set')
parser.add_argument('--vendor-operand-kind-prefix', metavar='<string>',
type=str, required=False, default=None,
help='prefix for operand kinds (to disambiguate operand type enums)')
args = parser.parse_args()
global OUTPUT_LANGUAGE
OUTPUT_LANGUAGE = args.output_language
# The GN build system needs this because it doesn't handle quoting
# empty string arguments well.
if args.vendor_operand_kind_prefix == "...nil...":
args.vendor_operand_kind_prefix = ""
if (args.core_insts_output is None) != \
(args.operand_kinds_output is None):
print('error: --core-insts-output and --operand-kinds-output '
'should be specified together.')
exit(1)
if args.operand_kinds_output and not (args.spirv_core_grammar and
args.extinst_debuginfo_grammar and
args.extinst_cldebuginfo100_grammar):
print('error: --operand-kinds-output requires --spirv-core-grammar '
'and --extinst-debuginfo-grammar '
'and --extinst-cldebuginfo100-grammar')
exit(1)
if (args.glsl_insts_output is None) != \
(args.extinst_glsl_grammar is None):
print('error: --glsl-insts-output and --extinst-glsl-grammar '
'should be specified together.')
exit(1)
if (args.opencl_insts_output is None) != \
(args.extinst_opencl_grammar is None):
print('error: --opencl-insts-output and --extinst-opencl-grammar '
'should be specified together.')
exit(1)
if (args.vendor_insts_output is None) != \
(args.extinst_vendor_grammar is None):
print('error: --vendor-insts-output and '
'--extinst-vendor-grammar should be specified together.')
exit(1)
if all([args.core_insts_output is None,
args.glsl_insts_output is None,
args.opencl_insts_output is None,
args.vendor_insts_output is None,
args.extension_enum_output is None,
args.enum_string_mapping_output is None]):
print('error: at least one output should be specified.')
exit(1)
if args.spirv_core_grammar is not None:
with open(args.spirv_core_grammar) as json_file:
core_grammar = json.loads(json_file.read())
with open(args.extinst_debuginfo_grammar) as debuginfo_json_file:
debuginfo_grammar = json.loads(debuginfo_json_file.read())
with open(args.extinst_cldebuginfo100_grammar) as cldebuginfo100_json_file:
cldebuginfo100_grammar = json.loads(cldebuginfo100_json_file.read())
prefix_operand_kind_names("CLDEBUG100_", cldebuginfo100_grammar)
instructions = []
instructions.extend(core_grammar['instructions'])
instructions.extend(debuginfo_grammar['instructions'])
instructions.extend(cldebuginfo100_grammar['instructions'])
operand_kinds = []
operand_kinds.extend(core_grammar['operand_kinds'])
operand_kinds.extend(debuginfo_grammar['operand_kinds'])
operand_kinds.extend(cldebuginfo100_grammar['operand_kinds'])
extensions = get_extension_list(instructions, operand_kinds)
operand_kinds = precondition_operand_kinds(operand_kinds)
if args.core_insts_output is not None:
make_path_to_file(args.core_insts_output)
make_path_to_file(args.operand_kinds_output)
with open(args.core_insts_output, 'w') as f:
f.write(generate_instruction_table(
core_grammar['instructions']))
with open(args.operand_kinds_output, 'w') as f:
f.write(generate_operand_kind_table(operand_kinds))
if args.extension_enum_output is not None:
make_path_to_file(args.extension_enum_output)
with open(args.extension_enum_output, 'w') as f:
f.write(generate_extension_enum(extensions))
if args.enum_string_mapping_output is not None:
make_path_to_file(args.enum_string_mapping_output)
with open(args.enum_string_mapping_output, 'w') as f:
f.write(generate_all_string_enum_mappings(
extensions, operand_kinds))
if args.extinst_glsl_grammar is not None:
with open(args.extinst_glsl_grammar) as json_file:
grammar = json.loads(json_file.read())
make_path_to_file(args.glsl_insts_output)
with open(args.glsl_insts_output, 'w') as f:
f.write(generate_extended_instruction_table(
grammar, 'glsl'))
if args.extinst_opencl_grammar is not None:
with open(args.extinst_opencl_grammar) as json_file:
grammar = json.loads(json_file.read())
make_path_to_file(args.opencl_insts_output)
with open(args.opencl_insts_output, 'w') as f:
f.write(generate_extended_instruction_table(
grammar, 'opencl'))
if args.extinst_vendor_grammar is not None:
with open(args.extinst_vendor_grammar) as json_file:
grammar = json.loads(json_file.read())
make_path_to_file(args.vendor_insts_output)
name = args.extinst_vendor_grammar
start = name.find('extinst.') + len('extinst.')
name = name[start:-len('.grammar.json')].replace('-', '_')
with open(args.vendor_insts_output, 'w') as f:
f.write(generate_extended_instruction_table(
grammar, name, args.vendor_operand_kind_prefix))
if __name__ == '__main__':
main()
|
{
"content_hash": "b36b64485580282622aa9868237e6627",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 101,
"avg_line_length": 39.72778402699662,
"alnum_prop": 0.6090945127130641,
"repo_name": "KhronosGroup/SPIRV-Tools",
"id": "6b7167b86ab2d922af523f78f78f7e0937f7be82",
"size": "35918",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "utils/generate_grammar_tables.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9840"
},
{
"name": "C",
"bytes": "69083"
},
{
"name": "C++",
"bytes": "19619222"
},
{
"name": "CMake",
"bytes": "122652"
},
{
"name": "Emacs Lisp",
"bytes": "1654"
},
{
"name": "Go",
"bytes": "729085"
},
{
"name": "HTML",
"bytes": "493"
},
{
"name": "JavaScript",
"bytes": "136536"
},
{
"name": "Makefile",
"bytes": "15739"
},
{
"name": "Python",
"bytes": "167792"
},
{
"name": "Ruby",
"bytes": "2885"
},
{
"name": "Shell",
"bytes": "30372"
},
{
"name": "Starlark",
"bytes": "24927"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import namedtuple
import enum
import re
import sqlalchemy as sa
import slugify
from hyputils.memex.db import Base
from hyputils.memex.db import mixins
from hyputils.memex import pubid
from hyputils.memex.util.group import split_groupid
from hyputils.memex.security import security
GROUP_NAME_MIN_LENGTH = 3
GROUP_NAME_MAX_LENGTH = 25
GROUP_DESCRIPTION_MAX_LENGTH = 250
AUTHORITY_PROVIDED_ID_PATTERN = r"^[a-zA-Z0-9._\-+!~*()']+$"
AUTHORITY_PROVIDED_ID_MAX_LENGTH = 1024
class JoinableBy(enum.Enum):
authority = "authority"
class ReadableBy(enum.Enum):
members = "members"
world = "world"
class WriteableBy(enum.Enum):
authority = "authority"
members = "members"
class Group(Base, mixins.Timestamps):
__tablename__ = "group"
__table_args__ = (
# Add a composite index of the (authority, authority_provided_id)
# columns. Also impose uniqueness such that no two records may share
# the same (authority, authority_provided_id) composite
#
# See:
#
# * http://docs.sqlalchemy.org/en/latest/core/constraints.html#indexes
sa.Index(
"ix__group__groupid", "authority", "authority_provided_id", unique=True
),
)
id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
# We don't expose the integer PK to the world, so we generate a short
# random string to use as the publicly visible ID.
pubid = sa.Column(sa.Text(), default=pubid.generate, unique=True, nullable=False)
authority = sa.Column(sa.UnicodeText(), nullable=False)
name = sa.Column(sa.UnicodeText(), nullable=False, index=True)
creator_id = sa.Column(sa.Integer, sa.ForeignKey("user.id"))
creator = sa.orm.relationship("User")
description = sa.Column(sa.UnicodeText())
#: Allow authorities to define their own unique identifier for a group
#: (versus the pubid). This identifier is owned by the authority/client
#: versus ``pubid``, which is owned and controlled by the service.
authority_provided_id = sa.Column(sa.UnicodeText(), nullable=True)
#: Which type of user is allowed to join this group, possible values are:
#: authority, None
joinable_by = sa.Column(
sa.Enum(JoinableBy, name="group_joinable_by"), nullable=True
)
#: Which type of user is allowed to read annotations in this group,
#: possible values are: authority, members, world
readable_by = sa.Column(
sa.Enum(ReadableBy, name="group_readable_by"), nullable=True, index=True
)
#: Which type of user is allowed to write to this group, possible values
#: are: authority, members
writeable_by = sa.Column(
sa.Enum(WriteableBy, name="group_writeable_by"), nullable=True
)
@property
def groupid(self):
if self.authority_provided_id is None:
return None
return "group:{authority_provided_id}@{authority}".format(
authority_provided_id=self.authority_provided_id, authority=self.authority
)
@groupid.setter
def groupid(self, value):
"""
Deconstruct a formatted ``groupid`` and set its constituent properties
on the instance.
If ``groupid`` is set to None, set ``authority_provided_id`` to None
but leave authority untouched—this allows a caller to nullify the
``authority_provided_id`` field.
:raises ValueError: if ``groupid`` is an invalid format
"""
if value is None:
self.authority_provided_id = None
else:
groupid_parts = split_groupid(value)
self.authority_provided_id = groupid_parts["authority_provided_id"]
self.authority = groupid_parts["authority"]
# Group membership
members = sa.orm.relationship(
"User",
secondary="user_group",
backref=sa.orm.backref("groups", order_by="Group.name"),
)
organization_id = sa.Column(
sa.Integer, sa.ForeignKey("organization.id"), nullable=True
)
organization = sa.orm.relationship("Organization")
def __init__(self, **kwargs):
super(Group, self).__init__(**kwargs)
@sa.orm.validates("name")
def validate_name(self, key, name):
if not GROUP_NAME_MIN_LENGTH <= len(name) <= GROUP_NAME_MAX_LENGTH:
raise ValueError(
"name must be between {min} and {max} characters "
"long".format(min=GROUP_NAME_MIN_LENGTH, max=GROUP_NAME_MAX_LENGTH)
)
return name
@sa.orm.validates("authority_provided_id")
def validate_authority_provided_id(self, key, authority_provided_id):
if not authority_provided_id:
return None
if not re.match(AUTHORITY_PROVIDED_ID_PATTERN, authority_provided_id):
raise ValueError(
"authority_provided_id must only contain characters allowed"
r" in encoded URIs: [a-zA-Z0-9._\-+!~*()']"
)
if len(authority_provided_id) > AUTHORITY_PROVIDED_ID_MAX_LENGTH:
raise ValueError(
"authority_provided_id must be {max} characters or fewer"
"characters long".format(max=AUTHORITY_PROVIDED_ID_MAX_LENGTH)
)
return authority_provided_id
@property
def slug(self):
"""A version of this group's name suitable for use in a URL."""
return slugify.slugify(self.name)
@property
def type(self):
"""
The "type" of this group, e.g. "open" or "private".
:rtype: string
:raises ValueError: if the type of the group isn't recognized
"""
self_type_flags = TypeFlags(
joinable_by=self.joinable_by,
readable_by=self.readable_by,
writeable_by=self.writeable_by,
)
for type_, type_flags in (
("open", OPEN_GROUP_TYPE_FLAGS),
("private", PRIVATE_GROUP_TYPE_FLAGS),
("restricted", RESTRICTED_GROUP_TYPE_FLAGS),
):
if self_type_flags == type_flags:
return type_
raise ValueError(
"This group doesn't seem to match any known type of group. "
"This shouldn't be in the database!"
)
@property
def is_public(self):
return self.readable_by == ReadableBy.world
def __acl__(self):
terms = []
join_principal = _join_principal(self)
if join_principal is not None:
terms.append((security.Allow, join_principal, "join"))
read_principal = _read_principal(self)
if read_principal is not None:
terms.append((security.Allow, read_principal, "read"))
flag_principal = _flag_principal(self)
if flag_principal is not None:
terms.append((security.Allow, flag_principal, "flag"))
write_principal = _write_principal(self)
if write_principal is not None:
terms.append((security.Allow, write_principal, "write"))
if self.creator:
# The creator of the group should be able to update it
terms.append((security.Allow, self.creator.userid, "admin"))
terms.append((security.Allow, self.creator.userid, "moderate"))
# The creator may update this group in an upsert context
terms.append((security.Allow, self.creator.userid, "upsert"))
# This authority principal may be used to grant auth clients
# permissions for groups within their authority
authority_principal = "client_authority:{}".format(self.authority)
# auth_clients that have the same authority as the target group
# may add members to it
terms.append((security.Allow, authority_principal, "member_add"))
# auth_clients that have the same authority as this group
# should be allowed to update it
terms.append((security.Allow, authority_principal, "admin"))
# auth_clients with matching authority should be able to read
# the group
terms.append((security.Allow, authority_principal, "read"))
terms.append(security.DENY_ALL)
return terms
def __repr__(self):
return "<Group: %s>" % self.slug
@classmethod
def created_by(cls, session, user):
"""Return a query object filtering groups by creator."""
return session.query(cls).filter(Group.creator == user)
def _join_principal(group):
return {JoinableBy.authority: "authority:{}".format(group.authority)}.get(
group.joinable_by
)
def _read_principal(group):
return {
ReadableBy.members: "group:{}".format(group.pubid),
ReadableBy.world: security.Everyone,
}.get(group.readable_by)
def _flag_principal(group):
# If a user can read (see) annotations within this group,
# they can also flag them—but they need to be logged in
# (``pyramid.security.Authenticated``)
return {
ReadableBy.members: "group:{}".format(group.pubid),
ReadableBy.world: security.Authenticated,
}.get(group.readable_by)
def _write_principal(group):
return {
WriteableBy.authority: "authority:{}".format(group.authority),
WriteableBy.members: "group:{}".format(group.pubid),
}.get(group.writeable_by)
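# Illustrative mapping (hypothetical values): for a group whose readable_by
# is ReadableBy.members and whose pubid is "abc123", _read_principal()
# returns "group:abc123"; for ReadableBy.world it returns security.Everyone,
# while _flag_principal() returns security.Authenticated so that only
# logged-in users may flag.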
TypeFlags = namedtuple("TypeFlags", "joinable_by readable_by writeable_by")
OPEN_GROUP_TYPE_FLAGS = TypeFlags(
joinable_by=None, readable_by=ReadableBy.world, writeable_by=WriteableBy.authority
)
PRIVATE_GROUP_TYPE_FLAGS = TypeFlags(
joinable_by=JoinableBy.authority,
readable_by=ReadableBy.members,
writeable_by=WriteableBy.members,
)
RESTRICTED_GROUP_TYPE_FLAGS = TypeFlags(
joinable_by=None, readable_by=ReadableBy.world, writeable_by=WriteableBy.members
)
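# Reading the type-flag constants above: an "open" group is world-readable
# but writeable only by its authority; a "restricted" group is
# world-readable and writeable by members; a "private" group is member-only
# for both reading and writing, and joinable within its authority.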
USER_GROUP_TABLE = sa.Table(
"user_group",
Base.metadata,
sa.Column("id", sa.Integer, autoincrement=True, primary_key=True),
sa.Column("user_id", sa.Integer, sa.ForeignKey("user.id"), nullable=False),
sa.Column("group_id", sa.Integer, sa.ForeignKey("group.id"), nullable=False),
sa.UniqueConstraint("user_id", "group_id"),
)
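# Usage sketch (illustrative only; the session and all values below are
# hypothetical):
#
#   group = Group(name="Biology 101", authority="example.com")
#   group.groupid = "group:bio-101@example.com"
#   group.authority_provided_id  # -> "bio-101"
#   group.is_public              # -> False until readable_by is set to world
#   session.add(group)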
|
{
"content_hash": "49609a0f190267e74d31c0287dd1024c",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 86,
"avg_line_length": 33.40327868852459,
"alnum_prop": 0.6399685904986259,
"repo_name": "tgbugs/hypush",
"id": "97092db4ed9e1173fee9b02737a3d3be04e0329b",
"size": "10217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyputils/memex/models/group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8410"
}
],
"symlink_target": ""
}
|
"""Image manipulation API.
Classes defined in this module:
Image: class used to encapsulate image information and transformations for
that image.
The current manipulations that are available are resize, rotate,
horizontal_flip, vertical_flip, crop and im_feeling_lucky.
It should be noted that each transform can only be called once per image
per execute_transforms() call.
"""
import struct
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import blobstore
from google.appengine.api import datastore_types
from google.appengine.api.images import images_service_pb
from google.appengine.runtime import apiproxy_errors
BlobKey = datastore_types.BlobKey
JPEG = images_service_pb.OutputSettings.JPEG
PNG = images_service_pb.OutputSettings.PNG
WEBP = images_service_pb.OutputSettings.WEBP
BMP = -1
GIF = -2
ICO = -3
TIFF = -4
OUTPUT_ENCODING_TYPES = frozenset([JPEG, PNG, WEBP])
UNCHANGED_ORIENTATION = images_service_pb.InputSettings.UNCHANGED_ORIENTATION
CORRECT_ORIENTATION = images_service_pb.InputSettings.CORRECT_ORIENTATION
ORIENTATION_CORRECTION_TYPE = frozenset([UNCHANGED_ORIENTATION,
CORRECT_ORIENTATION])
TOP_LEFT = images_service_pb.CompositeImageOptions.TOP_LEFT
TOP_CENTER = images_service_pb.CompositeImageOptions.TOP
TOP_RIGHT = images_service_pb.CompositeImageOptions.TOP_RIGHT
CENTER_LEFT = images_service_pb.CompositeImageOptions.LEFT
CENTER_CENTER = images_service_pb.CompositeImageOptions.CENTER
CENTER_RIGHT = images_service_pb.CompositeImageOptions.RIGHT
BOTTOM_LEFT = images_service_pb.CompositeImageOptions.BOTTOM_LEFT
BOTTOM_CENTER = images_service_pb.CompositeImageOptions.BOTTOM
BOTTOM_RIGHT = images_service_pb.CompositeImageOptions.BOTTOM_RIGHT
ANCHOR_TYPES = frozenset([TOP_LEFT, TOP_CENTER, TOP_RIGHT, CENTER_LEFT,
CENTER_CENTER, CENTER_RIGHT, BOTTOM_LEFT,
BOTTOM_CENTER, BOTTOM_RIGHT])
MAX_TRANSFORMS_PER_REQUEST = 10
MAX_COMPOSITES_PER_REQUEST = 16
class Error(Exception):
"""Base error class for this module."""
class TransformationError(Error):
"""Error while attempting to transform the image."""
class BadRequestError(Error):
"""The parameters given had something wrong with them."""
class NotImageError(Error):
"""The image data given is not recognizable as an image."""
class BadImageError(Error):
"""The image data given is corrupt."""
class LargeImageError(Error):
"""The image data given is too large to process."""
class InvalidBlobKeyError(Error):
"""The provided blob key was invalid."""
def __init__(self, blob_key=None):
"""Constructor.
Args:
blob_key: The blob_key that is believed to be invalid. May be None if the
BlobKey is unknown.
"""
self._blob_key = blob_key
def __str__(self):
"""Returns a string representation of this Error."""
if self._blob_key:
return 'InvalidBlobKeyError: %s' % repr(self._blob_key)
else:
return 'InvalidBlobKeyError'
class BlobKeyRequiredError(Error):
"""A blobkey is required for this operation."""
class UnsupportedSizeError(Error):
"""Specified size is not supported by requested operation."""
class AccessDeniedError(Error):
"""The application does not have permission to access the image."""
class ObjectNotFoundError(Error):
"""The object referred to by a BlobKey does not exist."""
def _ToImagesError(error, blob_key=None):
"""Translate an application error to an Images error, if possible.
Args:
error: an ApplicationError to translate.
    blob_key: The blob_key that was used in the function that caused the error.
May be None if the BlobKey is unknown.
Returns:
The Images error if found, otherwise the original error.
"""
error_map = {
images_service_pb.ImagesServiceError.NOT_IMAGE:
NotImageError,
images_service_pb.ImagesServiceError.BAD_IMAGE_DATA:
BadImageError,
images_service_pb.ImagesServiceError.IMAGE_TOO_LARGE:
LargeImageError,
images_service_pb.ImagesServiceError.INVALID_BLOB_KEY:
InvalidBlobKeyError,
images_service_pb.ImagesServiceError.ACCESS_DENIED:
AccessDeniedError,
images_service_pb.ImagesServiceError.OBJECT_NOT_FOUND:
ObjectNotFoundError,
images_service_pb.ImagesServiceError.UNSPECIFIED_ERROR:
TransformationError,
images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA:
BadRequestError,
}
error_code = error.application_error
if error_code == images_service_pb.ImagesServiceError.INVALID_BLOB_KEY:
return InvalidBlobKeyError(blob_key)
desired_exc = error_map.get(error_code, Error)
return desired_exc(error.error_detail)
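# Illustrative behavior (hypothetical error instance): for an
# ApplicationError whose application_error is
# images_service_pb.ImagesServiceError.BAD_IMAGE_DATA, _ToImagesError()
# returns BadImageError(error.error_detail); unrecognized codes fall back
# to the base Error class.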
class Image(object):
"""Image object to manipulate."""
def __init__(self, image_data=None, blob_key=None, filename=None):
"""Constructor.
Only one of image_data, blob_key or filename can be specified.
Args:
image_data: str, image data in string form.
blob_key: BlobKey, BlobInfo, str, or unicode representation of BlobKey of
blob containing the image data.
filename: str, the filename of a Google Storage file containing the
image data. Must be in the format '/gs/bucket_name/object_name'.
Raises:
NotImageError if the given data is empty.
"""
if not image_data and not blob_key and not filename:
raise NotImageError("Empty image data.")
if image_data and (blob_key or filename):
raise NotImageError("Can only take one of image, blob key or filename.")
if blob_key and filename:
raise NotImageError("Can only take one of image, blob key or filename.")
self._image_data = image_data
if filename:
self._blob_key = blobstore.create_gs_key(filename)
else:
self._blob_key = _extract_blob_key(blob_key)
self._transforms = []
self._width = None
self._height = None
self._format = None
self._correct_orientation = UNCHANGED_ORIENTATION
self._original_metadata = None
def _check_transform_limits(self):
"""Ensure some simple limits on the number of transforms allowed.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested for this image
"""
if len(self._transforms) >= MAX_TRANSFORMS_PER_REQUEST:
raise BadRequestError("%d transforms have already been requested on this "
"image." % MAX_TRANSFORMS_PER_REQUEST)
def _update_dimensions(self):
"""Updates the width and height fields of the image.
Raises:
NotImageError if the image data is not an image.
BadImageError if the image data is corrupt.
"""
if not self._image_data:
raise NotImageError("Dimensions unavailable for blob key input")
size = len(self._image_data)
if size >= 6 and self._image_data.startswith("GIF"):
self._update_gif_dimensions()
      self._format = GIF
elif size >= 8 and self._image_data.startswith("\x89PNG\x0D\x0A\x1A\x0A"):
self._update_png_dimensions()
self._format = PNG
elif size >= 2 and self._image_data.startswith("\xff\xD8"):
self._update_jpeg_dimensions()
self._format = JPEG
elif (size >= 8 and (self._image_data.startswith("II\x2a\x00") or
self._image_data.startswith("MM\x00\x2a"))):
self._update_tiff_dimensions()
self._format = TIFF
elif size >= 2 and self._image_data.startswith("BM"):
self._update_bmp_dimensions()
self._format = BMP
elif size >= 4 and self._image_data.startswith("\x00\x00\x01\x00"):
self._update_ico_dimensions()
self._format = ICO
elif (size >= 16 and (self._image_data.startswith("RIFF", 0, 4) and
self._image_data.startswith("WEBP", 8, 12) and
self._image_data.startswith("VP8 ", 12, 16))):
self._update_webp_dimensions()
self._format = WEBP
elif (size >= 16 and (self._image_data.startswith("RIFF", 0, 4) and
self._image_data.startswith("WEBP", 8, 12) and
self._image_data.startswith("VP8X", 12, 16))):
self._update_webp_vp8x_dimensions()
self._format = WEBP
else:
raise NotImageError("Unrecognized image format")
def _update_gif_dimensions(self):
"""Updates the width and height fields of the gif image.
Raises:
BadImageError if the image string is not a valid gif image.
"""
size = len(self._image_data)
if size >= 10:
self._width, self._height = struct.unpack("<HH", self._image_data[6:10])
else:
raise BadImageError("Corrupt GIF format")
def _update_png_dimensions(self):
"""Updates the width and height fields of the png image.
Raises:
BadImageError if the image string is not a valid png image.
"""
size = len(self._image_data)
if size >= 24 and self._image_data[12:16] == "IHDR":
self._width, self._height = struct.unpack(">II", self._image_data[16:24])
else:
raise BadImageError("Corrupt PNG format")
def _update_jpeg_dimensions(self):
"""Updates the width and height fields of the jpeg image.
Raises:
BadImageError if the image string is not a valid jpeg image.
"""
size = len(self._image_data)
offset = 2
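    # JPEG data is a sequence of 0xFF-prefixed marker segments. The scan
    # below advances to each marker byte; a start-of-frame marker
    # (0xC0-0xCF, excluding 0xC4, which defines Huffman tables) carries the
    # height and width as big-endian 16-bit values four bytes past the
    # marker identifier, while other segments are skipped using their
    # declared two-byte length.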
while offset < size:
while offset < size and ord(self._image_data[offset]) != 0xFF:
offset += 1
while offset < size and ord(self._image_data[offset]) == 0xFF:
offset += 1
if (offset < size and ord(self._image_data[offset]) & 0xF0 == 0xC0 and
ord(self._image_data[offset]) != 0xC4):
offset += 4
if offset + 4 <= size:
self._height, self._width = struct.unpack(
">HH",
self._image_data[offset:offset + 4])
break
else:
raise BadImageError("Corrupt JPEG format")
elif offset + 3 <= size:
offset += 1
offset += struct.unpack(">H", self._image_data[offset:offset + 2])[0]
else:
raise BadImageError("Corrupt JPEG format")
if self._height is None or self._width is None:
raise BadImageError("Corrupt JPEG format")
def _update_tiff_dimensions(self):
"""Updates the width and height fields of the tiff image.
Raises:
BadImageError if the image string is not a valid tiff image.
"""
size = len(self._image_data)
if self._image_data.startswith("II"):
endianness = "<"
else:
endianness = ">"
ifd_offset = struct.unpack(endianness + "I", self._image_data[4:8])[0]
if ifd_offset + 14 <= size:
ifd_size = struct.unpack(
endianness + "H",
self._image_data[ifd_offset:ifd_offset + 2])[0]
ifd_offset += 2
for unused_i in range(0, ifd_size):
if ifd_offset + 12 <= size:
tag = struct.unpack(
endianness + "H",
self._image_data[ifd_offset:ifd_offset + 2])[0]
if tag == 0x100 or tag == 0x101:
value_type = struct.unpack(
endianness + "H",
self._image_data[ifd_offset + 2:ifd_offset + 4])[0]
if value_type == 3:
format = endianness + "H"
end_offset = ifd_offset + 10
elif value_type == 4:
format = endianness + "I"
end_offset = ifd_offset + 12
else:
format = endianness + "B"
end_offset = ifd_offset + 9
if tag == 0x100:
self._width = struct.unpack(
format,
self._image_data[ifd_offset + 8:end_offset])[0]
if self._height is not None:
break
else:
self._height = struct.unpack(
format,
self._image_data[ifd_offset + 8:end_offset])[0]
if self._width is not None:
break
ifd_offset += 12
else:
raise BadImageError("Corrupt TIFF format")
if self._width is None or self._height is None:
raise BadImageError("Corrupt TIFF format")
def _update_bmp_dimensions(self):
"""Updates the width and height fields of the bmp image.
Raises:
BadImageError if the image string is not a valid bmp image.
"""
size = len(self._image_data)
if size >= 18:
header_length = struct.unpack("<I", self._image_data[14:18])[0]
if ((header_length == 40 or header_length == 108 or
header_length == 124 or header_length == 64) and size >= 26):
self._width, self._height = struct.unpack("<II",
self._image_data[18:26])
elif header_length == 12 and size >= 22:
self._width, self._height = struct.unpack("<HH",
self._image_data[18:22])
else:
raise BadImageError("Corrupt BMP format")
else:
raise BadImageError("Corrupt BMP format")
def _update_ico_dimensions(self):
"""Updates the width and height fields of the ico image.
Raises:
BadImageError if the image string is not a valid ico image.
"""
size = len(self._image_data)
if size >= 8:
self._width, self._height = struct.unpack("<BB", self._image_data[6:8])
if not self._width:
self._width = 256
if not self._height:
self._height = 256
else:
raise BadImageError("Corrupt ICO format")
def set_correct_orientation(self, correct_orientation):
"""Set flag to correct image orientation based on image metadata.
EXIF metadata within the image may contain a parameter indicating its proper
orientation. This value can equal 1 through 8, inclusive. "1" means that the
image is in its "normal" orientation, i.e., it should be viewed as it is
stored. Normally, this "orientation" value has no effect on the behavior of
    the transformations. However, if this function is called with the value
    CORRECT_ORIENTATION, any orientation specified in the EXIF metadata will
    be corrected during the first transformation.
    NOTE: If CORRECT_ORIENTATION is specified but the image is already in
    portrait orientation, i.e., "taller" than it is "wide", no corrections
    will be made, since it appears that the camera has already corrected it.
    Regardless of whether the correction was requested, the orientation
    value in the transformed image is always cleared to indicate that no
    additional corrections of the returned image's orientation are necessary.
Args:
correct_orientation: a value from ORIENTATION_CORRECTION_TYPE.
Raises:
BadRequestError if correct_orientation value is invalid.
"""
if correct_orientation not in ORIENTATION_CORRECTION_TYPE:
raise BadRequestError("Orientation correction must be in %s" %
ORIENTATION_CORRECTION_TYPE)
self._correct_orientation = correct_orientation
def _update_webp_dimensions(self):
"""Updates the width and height fields of the webp image."""
size = len(self._image_data)
if size < 30:
raise BadImageError("Corrupt WEBP format")
bits = (ord(self._image_data[20]) | (ord(self._image_data[21])<<8) |
(ord(self._image_data[22]) << 16))
key_frame = ((bits & 1) == 0)
if not key_frame:
raise BadImageError("Corrupt WEBP format")
profile = (bits >> 1) & 7
show_frame = (bits >> 4) & 1
if profile > 3:
raise BadImageError("Corrupt WEBP format")
if show_frame == 0:
raise BadImageError("Corrupt WEBP format")
self._width, self._height = struct.unpack("<HH", self._image_data[26:30])
if self._height is None or self._width is None:
raise BadImageError("Corrupt WEBP format")
def _update_webp_vp8x_dimensions(self):
"""Updates the width and height fields of a webp image with vp8x chunk."""
size = len(self._image_data)
if size < 30:
raise BadImageError("Corrupt WEBP format")
self._width, self._height = struct.unpack("<II", self._image_data[24:32])
if self._height is None or self._width is None:
raise BadImageError("Corrupt WEBP format")
def resize(self, width=0, height=0, crop_to_fit=False,
crop_offset_x=0.5, crop_offset_y=0.5, allow_stretch=False):
"""Resize the image maintaining the aspect ratio.
If both width and height are specified, the more restricting of the two
values will be used when resizing the image. The maximum dimension allowed
for both width and height is 4000 pixels.
If both width and height are specified and crop_to_fit is True, the less
restricting of the two values will be used when resizing and the image will
be cropped to fit the specified size. In this case the center of cropping
can be adjusted by crop_offset_x and crop_offset_y.
Args:
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
crop_to_fit: If True and both width and height are specified, the image is
cropped after resize to fit the specified dimensions.
crop_offset_x: float value between 0.0 and 1.0, 0 is left and 1 is right,
default is 0.5, the center of image.
crop_offset_y: float value between 0.0 and 1.0, 0 is top and 1 is bottom,
default is 0.5, the center of image.
allow_stretch: If True and both width and height are specified, the image
is stretched to fit the resize dimensions without maintaining the
aspect ratio.
Raises:
      TypeError when width or height is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given height or
width or if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on this image.
"""
if (not isinstance(width, int) or
not isinstance(height, int)):
raise TypeError("Width and height must be integers.")
if width < 0 or height < 0:
raise BadRequestError("Width and height must be >= 0.")
if not width and not height:
raise BadRequestError("At least one of width or height must be > 0.")
if width > 4000 or height > 4000:
raise BadRequestError("Both width and height must be <= 4000.")
if not isinstance(crop_to_fit, bool):
raise TypeError("crop_to_fit must be boolean.")
if crop_to_fit and not (width and height):
raise BadRequestError("Both width and height must be > 0 when "
"crop_to_fit is specified.")
if not isinstance(allow_stretch, bool):
raise TypeError("allow_stretch must be boolean.")
if allow_stretch and not (width and height):
raise BadRequestError("Both width and height must be > 0 when "
"allow_stretch is specified.")
self._validate_crop_arg(crop_offset_x, "crop_offset_x")
self._validate_crop_arg(crop_offset_y, "crop_offset_y")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_width(width)
transform.set_height(height)
transform.set_crop_to_fit(crop_to_fit)
transform.set_crop_offset_x(crop_offset_x)
transform.set_crop_offset_y(crop_offset_y)
transform.set_allow_stretch(allow_stretch)
self._transforms.append(transform)
def rotate(self, degrees):
"""Rotate an image a given number of degrees clockwise.
Args:
degrees: int, must be a multiple of 90.
Raises:
      TypeError when degrees is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given degrees or
if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested.
"""
if not isinstance(degrees, int):
raise TypeError("Degrees must be integers.")
if degrees % 90 != 0:
raise BadRequestError("degrees argument must be multiple of 90.")
degrees = degrees % 360
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_rotate(degrees)
self._transforms.append(transform)
def horizontal_flip(self):
"""Flip the image horizontally.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_horizontal_flip(True)
self._transforms.append(transform)
def vertical_flip(self):
"""Flip the image vertically.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already been
requested on the image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_vertical_flip(True)
self._transforms.append(transform)
def _validate_crop_arg(self, val, val_name):
"""Validate the given value of a Crop() method argument.
Args:
val: float, value of the argument.
val_name: str, name of the argument.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
"""
if type(val) != float:
raise TypeError("arg '%s' must be of type 'float'." % val_name)
if not (0 <= val <= 1.0):
raise BadRequestError("arg '%s' must be between 0.0 and 1.0 "
"(inclusive)" % val_name)
def crop(self, left_x, top_y, right_x, bottom_y):
"""Crop the image.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box
or if MAX_TRANSFORMS_PER_REQUEST transforms have already been requested
for this image.
"""
self._validate_crop_arg(left_x, "left_x")
self._validate_crop_arg(top_y, "top_y")
self._validate_crop_arg(right_x, "right_x")
self._validate_crop_arg(bottom_y, "bottom_y")
if left_x >= right_x:
raise BadRequestError("left_x must be less than right_x")
if top_y >= bottom_y:
raise BadRequestError("top_y must be less than bottom_y")
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_crop_left_x(left_x)
transform.set_crop_top_y(top_y)
transform.set_crop_right_x(right_x)
transform.set_crop_bottom_y(bottom_y)
self._transforms.append(transform)
def im_feeling_lucky(self):
"""Automatically adjust image contrast and color levels.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Raises:
BadRequestError if MAX_TRANSFORMS_PER_REQUEST transforms have already
been requested for this image.
"""
self._check_transform_limits()
transform = images_service_pb.Transform()
transform.set_autolevels(True)
self._transforms.append(transform)
def get_original_metadata(self):
"""Metadata of the original image.
Returns a dictionary of metadata extracted from the original image during
execute_transform.
    Note that some of the EXIF fields are processed; e.g., fields with
    multiple values are returned as lists, rational types are returned as
    floats, and GPS coordinates are parsed into signed floats.
ImageWidth and ImageLength fields are corrected if they did not correspond
to the actual dimensions of the original image.
Returns:
dict with string keys. If execute_transform was called with parse_metadata
being True, this dictionary contains information about various properties
of the original image, such as dimensions, color profile, and properties
from EXIF.
      Even if parse_metadata was False or the image did not have any metadata,
the dictionary will contain a limited set of metadata, at least
'ImageWidth' and 'ImageLength', corresponding to the dimensions of the
original image.
      Returns None if it is called before a successful execute_transforms.
"""
return self._original_metadata
def _set_imagedata(self, imagedata):
"""Fills in an ImageData PB from this Image instance.
Args:
imagedata: An ImageData PB instance
"""
if self._blob_key:
imagedata.set_content("")
imagedata.set_blob_key(self._blob_key)
else:
imagedata.set_content(self._image_data)
def execute_transforms(self, output_encoding=PNG, quality=None,
parse_source_metadata=False,
transparent_substitution_rgb=None,
rpc=None):
"""Perform transformations on a given image.
Args:
output_encoding: A value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG & WEBP quality control.
parse_source_metadata: when True the metadata (EXIF) of the source image
is parsed before any transformations. The results can be retrieved
via Image.get_original_metadata.
      transparent_substitution_rgb: If transparent pixels are not supported in
        the destination image format, transparent pixels will be substituted
        with the specified color, which must be in 32-bit RGB format.
rpc: A UserRPC object.
Returns:
str, image data after the transformations have been performed on it.
Raises:
BadRequestError when there is something wrong with the request
specifications.
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
InvalidBlobKeyError when the blob key provided is invalid.
      TransformationError when something goes wrong during image manipulation.
      AccessDeniedError when the blobkey refers to a Google Storage object and
        the application does not have permission to access the object.
      ObjectNotFoundError when the blobkey refers to an object that no longer
        exists.
Error when something unknown, but bad, happens.
"""
rpc = self.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
parse_source_metadata=parse_source_metadata,
transparent_substitution_rgb=transparent_substitution_rgb,
rpc=rpc)
return rpc.get_result()
def execute_transforms_async(self, output_encoding=PNG, quality=None,
parse_source_metadata=False,
transparent_substitution_rgb=None,
rpc=None):
"""Perform transformations on a given image - async version.
Args:
output_encoding: A value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG & WEBP quality control.
parse_source_metadata: when True the metadata (EXIF) of the source image
is parsed before any transformations. The results can be retrieved
via Image.get_original_metadata.
      transparent_substitution_rgb: If transparent pixels are not supported in
        the destination image format, transparent pixels will be substituted
        with the specified color, which must be in 32-bit RGB format.
rpc: A UserRPC object.
Returns:
A UserRPC object.
Raises:
BadRequestError when there is something wrong with the request
specifications.
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
InvalidBlobKeyError when the blob key provided is invalid.
      TransformationError when something goes wrong during image manipulation.
      AccessDeniedError when the blobkey refers to a Google Storage object and
        the application does not have permission to access the object.
      ValueError when transparent_substitution_rgb is not an integer.
Error when something unknown, but bad, happens.
"""
if output_encoding not in OUTPUT_ENCODING_TYPES:
raise BadRequestError("Output encoding type not in recognized set "
"%s" % OUTPUT_ENCODING_TYPES)
if not self._transforms:
raise BadRequestError("Must specify at least one transformation.")
if transparent_substitution_rgb:
if not isinstance(transparent_substitution_rgb, int):
raise ValueError(
"transparent_substitution_rgb must be a 32 bit integer")
self.CheckValidIntParameter(quality, 1, 100, "quality")
request = images_service_pb.ImagesTransformRequest()
response = images_service_pb.ImagesTransformResponse()
input_settings = request.mutable_input()
input_settings.set_correct_exif_orientation(
self._correct_orientation)
if parse_source_metadata:
input_settings.set_parse_metadata(True)
self._set_imagedata(request.mutable_image())
for transform in self._transforms:
request.add_transform().CopyFrom(transform)
request.mutable_output().set_mime_type(output_encoding)
if ((output_encoding == JPEG or output_encoding == WEBP) and
(quality is not None)):
request.mutable_output().set_quality(quality)
if transparent_substitution_rgb:
input_settings.set_transparent_substitution_rgb(
transparent_substitution_rgb)
def execute_transforms_hook(rpc):
"""Check success, handles exceptions and returns the converted RPC result.
Args:
rpc: A UserRPC object.
Raises:
See docstring for execute_transforms_async for more details.
"""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError as e:
raise _ToImagesError(e, self._blob_key)
self._image_data = rpc.response.image().content()
self._blob_key = None
self._transforms = []
      if rpc.response.image().has_width():
self._width = rpc.response.image().width()
else:
self._width = None
      if rpc.response.image().has_height():
self._height = rpc.response.image().height()
else:
self._height = None
self._format = None
      if rpc.response.source_metadata():
        self._original_metadata = json.loads(rpc.response.source_metadata())
return self._image_data
return _make_async_call(rpc,
"Transform",
request,
response,
execute_transforms_hook,
None)
@property
def width(self):
"""Gets the width of the image."""
if self._width is None:
self._update_dimensions()
return self._width
@property
def height(self):
"""Gets the height of the image."""
if self._height is None:
self._update_dimensions()
return self._height
@property
def format(self):
"""Gets the format of the image."""
if self._format is None:
self._update_dimensions()
return self._format
def histogram(self, rpc=None):
"""Calculates the histogram of the image.
Args:
rpc: A UserRPC object.
    Returns:
      3 256-element lists containing the number of occurrences of each value
      of each color in the order RGB, as described at
      http://en.wikipedia.org/wiki/Color_histogram for N = 256; i.e., the
      first value of the first list contains the number of pixels with a red
      value of 0, the second the number with a red value of 1.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
rpc = self.histogram_async(rpc)
return rpc.get_result()
def histogram_async(self, rpc=None):
"""Calculates the histogram of the image - async version.
Args:
rpc: An optional UserRPC object.
Returns:
rpc: A UserRPC object.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
request = images_service_pb.ImagesHistogramRequest()
response = images_service_pb.ImagesHistogramResponse()
self._set_imagedata(request.mutable_image())
def get_histogram_hook(rpc):
"""Check success, handles exceptions and returns the converted RPC result.
Args:
rpc: A UserRPC object.
Raises:
See docstring for histogram_async for more details.
"""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError as e:
raise _ToImagesError(e, self._blob_key)
histogram = rpc.response.histogram()
return [histogram.red_list(),
histogram.green_list(),
histogram.blue_list()]
return _make_async_call(rpc,
"Histogram",
request,
response,
get_histogram_hook,
None)
@staticmethod
def CheckValidIntParameter(parameter, min_value, max_value, name):
"""Checks that a parameters is an integer within the specified range."""
if parameter is not None:
if not isinstance(parameter, int):
raise TypeError("%s must be an integer." % name)
if parameter > max_value or parameter < min_value:
raise BadRequestError("%s must be between %s and %s."
% name, str(min_value), str(max_value))
def create_rpc(deadline=None, callback=None):
"""Creates an RPC object for use with the images API.
Args:
deadline: Optional deadline in seconds for the operation; the default
is a system-specific deadline (typically 5 seconds).
callback: Optional callable to invoke on completion.
Returns:
An apiproxy_stub_map.UserRPC object specialized for this service.
"""
return apiproxy_stub_map.UserRPC("images", deadline, callback)
def _make_async_call(rpc, method, request, response,
get_result_hook, user_data):
if rpc is None:
rpc = create_rpc()
rpc.make_call(method, request, response, get_result_hook, user_data)
return rpc
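# Usage sketch for the async pattern (illustrative; `png_bytes` is a
# hypothetical variable holding encoded image data):
#
#   rpc = create_rpc(deadline=10)
#   resize_async(png_bytes, width=64, height=64, rpc=rpc)
#   ...  # do unrelated work while the Images service runs
#   resized = rpc.get_result()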
def resize(image_data, width=0, height=0, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION,
crop_to_fit=False, crop_offset_x=0.5, crop_offset_y=0.5,
allow_stretch=False, rpc=None, transparent_substitution_rgb=None):
"""Resize a given image file maintaining the aspect ratio.
If both width and height are specified, the more restricting of the two
values will be used when resizing the image. The maximum dimension allowed
for both width and height is 4000 pixels.
If both width and height are specified and crop_to_fit is True, the less
restricting of the two values will be used when resizing and the image will be
cropped to fit the specified size. In this case the center of cropping can be
adjusted by crop_offset_x and crop_offset_y.
Args:
image_data: str, source image data.
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
crop_to_fit: If True and both width and height are specified, the image is
cropped after resize to fit the specified dimensions.
crop_offset_x: float value between 0.0 and 1.0, 0 is left and 1 is right,
default is 0.5, the center of image.
crop_offset_y: float value between 0.0 and 1.0, 0 is top and 1 is bottom,
default is 0.5, the center of image.
allow_stretch: If True and both width and height are specified, the image
is stretched to fit the resize dimensions without maintaining the
aspect ratio.
rpc: Optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
    TypeError when width or height is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given height or
width.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = resize_async(image_data,
width=width,
height=height,
output_encoding=output_encoding,
quality=quality,
correct_orientation=correct_orientation,
crop_to_fit=crop_to_fit,
crop_offset_x=crop_offset_x,
crop_offset_y=crop_offset_y,
allow_stretch=allow_stretch,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
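# Usage sketch (hypothetical input): produce a 120x120 JPEG thumbnail,
# cropping to fit instead of letterboxing:
#
#   thumb = resize(image_bytes, width=120, height=120, crop_to_fit=True,
#                  output_encoding=JPEG, quality=85)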
def resize_async(image_data, width=0, height=0, output_encoding=PNG,
quality=None, correct_orientation=UNCHANGED_ORIENTATION,
crop_to_fit=False, crop_offset_x=0.5, crop_offset_y=0.5,
allow_stretch=False, rpc=None,
transparent_substitution_rgb=None):
"""Resize a given image file maintaining the aspect ratio - async version.
If both width and height are specified, the more restricting of the two
values will be used when resizing the image. The maximum dimension allowed
for both width and height is 4000 pixels.
If both width and height are specified and crop_to_fit is True, the less
restricting of the two values will be used when resizing and the image will be
cropped to fit the specified size. In this case the center of cropping can be
adjusted by crop_offset_x and crop_offset_y.
Args:
image_data: str, source image data.
width: int, width (in pixels) to change the image width to.
height: int, height (in pixels) to change the image height to.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
crop_to_fit: If True and both width and height are specified, the image is
cropped after resize to fit the specified dimensions.
crop_offset_x: float value between 0.0 and 1.0, 0 is left and 1 is right,
default is 0.5, the center of image.
crop_offset_y: float value between 0.0 and 1.0, 0 is top and 1 is bottom,
default is 0.5, the center of image.
allow_stretch: If True and both width and height are specified, the image
is stretched to fit the resize dimensions without maintaining the
aspect ratio.
rpc: A UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
A UserRPC object, call get_result() to obtain the result of the RPC.
Raises:
    TypeError when width or height is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given height or
width.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.resize(width, height, crop_to_fit=crop_to_fit,
crop_offset_x=crop_offset_x, crop_offset_y=crop_offset_y,
allow_stretch=allow_stretch)
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def rotate(image_data, degrees, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Rotate a given image a given number of degrees clockwise.
Args:
image_data: str, source image data.
degrees: value from ROTATE_DEGREE_VALUES.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
    TypeError when degrees is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given degrees.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = rotate_async(image_data,
degrees,
output_encoding=output_encoding,
quality=quality,
correct_orientation=correct_orientation,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
def rotate_async(image_data, degrees, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Rotate a given image a given number of degrees clockwise - async version.
Args:
image_data: str, source image data.
degrees: value from ROTATE_DEGREE_VALUES.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
    A UserRPC object; call get_result to complete the RPC and obtain the
    rotation result.
Raises:
    TypeError when degrees is not of type 'int' or 'long'.
BadRequestError when there is something wrong with the given degrees.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.rotate(degrees)
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def horizontal_flip(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Flip the image horizontally.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
    rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = horizontal_flip_async(image_data,
output_encoding=output_encoding,
quality=quality,
correct_orientation=correct_orientation,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
def horizontal_flip_async(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION,
rpc=None,
transparent_substitution_rgb=None):
"""Flip the image horizontally - async version.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
    rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
    A UserRPC object; call get_result to complete the RPC and obtain the
    flip result.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.horizontal_flip()
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def vertical_flip(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Flip the image vertically.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
    rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = vertical_flip_async(image_data,
output_encoding=output_encoding,
quality=quality,
correct_orientation=correct_orientation,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
def vertical_flip_async(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Flip the image vertically - async version.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
    rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
    A UserRPC object; call get_result to complete the RPC and obtain the
    flip result.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.vertical_flip()
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def crop(image_data, left_x, top_y, right_x, bottom_y, output_encoding=PNG,
quality=None, correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Crop the given image.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
image_data: str, source image data.
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
    rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = crop_async(image_data, left_x, top_y, right_x, bottom_y,
output_encoding=output_encoding, quality=quality,
correct_orientation=correct_orientation, rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
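# Usage sketch (hypothetical input): keep the central 50% of the image in
# each dimension; the bounds are fractions of the image size, not pixels:
#
#   cropped = crop(image_bytes, 0.25, 0.25, 0.75, 0.75)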
def crop_async(image_data, left_x, top_y, right_x, bottom_y,
output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Crop the given image - async version.
The four arguments are the scaling numbers to describe the bounding box
which will crop the image. The upper left point of the bounding box will
be at (left_x*image_width, top_y*image_height) the lower right point will
be at (right_x*image_width, bottom_y*image_height).
Args:
image_data: str, source image data.
left_x: float value between 0.0 and 1.0 (inclusive).
top_y: float value between 0.0 and 1.0 (inclusive).
right_x: float value between 0.0 and 1.0 (inclusive).
bottom_y: float value between 0.0 and 1.0 (inclusive).
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
A UserRPC object, call get_result to complete the RPC and obtain the crop
result.
Raises:
TypeError if the args are not of type 'float'.
BadRequestError when there is something wrong with the given bounding box.
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.crop(left_x, top_y, right_x, bottom_y)
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def im_feeling_lucky(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Automatically adjust image levels.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
rpc = im_feeling_lucky_async(image_data,
output_encoding=output_encoding,
quality=quality,
correct_orientation=correct_orientation,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
return rpc.get_result()
def im_feeling_lucky_async(image_data, output_encoding=PNG, quality=None,
correct_orientation=UNCHANGED_ORIENTATION, rpc=None,
transparent_substitution_rgb=None):
"""Automatically adjust image levels - async version.
This is similar to the "I'm Feeling Lucky" button in Picasa.
Args:
image_data: str, source image data.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
correct_orientation: one of ORIENTATION_CORRECTION_TYPE, to indicate if
orientation correction should be performed during the transformation.
rpc: An optional UserRPC object.
    transparent_substitution_rgb: If transparent pixels are not supported in
      the destination image format, transparent pixels will be substituted
      with the specified color, which must be in 32-bit RGB format.
Returns:
A UserRPC object.
Raises:
Error when something went wrong with the call. See Image.ExecuteTransforms
for more details.
"""
image = Image(image_data)
image.im_feeling_lucky()
image.set_correct_orientation(correct_orientation)
return image.execute_transforms_async(output_encoding=output_encoding,
quality=quality,
rpc=rpc,
transparent_substitution_rgb=transparent_substitution_rgb)
def composite(inputs, width, height, color=0, output_encoding=PNG,
quality=None, rpc=None):
"""Composite one or more images onto a canvas - async version.
Args:
inputs: a list of tuples (image_data, x_offset, y_offset, opacity, anchor)
where
image_data: str, source image data.
x_offset: x offset in pixels from the anchor position
    y_offset: y offset in pixels from the anchor position
opacity: opacity of the image specified as a float in range [0.0, 1.0]
    anchor: anchoring point from ANCHOR_TYPES. The anchor point of the image
is aligned with the same anchor point of the canvas. e.g. TOP_RIGHT would
place the top right corner of the image at the top right corner of the
canvas then apply the x and y offsets.
width: canvas width in pixels.
height: canvas height in pixels.
color: canvas background color encoded as a 32 bit unsigned int where each
color channel is represented by one byte in order ARGB.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
rpc: Optional UserRPC object.
Returns:
str, image data of the composited image.
Raises:
TypeError If width, height, color, x_offset or y_offset are not of type
int or long or if opacity is not a float
BadRequestError If more than MAX_TRANSFORMS_PER_REQUEST compositions have
been requested, if the canvas width or height is greater than 4000 or less
than or equal to 0, if the color is invalid or if for any composition
option, the opacity is outside the range [0,1] or the anchor is invalid.
"""
rpc = composite_async(inputs, width, height, color=color,
output_encoding=output_encoding, quality=quality,
rpc=rpc)
return rpc.get_result()
def composite_async(inputs, width, height, color=0, output_encoding=PNG,
quality=None, rpc=None):
"""Composite one or more images onto a canvas - async version.
Args:
inputs: a list of tuples (image_data, x_offset, y_offset, opacity, anchor)
where
image_data: str, source image data.
x_offset: x offset in pixels from the anchor position
      y_offset: y offset in pixels from the anchor position
opacity: opacity of the image specified as a float in range [0.0, 1.0]
anchor: anchoring point from ANCHOR_POINTS. The anchor point of the image
is aligned with the same anchor point of the canvas. e.g. TOP_RIGHT would
place the top right corner of the image at the top right corner of the
canvas then apply the x and y offsets.
width: canvas width in pixels.
height: canvas height in pixels.
color: canvas background color encoded as a 32 bit unsigned int where each
color channel is represented by one byte in order ARGB.
output_encoding: a value from OUTPUT_ENCODING_TYPES.
quality: A value between 1 and 100 to specify the quality of the
encoding. This value is only used for JPEG quality control.
rpc: Optional UserRPC object.
Returns:
A UserRPC object.
Raises:
TypeError If width, height, color, x_offset or y_offset are not of type
int or long or if opacity is not a float
BadRequestError If more than MAX_TRANSFORMS_PER_REQUEST compositions have
been requested, if the canvas width or height is greater than 4000 or less
than or equal to 0, if the color is invalid or if for any composition
option, the opacity is outside the range [0,1] or the anchor is invalid.
"""
if (not isinstance(width, int) or
not isinstance(height, int) or
not isinstance(color, int)):
raise TypeError("Width, height and color must be integers.")
if output_encoding not in OUTPUT_ENCODING_TYPES:
raise BadRequestError("Output encoding type '%s' not in recognized set "
"%s" % (output_encoding, OUTPUT_ENCODING_TYPES))
if quality is not None:
if not isinstance(quality, int):
raise TypeError("Quality must be an integer.")
if quality > 100 or quality < 1:
raise BadRequestError("Quality must be between 1 and 100.")
if not inputs:
raise BadRequestError("Must provide at least one input")
if len(inputs) > MAX_COMPOSITES_PER_REQUEST:
raise BadRequestError("A maximum of %d composition operations can be"
"performed in a single request" %
MAX_COMPOSITES_PER_REQUEST)
if width <= 0 or height <= 0:
raise BadRequestError("Width and height must be > 0.")
if width > 4000 or height > 4000:
raise BadRequestError("Width and height must be <= 4000.")
if color > 0xffffffff or color < 0:
raise BadRequestError("Invalid color")
  if color >= 0x80000000:
    # Convert the unsigned ARGB value to the signed 32-bit int the API expects.
    color -= 0x100000000
image_map = {}
request = images_service_pb.ImagesCompositeRequest()
response = images_service_pb.ImagesTransformResponse()
for (image, x, y, opacity, anchor) in inputs:
if not image:
raise BadRequestError("Each input must include an image")
    if (not isinstance(x, int) or
        not isinstance(y, int) or
        not isinstance(opacity, float)):
      raise TypeError("x_offset, y_offset must be integers and opacity must "
                      "be a float")
if x > 4000 or x < -4000:
raise BadRequestError("xOffsets must be in range [-4000, 4000]")
if y > 4000 or y < -4000:
raise BadRequestError("yOffsets must be in range [-4000, 4000]")
if opacity < 0 or opacity > 1:
raise BadRequestError("Opacity must be in the range 0.0 to 1.0")
if anchor not in ANCHOR_TYPES:
raise BadRequestError("Anchor type '%s' not in recognized set %s" %
(anchor, ANCHOR_TYPES))
if image not in image_map:
image_map[image] = request.image_size()
if isinstance(image, Image):
image._set_imagedata(request.add_image())
else:
request.add_image().set_content(image)
option = request.add_options()
option.set_x_offset(x)
option.set_y_offset(y)
option.set_opacity(opacity)
option.set_anchor(anchor)
option.set_source_index(image_map[image])
request.mutable_canvas().mutable_output().set_mime_type(output_encoding)
request.mutable_canvas().set_width(width)
request.mutable_canvas().set_height(height)
request.mutable_canvas().set_color(color)
if ((output_encoding == JPEG or output_encoding == WEBP) and
(quality is not None)):
request.mutable_canvas().mutable_output().set_quality(quality)
def composite_hook(rpc):
"""Check success, handles exceptions and returns the converted RPC result.
Args:
rpc: A UserRPC object.
Returns:
Images bytes of the composite image.
Raises:
See docstring for composite_async for more details.
"""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError as e:
raise _ToImagesError(e)
return rpc.response.image().content()
return _make_async_call(rpc,
"Composite",
request,
response,
composite_hook,
None)
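# Illustrative sketch, not part of the original module: stamping a
# semi-transparent logo into the bottom-right corner of an 800x600 canvas.
# `photo_bytes` and `logo_bytes` are hypothetical inputs; the anchor constants
# (TOP_LEFT, BOTTOM_RIGHT) are defined elsewhere in this module.
def _example_watermark(photo_bytes, logo_bytes):
  inputs = [
      # (image_data, x_offset, y_offset, opacity, anchor)
      (photo_bytes, 0, 0, 1.0, TOP_LEFT),
      (logo_bytes, -10, -10, 0.5, BOTTOM_RIGHT),
  ]
  # Opaque white ARGB background for any canvas area the images don't cover.
  return composite(inputs, 800, 600, color=0xffffffff, output_encoding=JPEG)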
def histogram(image_data, rpc=None):
"""Calculates the histogram of the given image.
Args:
image_data: str, source image data.
rpc: An optional UserRPC object.
  Returns:
    Three 256-element lists containing the number of occurrences of each
    value of each color channel, in RGB order.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
rpc = histogram_async(image_data, rpc=rpc)
return rpc.get_result()
def histogram_async(image_data, rpc=None):
"""Calculates the histogram of the given image - async version.
Args:
image_data: str, source image data.
rpc: An optional UserRPC object.
Returns:
    A UserRPC object.
Raises:
NotImageError when the image data given is not an image.
BadImageError when the image data given is corrupt.
LargeImageError when the image data given is too large to process.
Error when something unknown, but bad, happens.
"""
image = Image(image_data)
return image.histogram_async(rpc)
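# Illustrative sketch, not part of the original module: reducing the histogram
# to per-channel mean intensities. Each returned list has 256 entries; entry i
# counts the pixels whose value is i for that channel.
def _example_channel_means(image_bytes):
  red, green, blue = histogram(image_bytes)
  def mean(counts):
    total = sum(counts)
    return sum(i * n for i, n in enumerate(counts)) / total if total else 0
  return mean(red), mean(green), mean(blue)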
IMG_SERVING_SIZES_LIMIT = 1600
IMG_SERVING_SIZES = [
32, 48, 64, 72, 80, 90, 94, 104, 110, 120, 128, 144,
150, 160, 200, 220, 288, 320, 400, 512, 576, 640, 720,
800, 912, 1024, 1152, 1280, 1440, 1600]
IMG_SERVING_CROP_SIZES = [32, 48, 64, 72, 80, 104, 136, 144, 150, 160]
def get_serving_url(blob_key,
size=None,
crop=False,
secure_url=None,
filename=None,
rpc=None):
"""Obtain a url that will serve the underlying image.
This URL is served by a high-performance dynamic image serving infrastructure.
This URL format also allows dynamic resizing and crop with certain
restrictions. To get dynamic resizing and cropping, specify size and crop
arguments, or simply append options to the end of the default url obtained via
this call. Here is an example:
get_serving_url -> "http://lh3.ggpht.com/SomeCharactersGoesHere"
To get a 32 pixel sized version (aspect-ratio preserved) simply append
"=s32" to the url:
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32"
To get a 32 pixel cropped version simply append "=s32-c":
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32-c"
  Valid sizes are any integer in the range [0, 1600]; the upper limit is
  available as IMG_SERVING_SIZES_LIMIT.
Args:
blob_key: BlobKey, BlobInfo, str, or unicode representation of BlobKey of
blob to get URL of.
size: int, size of resulting images
crop: bool, True requests a cropped image, False a resized one.
secure_url: bool, True requests a https url, False requests a http url.
filename: The filename of a Google Storage object to get the URL of.
rpc: Optional UserRPC object.
Returns:
str, a url
Raises:
    BlobKeyRequiredError: when neither a blob_key nor a filename was
      specified.
UnsupportedSizeError: when size parameters uses unsupported sizes.
BadRequestError: when crop/size are present in wrong combination, or a
blob_key and a filename have been specified.
TypeError: when secure_url is not a boolean type.
AccessDeniedError: when the blobkey refers to a Google Storage object, and
the application does not have permission to access the object.
    ObjectNotFoundError: when the blobkey refers to an object that no longer
      exists.
"""
rpc = get_serving_url_async(blob_key, size, crop, secure_url, filename, rpc)
return rpc.get_result()
def get_serving_url_async(blob_key,
size=None,
crop=False,
secure_url=None,
filename=None,
rpc=None):
"""Obtain a url that will serve the underlying image - async version.
This URL is served by a high-performance dynamic image serving infrastructure.
This URL format also allows dynamic resizing and crop with certain
restrictions. To get dynamic resizing and cropping, specify size and crop
arguments, or simply append options to the end of the default url obtained via
this call. Here is an example:
get_serving_url -> "http://lh3.ggpht.com/SomeCharactersGoesHere"
To get a 32 pixel sized version (aspect-ratio preserved) simply append
"=s32" to the url:
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32"
To get a 32 pixel cropped version simply append "=s32-c":
"http://lh3.ggpht.com/SomeCharactersGoesHere=s32-c"
  Valid sizes are any integer in the range [0, 1600]; the upper limit is
  available as IMG_SERVING_SIZES_LIMIT.
Args:
blob_key: BlobKey, BlobInfo, str, or unicode representation of BlobKey of
blob to get URL of.
size: int, size of resulting images
crop: bool, True requests a cropped image, False a resized one.
secure_url: bool, True requests a https url, False requests a http url.
filename: The filename of a Google Storage object to get the URL of.
rpc: Optional UserRPC object.
Returns:
A UserRPC whose result will be a string that is the serving url
Raises:
    BlobKeyRequiredError: when neither a blob_key nor a filename was
      specified.
UnsupportedSizeError: when size parameters uses unsupported sizes.
BadRequestError: when crop/size are present in wrong combination, or a
blob_key and a filename have been specified.
TypeError: when secure_url is not a boolean type.
AccessDeniedError: when the blobkey refers to a Google Storage object, and
the application does not have permission to access the object.
"""
if not blob_key and not filename:
raise BlobKeyRequiredError(
"A Blobkey or a filename is required for this operation.")
if crop and not size:
raise BadRequestError("Size should be set for crop operation")
if size is not None and (size > IMG_SERVING_SIZES_LIMIT or size < 0):
raise UnsupportedSizeError("Unsupported size")
if secure_url and not isinstance(secure_url, bool):
raise TypeError("secure_url must be boolean.")
if filename and blob_key:
raise BadRequestError("Cannot specify a blob_key and a filename.");
if filename:
_blob_key = blobstore.create_gs_key(filename)
readable_blob_key = filename
else:
_blob_key = _extract_blob_key(blob_key)
readable_blob_key = blob_key
request = images_service_pb.ImagesGetUrlBaseRequest()
response = images_service_pb.ImagesGetUrlBaseResponse()
request.set_blob_key(_blob_key)
if secure_url:
request.set_create_secure_url(secure_url)
def get_serving_url_hook(rpc):
"""Check success, handle exceptions, and return converted RPC result.
Args:
rpc: A UserRPC object.
Returns:
The URL for serving the image.
Raises:
See docstring for get_serving_url for more details.
"""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError as e:
raise _ToImagesError(e, readable_blob_key)
url = rpc.response.url()
if size is not None:
url += "=s%s" % size
if crop:
url += "-c"
return url
return _make_async_call(rpc,
"GetUrlBase",
request,
response,
get_serving_url_hook,
None)
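# Illustrative sketch, not part of the original module: requesting a 200-pixel
# cropped HTTPS thumbnail. Appending "=s200-c" to the bare URL by hand, as
# described in the docstring above, would produce the same image. `blob_key`
# is a hypothetical BlobKey for an uploaded image.
def _example_thumbnail_url(blob_key):
  return get_serving_url(blob_key, size=200, crop=True, secure_url=True)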
def delete_serving_url(blob_key, rpc=None):
"""Delete a serving url that was created for a blob_key using get_serving_url.
Args:
blob_key: BlobKey, BlobInfo, str, or unicode representation of BlobKey of
blob that has an existing URL to delete.
rpc: Optional UserRPC object.
Raises:
BlobKeyRequiredError: when no blobkey was specified.
InvalidBlobKeyError: the blob_key supplied was invalid.
Error: There was a generic error deleting the serving url.
"""
rpc = delete_serving_url_async(blob_key, rpc)
rpc.get_result()
def delete_serving_url_async(blob_key, rpc=None):
"""Delete a serving url created using get_serving_url - async version.
Args:
blob_key: BlobKey, BlobInfo, str, or unicode representation of BlobKey of
blob that has an existing URL to delete.
rpc: Optional UserRPC object.
Returns:
A UserRPC object.
Raises:
BlobKeyRequiredError: when no blobkey was specified.
InvalidBlobKeyError: the blob_key supplied was invalid.
Error: There was a generic error deleting the serving url.
"""
if not blob_key:
raise BlobKeyRequiredError("A Blobkey is required for this operation.")
request = images_service_pb.ImagesDeleteUrlBaseRequest()
response = images_service_pb.ImagesDeleteUrlBaseResponse()
request.set_blob_key(_extract_blob_key(blob_key))
def delete_serving_url_hook(rpc):
"""Checks success, handles exceptions and returns the converted RPC result.
Args:
rpc: A UserRPC object.
Raises:
See docstring for delete_serving_url_async for more details.
"""
try:
rpc.check_success()
except apiproxy_errors.ApplicationError as e:
raise _ToImagesError(e, blob_key)
return _make_async_call(rpc,
"DeleteUrlBase",
request,
response,
delete_serving_url_hook,
None)
def _extract_blob_key(blob):
  """Extract a text blob key from a str, bytes, BlobKey, or BlobInfo.
  Args:
    blob: The str, bytes, BlobKey, or BlobInfo that contains the blob key.
  Returns:
    The blob key as a text (unicode) string.
  """
  if isinstance(blob, bytes):
    return blob.decode('utf-8')
  elif isinstance(blob, str):
    return blob
  elif isinstance(blob, BlobKey):
    return str(blob)
  elif blob.__class__.__name__ == 'BlobInfo':
    return str(blob.key())
  return blob
|
{
"content_hash": "38d6b32b29c37b58cb404bf2fe9b6b9e",
"timestamp": "",
"source": "github",
"line_count": 1971,
"max_line_length": 80,
"avg_line_length": 37.26078132927448,
"alnum_prop": 0.6782587383069403,
"repo_name": "Suwmlee/XX-Net",
"id": "a3f1a93a0453048efda31f60524143a7c94cc465",
"size": "74046",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "gae_proxy/server/lib/google/appengine/api/images/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
}
|
import simplejson
import sys
import urllib
import urllib2
class StreamingChannelsCore():
    def __init__(self, instanceId=10, platformId=4, version=10):
self.settings = sys.modules["__main__"].settings
self.plugin = sys.modules["__main__"].plugin
self.enabledebug = sys.modules["__main__"].enabledebug
self.url = sys.modules["__main__"].urlChannels
urllib2.install_opener(sys.modules["__main__"].opener)
def getChannelLibrary(self):
return self.getChannels('Channels', self.url)
def getChannels(self, channelName, url):
req = urllib2.Request(url)
f = urllib2.urlopen(req)
result = simplejson.load(f)
f.close()
return result[channelName]
def buildUrl(self, base_url, query):
return base_url + '?' + urllib.urlencode(query)
|
{
"content_hash": "60bcf480cbde77825b9dbfc64687efc3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 67,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.7048748353096179,
"repo_name": "lperilla/kodi.plugin.streaming-channels",
"id": "6dbc86d9bfdc3a3d0da26860e5d4a4b3e2adca7b",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kodi.plugin.streaming-channels/StreamingChannelsCore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4233"
}
],
"symlink_target": ""
}
|
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon HealthLake"
prefix = "healthlake"
class Action(BaseAction):
def __init__(self, action: str = None) -> None:
super().__init__(prefix, action)
class ARN(BaseARN):
def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
super().__init__(
service=prefix, resource=resource, region=region, account=account
)
CreateFHIRDatastore = Action("CreateFHIRDatastore")
CreateResource = Action("CreateResource")
DeleteFHIRDatastore = Action("DeleteFHIRDatastore")
DeleteResource = Action("DeleteResource")
DescribeFHIRDatastore = Action("DescribeFHIRDatastore")
DescribeFHIRExportJob = Action("DescribeFHIRExportJob")
DescribeFHIRImportJob = Action("DescribeFHIRImportJob")
GetCapabilities = Action("GetCapabilities")
ListFHIRDatastores = Action("ListFHIRDatastores")
ListFHIRExportJobs = Action("ListFHIRExportJobs")
ListFHIRImportJobs = Action("ListFHIRImportJobs")
ListTagsForResource = Action("ListTagsForResource")
ReadResource = Action("ReadResource")
SearchWithGet = Action("SearchWithGet")
SearchWithPost = Action("SearchWithPost")
StartFHIRExportJob = Action("StartFHIRExportJob")
StartFHIRImportJob = Action("StartFHIRImportJob")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
UpdateResource = Action("UpdateResource")
|
{
"content_hash": "3a870c3a247c066bb2e016ed1854fb7c",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 88,
"avg_line_length": 36.12820512820513,
"alnum_prop": 0.7579843860894251,
"repo_name": "cloudtools/awacs",
"id": "84b9cfa2664eddd3862d8d8bd333fed2764a96e1",
"size": "1525",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "awacs/healthlake.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "963483"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
from .models import signup, ipl_scores
admin.site.register(signup)
admin.site.register(ipl_scores)
|
{
"content_hash": "921b5e252f836ca621464573418c7464",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 32,
"avg_line_length": 22.75,
"alnum_prop": 0.8021978021978022,
"repo_name": "Rahul91/Django_IPL",
"id": "78eab6c37cfa53957ad5f6d540419cfd0d0c165a",
"size": "182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "templates/signups/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "84832"
},
{
"name": "HTML",
"bytes": "202924"
},
{
"name": "JavaScript",
"bytes": "155406"
},
{
"name": "Python",
"bytes": "18854"
}
],
"symlink_target": ""
}
|
import asyncio
from cogbot.cogs.robo_mod.robo_mod_condition import RoboModCondition
from cogbot.cogs.robo_mod.robo_mod_trigger import RoboModTrigger
class MessageHasEmbedCondition(RoboModCondition):
def __init__(self):
self.min_count: int = None
self.delay: int = None
async def update(self, state: "RoboModServerState", data: dict):
self.min_count = data.get("min_count", 1)
self.delay = data.get("delay", 0)
async def check(self, trigger: RoboModTrigger) -> bool:
# Optionally give the client cache some time to update.
if self.delay > 0:
await asyncio.sleep(self.delay / 1000)
return len(trigger.message.embeds) >= self.min_count
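# Illustrative sketch, not part of the original module: a hypothetical rule
# configuration for this condition -- require at least two embeds and wait
# 500 ms (delay is in milliseconds; check() divides by 1000) before counting.
_example_condition_data = {"min_count": 2, "delay": 500}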
|
{
"content_hash": "0b8f2da58f62b6a2b8d78c3499e2a662",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 35.9,
"alnum_prop": 0.6768802228412256,
"repo_name": "Arcensoth/cogbot",
"id": "eddc773cab0fc23f007e7e16fef603ebdb40a7c8",
"size": "718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cogbot/cogs/robo_mod/conditions/message_has_embed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193760"
}
],
"symlink_target": ""
}
|
from toast.math.vector import Vector2D
from toast.math.matrix import MatrixHelper
from toast.scene_graph import Component
class Transform(Component):
    def __init__(self):
        super(Transform, self).__init__()
        self.__local_matrix = None
        self.global_matrix = None
        self.__position = Vector2D.Zero()
        self.__rotation = 0
        self.__scale = Vector2D(1.0, 1.0)
        # The offset property below reads __offset; give it a sane default.
        self.__offset = Vector2D.Zero()
    @property
    def matrix(self):
        # Rebuild the cached local matrix lazily: translate * rotate * scale.
        if self.__local_matrix is None:
            t = MatrixHelper.translation_matrix(int(self.__position[0]), int(self.__position[1]))
            r = MatrixHelper.rotation_matrix(self.__rotation)
            s = MatrixHelper.scale_matrix(self.__scale[0], self.__scale[1])
            self.__local_matrix = t * r * s
        # Compose with the parent's transform, if any, to get the global matrix.
        if self.global_matrix is None:
            if hasattr(self.game_object.parent, 'transform'):
                p = self.game_object.parent.transform.matrix
                self.global_matrix = p * self.__local_matrix
            else:
                return self.__local_matrix
        return self.global_matrix
def mark_dirty(self):
if not self.game_object:
return
self.global_matrix = None
self.__local_matrix = None
for child_transform in [c.transform for c in self.game_object.children]:
child_transform.mark_dirty()
@property
def position(self):
return Vector2D(self.matrix[0][2], self.matrix[1][2])
@position.setter
def position(self, other):
self.__position.x = other[0]
self.__position.y = other[1]
self.mark_dirty()
@property
def rotation(self):
a = self.matrix * (1, 0)
b = self.position
return (a - b).angle
    @rotation.setter
    def rotation(self, rotation):
        # Store internally in radians (0.0174532925 ~= pi / 180).
        self.__rotation = rotation * 0.0174532925
        self.mark_dirty()
@property
def scale(self):
sx = (self.matrix * (1, 0)) - self.position
sy = (self.matrix * (0, 1)) - self.position
return Vector2D(sx.magnitude, sy.magnitude)
@scale.setter
def scale(self, scale):
self.__scale = scale
self.mark_dirty()
@property
def forward(self):
f = Vector2D.from_angle(self.rotation)
return f
@property
def right(self):
r = Vector2D.from_angle(self.rotation - 90.0)
r[1] = -r[1]
return r
@property
def offset(self):
return self.__offset
def look_at(self, pos):
angle = (pos - self.position).angle
self.rotation = -angle
|
{
"content_hash": "3fd1a5fad2d4cbb8022a595178f823e4",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 97,
"avg_line_length": 28.69148936170213,
"alnum_prop": 0.5439377085650723,
"repo_name": "JSkelly/Toast",
"id": "fec37355db51cc2f9278cff2b194fcde35aeb840",
"size": "2697",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "toast/transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132965"
}
],
"symlink_target": ""
}
|
"""freeze module
The freeze and unfreeze functions special-case some of the built-in
types. If these types grow the appropriate methods then that will
become unnecessary.
"""
__all__ = ("Freezable", "UnFreezable", "freeze", "unfreeze")
import builtins
from abc import ABCMeta, abstractmethod
class Freezable(metaclass=ABCMeta):
@abstractmethod
def __freeze__(self):
"""Returns an immutable version of this object."""
class UnFreezable(metaclass=ABCMeta):
@abstractmethod
def __unfreeze__(self):
"""Returns a mutable version of this object."""
def freeze(obj):
"""Returns the immutable version of the object."""
if hasattr(type(obj), "__freeze__"):
return obj.__freeze__()
try:
handler = _freeze_registry[type(obj)]
except KeyError:
pass
else:
return handler(obj)
#if hasattr(type(obj), "__unfreeze__"):
# return obj
msg = "Don't know how to freeze a {} object"
raise TypeError(msg.format(type(obj)))
def unfreeze(obj, strict=False):
if hasattr(type(obj), "__unfreeze__"):
return obj.__unfreeze__()
try:
handler = _unfreeze_registry[type(obj)]
except KeyError:
pass
else:
return handler(obj)
#if hasattr(type(obj), "__freeze__"):
# return obj
msg = "Don't know how to unfreeze a {} object"
raise TypeError(msg.format(type(obj)))
#################################################
# special-casing built-in types
_freeze_registry = {}
_unfreeze_registry = {}
def register(f, cls=None):
action, typename = f.__name__.split("_")
if cls is None:
cls = getattr(builtins, typename)
if action == "freeze":
_freeze_registry[cls] = f
Freezable.register(cls)
elif action == "unfreeze":
_unfreeze_registry[cls] = f
UnFreezable.register(cls)
else:
raise TypeError
return f
@register
def freeze_dict(obj):
raise NotImplementedError
@register
def unfreeze_dict(obj):
return obj
@register
def freeze_list(obj):
return tuple(obj)
@register
def unfreeze_list(obj):
return obj
@register
def freeze_tuple(obj):
return obj
@register
def unfreeze_tuple(obj):
return list(obj)
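# Illustrative sketch, not part of the original recipe: round-tripping a list
# through the registry-backed freeze()/unfreeze() pair defined above.
def _example_roundtrip():
    frozen = freeze([1, 2, 3])    # lists freeze to tuples
    assert frozen == (1, 2, 3)
    thawed = unfreeze(frozen)     # tuples unfreeze back to lists
    assert thawed == [1, 2, 3]
    return thawed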
|
{
"content_hash": "de6141180e1caba7a98386443bd2955f",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 67,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.6182222222222222,
"repo_name": "ActiveState/code",
"id": "3b1c99445d6e49d4311af88a6faa88fb8006526b",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/577895_Protocol_Making_Objects/recipe-577895.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('event_exim', '0001_initial'),
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('address1', models.CharField(blank=True, max_length=765, null=True)),
('address2', models.CharField(blank=True, max_length=765, null=True)),
('city', models.CharField(blank=True, max_length=765, null=True)),
('state', models.CharField(blank=True, max_length=765, null=True)),
('region', models.CharField(blank=True, max_length=765, null=True)),
('postal', models.CharField(blank=True, max_length=765, null=True)),
('zip', models.CharField(blank=True, max_length=15, null=True)),
('plus4', models.CharField(blank=True, max_length=12, null=True)),
('country', models.CharField(blank=True, max_length=765, null=True)),
('longitude', models.FloatField(blank=True, null=True)),
('latitude', models.FloatField(blank=True, null=True)),
('title', models.CharField(max_length=765)),
('starts_at', models.DateTimeField(blank=True, db_index=True, null=True)),
('ends_at', models.DateTimeField(blank=True, null=True)),
('starts_at_utc', models.DateTimeField(blank=True, null=True)),
('ends_at_utc', models.DateTimeField(blank=True, null=True)),
('status', models.CharField(choices=[('active', 'active'), ('cancelled', 'cancelled'), ('deleted', 'deleted')], db_index=True, max_length=96)),
('host_is_confirmed', models.IntegerField()),
('is_private', models.IntegerField(choices=[(0, 'public'), (1, 'private')], verbose_name='private or public')),
('is_approved', models.IntegerField()),
('attendee_count', models.IntegerField()),
('max_attendees', models.IntegerField(blank=True, null=True)),
('venue', models.CharField(max_length=765)),
('phone', models.CharField(max_length=765)),
('public_description', models.TextField()),
('directions', models.TextField()),
('note_to_attendees', models.TextField()),
('notes', models.TextField()),
('organization_official_event', models.NullBooleanField()),
('event_type', models.CharField(max_length=765)),
('organization_campaign', models.CharField(db_index=True, max_length=765)),
('is_searchable', models.IntegerField(choices=[(0, 'not searchable'), (1, 'searchable')])),
('private_phone', models.CharField(max_length=765)),
('url', models.URLField(blank=True)),
('slug', models.SlugField(blank=True, max_length=255, null=True)),
('osdi_origin_system', models.CharField(max_length=765)),
('ticket_type', models.IntegerField(choices=[(0, 'ticketed'), (1, 'open'), (2, 'ticketed')])),
('share_url', models.URLField(blank=True)),
('political_scope', models.CharField(blank=True, max_length=765, null=True)),
('venue_category', models.IntegerField(choices=[(0, 'unknown'), (1, 'private home'), (2, 'public space'), (3, 'target location (e.g. congressional district office)'), (4, 'virtual')])),
('needs_organizer_help', models.IntegerField(blank=True, default=0, null=True)),
('rsvp_url', models.URLField(blank=True)),
('event_facebook_url', models.URLField(blank=True)),
('organization_status_review', models.CharField(blank=True, choices=[('', 'New'), ('reviewed', 'Reviewed'), ('vetted', 'Vetted'), ('questionable', 'Questionable'), ('limbo', 'Limbo')], db_index=True, max_length=32)),
('organization_status_prep', models.CharField(blank=True, choices=[('', 'Unclaimed'), ('claimed', 'Claimed'), ('partially_prepped', 'Partially prepped'), ('fully_prepped', 'Fully prepped'), ('nocontact', 'Unable to contact')], db_index=True, max_length=32)),
('dupe_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='event_store.Event')),
],
),
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hashed_email', models.CharField(blank=True, max_length=765, null=True)),
('email', models.CharField(blank=True, max_length=765, null=True)),
('name', models.CharField(blank=True, max_length=765, null=True)),
('host_system_id', models.CharField(max_length=765)),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=765)),
('facebook', models.CharField(max_length=128)),
('twitter', models.CharField(help_text='do not include @', max_length=128)),
('url', models.URLField(blank=True)),
('slug', models.CharField(max_length=128)),
('logo_thumb', models.URLField(blank=True)),
('logo_big', models.URLField(blank=True)),
('privacy_policy', models.URLField(blank=True)),
('terms_and_conditions', models.URLField(blank=True)),
('osdi_source_id', models.CharField(max_length=128)),
('api_key', models.CharField(editable=False, max_length=765)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group')),
],
),
migrations.AddField(
model_name='event',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='event_store.Organization'),
),
migrations.AddField(
model_name='event',
name='organization_host',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='event_store.Host'),
),
migrations.AddField(
model_name='event',
name='organization_source',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='event_exim.EventSource'),
),
]
|
{
"content_hash": "ceb1753a150e18746efce988d8c27163",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 274,
"avg_line_length": 62.93859649122807,
"alnum_prop": 0.5763066202090592,
"repo_name": "MoveOnOrg/eventroller",
"id": "d1658ca50388b3b99408a2e1cb2670f88d128fc7",
"size": "7247",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "event_store/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "609"
},
{
"name": "HTML",
"bytes": "7391"
},
{
"name": "JavaScript",
"bytes": "24435"
},
{
"name": "Python",
"bytes": "182608"
}
],
"symlink_target": ""
}
|
"""
wiggle - Plot z=f(x,y) anomalies along tracks.
"""
from pygmt.clib import Session
from pygmt.helpers import build_arg_string, fmt_docstring, kwargs_to_strings, use_alias
@fmt_docstring
@use_alias(
B="frame",
D="position",
G="color",
J="projection",
R="region",
T="track",
U="timestamp",
V="verbose",
W="pen",
X="xshift",
Y="yshift",
Z="scale",
c="panel",
i="columns",
p="perspective",
)
@kwargs_to_strings(R="sequence", c="sequence_comma", i="sequence_comma", p="sequence")
def wiggle(self, x=None, y=None, z=None, data=None, **kwargs):
r"""
Plot z=f(x,y) anomalies along tracks.
Takes a matrix, (x,y,z) triplets, or a file name as input and plots z as a
function of distance along track.
Must provide either ``data`` or ``x``/``y``/``z``.
Full parameter list at :gmt-docs:`wiggle.html`
{aliases}
Parameters
----------
x/y/z : 1d arrays
The arrays of x and y coordinates and z data points.
data : str or {table-like}
Pass in either a file name to an ASCII data table, a 2D
{table-classes}.
Use parameter ``columns`` to choose which columns are x, y, z,
respectively.
{J}
{R}
scale : str or float
Gives anomaly scale in data-units/distance-unit. Append **c**, **i**,
or **p** to indicate the distance unit (cm, inch, or point); if no unit
is given we use the default unit that is controlled by
:gmt-term:`PROJ_LENGTH_UNIT`.
{B}
position : str
[**g**\|\ **j**\|\ **J**\|\ **n**\|\ **x**]\ *refpoint*\
**+w**\ *length*\ [**+j**\ *justify*]\ [**+al**\|\ **r**]\
[**+o**\ *dx*\ [/*dy*]][**+l**\ [*label*]].
Defines the reference point on the map for the vertical scale bar.
color : str
Set fill shade, color or pattern for positive and/or negative wiggles
[Default is no fill]. Optionally, append **+p** to fill positive areas
(this is the default behavior). Append **+n** to fill negative areas.
Append **+n+p** to fill both positive and negative areas with the same
fill. Note: You will need to repeat the color parameter to select
different fills for the positive and negative wiggles.
track : str
Draw track [Default is no track]. Append pen attributes to use
[Default is **0.25p,black,solid**].
{U}
{V}
pen : str
Specify outline pen attributes [Default is no outline].
{XY}
{c}
columns : str or 1d array
Choose which columns are x, y, and z, respectively if input is provided
via *data*. E.g. ``columns = [0, 1, 2]`` or ``columns = "0,1,2"`` if
the *x* values are stored in the first column, *y* values in the second
one and *z* values in the third one. Note: zero-based indexing is used.
{p}
"""
kwargs = self._preprocess(**kwargs) # pylint: disable=protected-access
with Session() as lib:
# Choose how data will be passed in to the module
file_context = lib.virtualfile_from_data(
check_kind="vector", data=data, x=x, y=y, z=z
)
with file_context as fname:
arg_str = " ".join([fname, build_arg_string(kwargs)])
lib.call_module("wiggle", arg_str)
|
{
"content_hash": "fdb98504891eb14219746c413e26978f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 87,
"avg_line_length": 34.489583333333336,
"alnum_prop": 0.5871337964361221,
"repo_name": "GenericMappingTools/gmt-python",
"id": "ddabb48c35ef770e243a55567aae1d203e606d21",
"size": "3311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygmt/src/wiggle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1357"
},
{
"name": "Python",
"bytes": "292740"
},
{
"name": "Shell",
"bytes": "357"
}
],
"symlink_target": ""
}
|
import os
import re
def get_version():
"""
Extracts the version number from the version.py file.
"""
VERSION_FILE = '../{{ project_name }}/version.py'
    with open(VERSION_FILE, 'rt') as version_file:
        mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
                       version_file.read(), re.M)
    if mo:
        return mo.group(1)
    else:
        raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
#'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'toc'
# General information about the project.
project = u'{{ project_name }}'
copyright = u'{{ current_year }}, {{ author_name }}'
# The short X.Y version.
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
intersphinx_mapping = {
'python': ('http://docs.python.org/3.4', None),
}
# -- Options for HTML output ----------------------------------------------
html_theme = 'default'
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# Output file base name for HTML help builder.
htmlhelp_basename = '{{ repo_name }}doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', '{{ repo_name }}.tex', u'{{ repo_name }} Documentation',
u'{{ author_name }}', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', '{{ repo_name }}', u'{{ repo_name }} Documentation',
[u'{{ author_name }}'], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', '{{ repo_name }}', u'{{ repo_name }} Documentation',
u'{{ author_name }}', '{{ repo_name }}', 'A short description',
'Miscellaneous'),
]
|
{
"content_hash": "8f43cb3993aa053938fed8534a45a7b4",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 96,
"avg_line_length": 31.29230769230769,
"alnum_prop": 0.6403638151425762,
"repo_name": "micahhausler/python-template",
"id": "119b96198ecd6b6e7becbf024ae8ae8abfa41198",
"size": "4152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7269"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('game', '0011_deployedtaskimage'),
]
operations = [
migrations.RemoveField(
model_name='deployedtaskimage',
name='task',
),
migrations.RemoveField(
model_name='deployedtaskimage',
name='uploaded_image',
),
migrations.DeleteModel(
name='DeployedTaskImage',
),
]
|
{
"content_hash": "6bfddd286d1a86e4617a8c26d766b659",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 43,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5708955223880597,
"repo_name": "stefantsov/blackbox3",
"id": "486951b9f4dc257e59f849d0f58b2b99ab522e0d",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/migrations/0012_auto_20151103_1658.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2241"
},
{
"name": "HTML",
"bytes": "39706"
},
{
"name": "JavaScript",
"bytes": "133810"
},
{
"name": "Python",
"bytes": "95948"
}
],
"symlink_target": ""
}
|
import abc
import mock
import six
from ceilometer.ipmi import manager
import ceilometer.tests.base as base
from oslotest import mockpatch
@six.add_metaclass(abc.ABCMeta)
class TestPollsterBase(base.BaseTestCase):
@abc.abstractmethod
def fake_data(self):
"""Fake data used for test."""
@abc.abstractmethod
def fake_sensor_data(self, sensor_type):
"""Fake sensor data used for test."""
@abc.abstractmethod
def make_pollster(self):
"""Produce right pollster for test."""
def _test_get_samples(self):
nm = mock.Mock()
nm.read_temperature_all.side_effect = self.fake_data
nm.read_power_all.side_effect = self.fake_data
nm.read_sensor_any.side_effect = self.fake_sensor_data
# We should mock the pollster first before initialize the Manager
# so that we don't trigger the sudo in pollsters' __init__().
self.useFixture(mockpatch.Patch(
'ceilometer.ipmi.platform.intel_node_manager.NodeManager',
return_value=nm))
self.useFixture(mockpatch.Patch(
'ceilometer.ipmi.platform.ipmi_sensor.IPMISensor',
return_value=nm))
self.mgr = manager.AgentManager()
self.pollster = self.make_pollster()
def _verify_metering(self, length, expected_vol=None, node=None):
cache = {}
resources = {}
samples = list(self.pollster.get_samples(self.mgr, cache, resources))
self.assertEqual(length, len(samples))
if expected_vol:
self.assertTrue(any(s.volume == expected_vol for s in samples))
if node:
self.assertTrue(any(s.resource_metadata['node'] == node
for s in samples))
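# Illustrative sketch, not part of the original module: a minimal concrete
# subclass showing how the abstract hooks plug into _test_get_samples() and
# _verify_metering(). The payload shapes and the mocked pollster are
# hypothetical; real subclasses return an actual ceilometer pollster.
class _ExamplePollsterTest(TestPollsterBase):
    def fake_data(self):
        return {"node1": {"temperature": 30}}
    def fake_sensor_data(self, sensor_type):
        return {"node1": {sensor_type: "30"}}
    def make_pollster(self):
        # Stand-in pollster that yields no samples when polled.
        pollster = mock.Mock()
        pollster.get_samples.return_value = iter([])
        return pollster
    def test_get_samples(self):
        self._test_get_samples()
        self._verify_metering(0)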
|
{
"content_hash": "d418ce625f0e0d23ff28a9dcd27750b9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.63558352402746,
"repo_name": "luogangyi/Ceilometer-oVirt",
"id": "90d11c7532871ab6e5aa7cc536921b1ba63e557d",
"size": "2365",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/juno",
"path": "ceilometer/tests/ipmi/pollsters/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5438675"
},
{
"name": "Shell",
"bytes": "1304"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_auto_20150616_2002'),
]
operations = [
migrations.AddField(
model_name='post',
name='slug',
field=models.SlugField(default='', verbose_name='Endereço'),
preserve_default=False,
),
]
|
{
"content_hash": "99fda0ba6b8ae514e37bce780508c0bd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 22.473684210526315,
"alnum_prop": 0.5831381733021077,
"repo_name": "patrickporto/soldajustica",
"id": "b17cfbe9cc828a0479a763408f94afd6bb439939",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soldajustica/blog/migrations/0004_post_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10574"
},
{
"name": "HTML",
"bytes": "18420"
},
{
"name": "JavaScript",
"bytes": "10714"
},
{
"name": "Python",
"bytes": "35677"
}
],
"symlink_target": ""
}
|
from .search_results_answer import SearchResultsAnswer
class Videos(SearchResultsAnswer):
"""Defines a video answer.
Variables are only populated by the server, and will be ignored when
sending a request.
:param _type: Constant filled by server.
:type _type: str
:ivar id: A String identifier.
:vartype id: str
:ivar web_search_url: The URL To Bing's search result for this item.
:vartype web_search_url: str
:ivar follow_up_queries:
:vartype follow_up_queries:
list[~azure.cognitiveservices.search.websearch.models.Query]
:ivar query_context:
:vartype query_context:
~azure.cognitiveservices.search.websearch.models.QueryContext
:ivar total_estimated_matches: The estimated number of webpages that are
relevant to the query. Use this number along with the count and offset
query parameters to page the results.
:vartype total_estimated_matches: long
:ivar is_family_friendly:
:vartype is_family_friendly: bool
:param value: A list of video objects that are relevant to the query.
:type value:
list[~azure.cognitiveservices.search.websearch.models.VideoObject]
:ivar next_offset:
:vartype next_offset: int
:ivar query_expansions:
:vartype query_expansions:
list[~azure.cognitiveservices.search.websearch.models.Query]
:ivar related_searches:
:vartype related_searches:
list[~azure.cognitiveservices.search.websearch.models.Query]
"""
_validation = {
'_type': {'required': True},
'id': {'readonly': True},
'web_search_url': {'readonly': True},
'follow_up_queries': {'readonly': True},
'query_context': {'readonly': True},
'total_estimated_matches': {'readonly': True},
'is_family_friendly': {'readonly': True},
'value': {'required': True},
'next_offset': {'readonly': True},
'query_expansions': {'readonly': True},
'related_searches': {'readonly': True},
}
_attribute_map = {
'_type': {'key': '_type', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'web_search_url': {'key': 'webSearchUrl', 'type': 'str'},
'follow_up_queries': {'key': 'followUpQueries', 'type': '[Query]'},
'query_context': {'key': 'queryContext', 'type': 'QueryContext'},
'total_estimated_matches': {'key': 'totalEstimatedMatches', 'type': 'long'},
'is_family_friendly': {'key': 'isFamilyFriendly', 'type': 'bool'},
'value': {'key': 'value', 'type': '[VideoObject]'},
'next_offset': {'key': 'nextOffset', 'type': 'int'},
'query_expansions': {'key': 'queryExpansions', 'type': '[Query]'},
'related_searches': {'key': 'relatedSearches', 'type': '[Query]'},
}
def __init__(self, value):
super(Videos, self).__init__()
self.value = value
self.next_offset = None
self.query_expansions = None
self.related_searches = None
self._type = 'Videos'
|
{
"content_hash": "a1c7789fe65bcf6c740d9fee2806eb01",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 84,
"avg_line_length": 40.013333333333335,
"alnum_prop": 0.6221259580139953,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "e3d3731268d58be241c8dc3c5be51f752d765ad8",
"size": "3475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-cognitiveservices-search-websearch/azure/cognitiveservices/search/websearch/models/videos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import sprinter.lib as lib
from mock import Mock, patch
from sprinter.testtools import FormulaTest
from sprinter.formula.base import FormulaBase
source_config = """
[prompt_value_source]
formula = sprinter.formula.base
source_value = old
[resolve]
formula = sprinter.formula.base
systems = osx
depends = prompt_value_source
"""
target_config = """
[install_with_rc]
formula = sprinter.formula.base
rc = teststring
[install_with_command]
formula = sprinter.formula.base
command = echo 'helloworld'
[osx]
systems = osx
formula = sprinter.formula.base
[osx2]
systems = OsX
formula = sprinter.formula.base
[debian]
systems = debian
formula = sprinter.formula.base
[prompt_value]
formula = sprinter.formula.base
existing_value = here
[prompt_value_source]
formula = sprinter.formula.base
[resolve]
formula = sprinter.formula.base
"""
class TestFormulaBase(FormulaTest):
"""Tests for the formula base"""
def setup(self):
super(TestFormulaBase, self).setup(
source_config=source_config, target_config=target_config
)
def test_install_with_rc(self):
"""Test install with rc"""
self.directory.add_to_rc = Mock()
self.environment.run_feature("install_with_rc", "sync")
self.directory.add_to_rc.assert_called_once_with("teststring")
@patch.object(lib, "call")
def test_install_with_command(self, call):
"""Test install with command"""
self.environment.run_feature("install_with_command", "sync")
call.assert_called_once_with("echo 'helloworld'", cwd=None, shell=True)
def test_osx_only(self):
"""Test a feature that should only occur on osx"""
fb = FormulaBase(
self.environment,
"osx",
target=self.environment.target.get_feature_config("osx"),
)
fb2 = FormulaBase(
self.environment,
"osx2",
target=self.environment.target.get_feature_config("osx2"),
)
with patch("sprinter.lib.system.is_osx") as is_osx:
is_osx.return_value = True
assert fb.should_run()
assert fb2.should_run()
is_osx.return_value = False
assert not fb.should_run()
assert not fb2.should_run()
def test_debianbased_only(self):
"""Test a feature that should only occur on debian-based distributions"""
fb = FormulaBase(
self.environment,
"debian",
target=self.environment.target.get_feature_config("debian"),
)
with patch("sprinter.lib.system.is_debian") as is_debian:
is_debian.return_value = True
assert fb.should_run()
is_debian.return_value = False
assert not fb.should_run()
def test_prompt_value(self):
"""_prompt_value should prompt a value if it does not exist in the target"""
fb = FormulaBase(
self.environment,
"prompt_value",
target=self.environment.target.get_feature_config("prompt_value"),
)
with patch("sprinter.lib.prompt") as prompt:
prompt.return_value = "foo"
fb._prompt_value("existing_value", "this value exists")
assert fb.target.get("existing_value") == "here"
fb._prompt_value("non_existing_value", "this value doesn't exists")
assert fb.target.get("non_existing_value") == "foo"
def test_prompt_value_default(self):
"""_prompt_value default should be overwritten by the source if it exists"""
fb = FormulaBase(
self.environment,
"prompt_value",
source=self.environment.source.get_feature_config("prompt_value_source"),
target=self.environment.target.get_feature_config("prompt_value_source"),
)
with patch("sprinter.lib.prompt") as prompt:
prompt.return_value = "foo"
fb._prompt_value("source_value", "this value exists")
prompt.assert_called_once_with("this value exists", default="old")
def test_resolve_dont_carry_over_options(self):
"""resolve should resolve non-carry over options"""
fb = FormulaBase(
self.environment,
"prompt_value",
source=self.environment.source.get_feature_config("resolve"),
target=self.environment.target.get_feature_config("resolve"),
)
fb.resolve()
assert not fb.target.has("systems")
assert not fb.target.has("depends")
|
{
"content_hash": "afd3e8f9a95a101b031fe194caa888ed",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 85,
"avg_line_length": 32.38297872340426,
"alnum_prop": 0.6263688129653964,
"repo_name": "toumorokoshi/sprinter",
"id": "e03c70ef4795967120db0a7b2e286df991e1fdb4",
"size": "4566",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sprinter/formula/tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "528"
},
{
"name": "Python",
"bytes": "267773"
},
{
"name": "Shell",
"bytes": "2058"
}
],
"symlink_target": ""
}
|
import os
class MongoConfig(object):
MONGODB_DB = 'dev'
MONGODB_HOST = os.environ.get('MONGO_PORT_27017_TCP_ADDR')
MONGODB_PORT = os.environ.get('MONGO_PORT_27017_TCP_PORT')
|
{
"content_hash": "528c29564f20347b7f0778151a8f7b94",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 26.857142857142858,
"alnum_prop": 0.6914893617021277,
"repo_name": "piotrdubiel/scribeserver",
"id": "17e7245872153fc465da020d964eecbaf9325a66",
"size": "188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/docker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "491"
},
{
"name": "HTML",
"bytes": "5449"
},
{
"name": "Python",
"bytes": "81995"
}
],
"symlink_target": ""
}
|
LAT_TT = (
"Latitude of the location where the sample was collected. Needs to be entered in decimal degrees. "
"Negative values for South latitudes. (Coordinate system: WGS84)"
)
LONG_TT = (
"Longitude of the location where the sample was collected. Needs to be entered in decimal degrees. "
"Negative values for West longitudes. (Coordinate system: WGS84)"
)
ELEVATION_TT = "Elevation at which a sample was collected (in meters). Use negative values for depth below sea level"
ROCK_TYPES = ("Igneous", "Metamorphic", "Ore", "Sedimentary", "Xenolithic")
SUB_ROCK_TYPES = {
"Igneous": ("Plutonic", "Volcanic"),
"Metamorphic": [],
"Ore": [],
"Sedimentary": [],
"Xenolithic": [],
}
ROCK_TYPE_DETAILS = {
"Igneous": ("Exotic", "Felsic", "Intermediate", "Mafic", "Ultramafic"),
"Metamorphic": (
"Calc-Silicate",
"Eclogite",
"Gneiss",
"Granofels",
"Granulite",
"MechanicallyBroken",
"Meta-Carbonate",
"Meta-Ultramafic",
"Metasedimentary",
"Metasomatic",
"Schist",
"Slate",
),
"Ore": ("Other", "Oxide", "Sulfide"),
"Sedimentary": (
"Carbonate",
"ConglomerateAndOrBreccia",
"Evaporite",
"GlacialAndOrPaleosol",
"Hybrid",
"Ironstone",
"MixedCarbAndOrSiliciclastic",
"MnNoduleAndOrCrust",
"SiliceousBiogenic",
"Siliciclastic",
"Volcaniclastic",
),
"Xenolithic": [],
}
SAMPLE_TYPES = (
"Bead",
"Chemical Fraction",
"Core",
"Core Half Round",
"Core Piece",
"Core Quarter Round",
"Core Section",
"Core Section Half",
"Core Sub-Piece",
"Core Whole Round",
"CTD",
"Cube",
"Culture",
"Cuttings",
"Cylinder",
"Dredge",
"Gas",
"Grab",
"Hole",
"Individual Sample",
"Liquid",
"Mechanical Fraction",
"Oriented Core",
"Powder",
"Rock Powder",
"Site",
"Slab",
"Smear",
"Specimen",
"Squeeze Cake",
"Terrestrial Section",
"Thin Section",
"Toothpick",
"Trawl",
"U-Channel",
"Wedge",
"Other",
)
SAMPLE_ATTRS = (
("user_code", "", True),
("sample_type", "", True),
("name", "", True),
("material", "", True),
("description", "", False),
("age_min", "", False),
("age_max", "", False),
("age_unit", "", False),
("collection_method", "", False),
("latitude", "", False),
("longitude", "", False),
("elevation", "", False),
("primary_location_name", "", False),
("country", "", False),
("province", "", False),
("county", "", False),
("collector", "", False),
("collection_start_date", "", False),
("collection_date_precision", "", False),
("original_archive", "", False),
)
MATERIALS = (
"Rock",
"Sediment",
"Soil",
"Synthetic",
"NotApplicable",
"Other",
"Biology",
"Gas",
"Ice",
"LiquidAqueous",
"LiquidOrganic",
"Mineral",
"Particulate",
)
# ============= EOF =============================================
|
{
"content_hash": "88285e0f4c71bc8ab279a0160b0ad4d6",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 117,
"avg_line_length": 23.575757575757574,
"alnum_prop": 0.5308483290488432,
"repo_name": "NMGRL/pychron",
"id": "ca695751f764aabc6a76b90cc245fc6bcdc61e24",
"size": "3973",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/igsn/definitions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ImageOSDisk(Model):
"""Describes an Operating System disk.
:param os_type: This property allows you to specify the type of the OS
that is included in the disk if creating a VM from a custom image.
<br><br> Possible values are: <br><br> **Windows** <br><br> **Linux**.
Possible values include: 'Windows', 'Linux'
:type os_type: str or
~azure.mgmt.compute.v2017_03_30.models.OperatingSystemTypes
:param os_state: The OS State. Possible values include: 'Generalized',
'Specialized'
:type os_state: str or
~azure.mgmt.compute.v2017_03_30.models.OperatingSystemStateTypes
:param snapshot: The snapshot.
:type snapshot: ~azure.mgmt.compute.v2017_03_30.models.SubResource
:param managed_disk: The managedDisk.
:type managed_disk: ~azure.mgmt.compute.v2017_03_30.models.SubResource
:param blob_uri: The Virtual Hard Disk.
:type blob_uri: str
:param caching: Specifies the caching requirements. <br><br> Possible
values are: <br><br> **None** <br><br> **ReadOnly** <br><br> **ReadWrite**
<br><br> Default: **None for Standard storage. ReadOnly for Premium
storage**. Possible values include: 'None', 'ReadOnly', 'ReadWrite'
:type caching: str or ~azure.mgmt.compute.v2017_03_30.models.CachingTypes
:param disk_size_gb: Specifies the size of empty data disks in gigabytes.
    This element can be used to overwrite the size of the disk in a virtual
machine image. <br><br> This value cannot be larger than 1023 GB
:type disk_size_gb: int
:param storage_account_type: Specifies the storage account type for the
managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible
values include: 'Standard_LRS', 'Premium_LRS'
:type storage_account_type: str or
~azure.mgmt.compute.v2017_03_30.models.StorageAccountTypes
"""
_validation = {
'os_type': {'required': True},
'os_state': {'required': True},
}
_attribute_map = {
'os_type': {'key': 'osType', 'type': 'OperatingSystemTypes'},
'os_state': {'key': 'osState', 'type': 'OperatingSystemStateTypes'},
'snapshot': {'key': 'snapshot', 'type': 'SubResource'},
'managed_disk': {'key': 'managedDisk', 'type': 'SubResource'},
'blob_uri': {'key': 'blobUri', 'type': 'str'},
'caching': {'key': 'caching', 'type': 'CachingTypes'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'StorageAccountTypes'},
}
def __init__(self, os_type, os_state, snapshot=None, managed_disk=None, blob_uri=None, caching=None, disk_size_gb=None, storage_account_type=None):
super(ImageOSDisk, self).__init__()
self.os_type = os_type
self.os_state = os_state
self.snapshot = snapshot
self.managed_disk = managed_disk
self.blob_uri = blob_uri
self.caching = caching
self.disk_size_gb = disk_size_gb
self.storage_account_type = storage_account_type
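# A minimal usage sketch (editor's illustration, not part of the SDK):
# constructing an ImageOSDisk for a generalized Linux image backed by a
# managed-disk snapshot. The snapshot ID below is a placeholder.
if __name__ == '__main__':
    from azure.mgmt.compute.v2017_03_30.models import SubResource
    example_disk = ImageOSDisk(
        os_type='Linux',           # required: 'Windows' or 'Linux'
        os_state='Generalized',    # required: 'Generalized' or 'Specialized'
        snapshot=SubResource(id='/subscriptions/.../snapshots/example'),
        caching='ReadOnly',
        disk_size_gb=128,
    )
    print(example_disk.os_type, example_disk.disk_size_gb)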
|
{
"content_hash": "0fa6cb85eed725f3d47a75ca3c535a3b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 151,
"avg_line_length": 48.3125,
"alnum_prop": 0.6545924967658473,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "9e656959bbcd2eb26ae3cda727c689454de35104",
"size": "3566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/image_os_disk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
"""Create Message Table
Revision ID: 3978aa063aba
Revises:
Create Date: 2017-09-01 14:01:18.246416
"""
# revision identifiers, used by Alembic.
revision = '3978aa063aba'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('messages',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('messages')
### end Alembic commands ###
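# Editor's sketch of applying and reverting this revision programmatically
# (assumes a standard alembic.ini is present; the CLI equivalents are
# `alembic upgrade head` and `alembic downgrade -1`):
if __name__ == '__main__':
    from alembic.config import Config
    from alembic import command
    cfg = Config('alembic.ini')
    command.upgrade(cfg, '3978aa063aba')   # create the 'messages' table
    command.downgrade(cfg, 'base')         # drop it again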
|
{
"content_hash": "1cd92aaf6a08798fbfb8aaa2f5e047be",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 63,
"avg_line_length": 24.558823529411764,
"alnum_prop": 0.6730538922155689,
"repo_name": "vigov5/framgiatw_flask_demo",
"id": "f0fcb79129cb3277ba4d3b73947c18f5a45b0b73",
"size": "835",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_app/alembic/versions/3978aa063aba_create_message_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1765"
},
{
"name": "HTML",
"bytes": "2974"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "4634"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
}
|
from c7n_azure.provider import resources
from c7n_azure.resources.arm import ArmResourceManager
@resources.register('cdnprofile')
class CdnProfile(ArmResourceManager):
"""CDN Resource
:example:
Returns all CDNs with Standard_Verizon sku
.. code-block:: yaml
policies:
- name: standard-verizon
resource: azure.cdnprofile
filters:
- type: value
key: sku
op: in
value_type: normalize
value: Standard_Verizon
"""
class resource_type(ArmResourceManager.resource_type):
doc_groups = ['Media']
service = 'azure.mgmt.cdn'
client = 'CdnManagementClient'
enum_spec = ('profiles', 'list', None)
default_report_fields = (
'name',
'location',
'resourceGroup',
'sku.name'
)
resource_type = 'Microsoft.Cdn/profiles'
|
{
"content_hash": "64c00780f2b110dba352ead1b2c27f87",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 58,
"avg_line_length": 24.564102564102566,
"alnum_prop": 0.5594989561586639,
"repo_name": "kapilt/cloud-custodian",
"id": "9dee6bbe1e54401ff9331ae7916eb8ea56e31c25",
"size": "1544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/c7n_azure/resources/cdn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8163"
},
{
"name": "Go",
"bytes": "146630"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9971"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "5354902"
},
{
"name": "Shell",
"bytes": "13032"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
class VerbatimNode(template.Node):
def __init__(self, text):
self.text = text
def render(self, context):
return self.text
@register.tag
def verbatim(parser, token):
text = []
while 1:
token = parser.tokens.pop(0)
if token.contents == 'endverbatim':
break
if token.token_type == template.TOKEN_VAR:
text.append('{{ ')
elif token.token_type == template.TOKEN_BLOCK:
text.append('{%')
text.append(token.contents)
if token.token_type == template.TOKEN_VAR:
text.append(' }}')
elif token.token_type == template.TOKEN_BLOCK:
if not text[-1].startswith('='):
text[-1:-1] = [' ']
text.append(' %}')
return VerbatimNode(''.join(text))
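# Usage illustration (editor's sketch): within a template, the tag emits its
# body verbatim, so client-side {{ }} placeholders survive Django rendering
# and can be handed to e.g. a JavaScript template engine:
#
#   {% load verbatim %}
#   {% verbatim %}
#     <p>Hello {{ name }}!</p>
#   {% endverbatim %}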
|
{
"content_hash": "3fb33b694ea1519138b6b71517965ab1",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 54,
"avg_line_length": 26.393939393939394,
"alnum_prop": 0.5545350172215844,
"repo_name": "socradev/django-fancypages",
"id": "f8f379f4ce17dfabde211b94c811757a4c553fc7",
"size": "871",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fancypages/templatetags/verbatim.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "187646"
},
{
"name": "HTML",
"bytes": "64967"
},
{
"name": "JavaScript",
"bytes": "561457"
},
{
"name": "Makefile",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "526740"
}
],
"symlink_target": ""
}
|
import pyvex
import claripy
import cle
from archinfo import ArchARM
from cachetools import LRUCache
import logging
from ..engine import SimEngineBase
from ...state_plugins.inspect import BP_AFTER, BP_BEFORE
from ...misc.ux import once
from ...errors import SimEngineError, SimTranslationError, SimError
from ... import sim_options as o
l = logging.getLogger(__name__)
VEX_IRSB_MAX_SIZE = 400
VEX_IRSB_MAX_INST = 99
class VEXLifter(SimEngineBase):
def __init__(self, project,
use_cache=None,
cache_size=50000,
default_opt_level=1,
support_selfmodifying_code=None,
single_step=False,
default_strict_block_end=False, **kwargs):
super().__init__(project, **kwargs)
self._use_cache = use_cache
self._default_opt_level = default_opt_level
self._cache_size = cache_size
self._support_selfmodifying_code = support_selfmodifying_code
self._single_step = single_step
self.default_strict_block_end = default_strict_block_end
if self._use_cache is None:
if self.project is not None:
self._use_cache = self.project._translation_cache
else:
self._use_cache = False
if self._support_selfmodifying_code is None:
if self.project is not None:
self._support_selfmodifying_code = self.project._support_selfmodifying_code
else:
self._support_selfmodifying_code = False
# block cache
self._block_cache = None
self._block_cache_hits = 0
self._block_cache_misses = 0
self._initialize_block_cache()
def _initialize_block_cache(self):
self._block_cache = LRUCache(maxsize=self._cache_size)
self._block_cache_hits = 0
self._block_cache_misses = 0
def clear_cache(self):
self._block_cache = LRUCache(maxsize=self._cache_size)
self._block_cache_hits = 0
self._block_cache_misses = 0
def lift_vex(self,
addr=None,
state=None,
clemory=None,
insn_bytes=None,
arch=None,
size=None,
num_inst=None,
traceflags=0,
thumb=False,
extra_stop_points=None,
opt_level=None,
strict_block_end=None,
skip_stmts=False,
collect_data_refs=False):
"""
Lift an IRSB.
There are many possible valid sets of parameters. You at the very least must pass some
source of data, some source of an architecture, and some source of an address.
Sources of data in order of priority: insn_bytes, clemory, state
Sources of an address, in order of priority: addr, state
Sources of an architecture, in order of priority: arch, clemory, state
:param state: A state to use as a data source.
:param clemory: A cle.memory.Clemory object to use as a data source.
:param addr: The address at which to start the block.
:param thumb: Whether the block should be lifted in ARM's THUMB mode.
:param opt_level: The VEX optimization level to use. The final IR optimization level is determined by
(ordered by priority):
- Argument opt_level
- opt_level is set to 1 if OPTIMIZE_IR exists in state options
- self._default_opt_level
:param insn_bytes: A string of bytes to use as a data source.
:param size: The maximum size of the block, in bytes.
:param num_inst: The maximum number of instructions.
:param traceflags: traceflags to be passed to VEX. (default: 0)
:param strict_block_end: Whether to force blocks to end at all conditional branches (default: false)
"""
# phase 0: sanity check
if not state and not clemory and not insn_bytes:
raise ValueError("Must provide state or clemory or insn_bytes!")
if not state and not clemory and not arch:
raise ValueError("Must provide state or clemory or arch!")
if addr is None and not state:
raise ValueError("Must provide state or addr!")
if arch is None:
arch = clemory._arch if clemory else state.arch
if arch.name.startswith("MIPS") and self._single_step:
l.error("Cannot specify single-stepping on MIPS.")
self._single_step = False
# phase 1: parameter defaults
if addr is None:
addr = state.solver.eval(state._ip)
if size is not None:
size = min(size, VEX_IRSB_MAX_SIZE)
if size is None:
size = VEX_IRSB_MAX_SIZE
if num_inst is not None:
num_inst = min(num_inst, VEX_IRSB_MAX_INST)
if num_inst is None and self._single_step:
num_inst = 1
if opt_level is None:
if state and o.OPTIMIZE_IR in state.options:
opt_level = 1
else:
opt_level = self._default_opt_level
if strict_block_end is None:
strict_block_end = self.default_strict_block_end
if self._support_selfmodifying_code:
if opt_level > 0:
if once('vex-engine-smc-opt-warning'):
l.warning("Self-modifying code is not always correctly optimized by PyVEX. "
"To guarantee correctness, VEX optimizations have been disabled.")
opt_level = 0
if state and o.OPTIMIZE_IR in state.options:
state.options.remove(o.OPTIMIZE_IR)
if skip_stmts is not True:
skip_stmts = False
use_cache = self._use_cache
if skip_stmts or collect_data_refs:
# Do not cache the blocks if skip_stmts or collect_data_refs are enabled
use_cache = False
# phase 2: thumb normalization
thumb = int(thumb)
if isinstance(arch, ArchARM):
if addr % 2 == 1:
thumb = 1
if thumb:
addr &= ~1
elif thumb:
l.error("thumb=True passed on non-arm architecture!")
thumb = 0
# phase 3: check cache
cache_key = None
if use_cache:
cache_key = (addr, insn_bytes, size, num_inst, thumb, opt_level, strict_block_end)
if cache_key in self._block_cache:
self._block_cache_hits += 1
irsb = self._block_cache[cache_key]
stop_point = self._first_stoppoint(irsb, extra_stop_points)
if stop_point is None:
return irsb
else:
size = stop_point - addr
# check the cache again
cache_key = (addr, insn_bytes, size, num_inst, thumb, opt_level, strict_block_end)
if cache_key in self._block_cache:
self._block_cache_hits += 1
return self._block_cache[cache_key]
else:
self._block_cache_misses += 1
else:
# a special case: `size` is used as the maximum allowed size
tmp_cache_key = (addr, insn_bytes, VEX_IRSB_MAX_SIZE, num_inst, thumb, opt_level, strict_block_end)
try:
irsb = self._block_cache[tmp_cache_key]
if irsb.size <= size:
self._block_cache_hits += 1
return self._block_cache[tmp_cache_key]
except KeyError:
self._block_cache_misses += 1
# vex_lift breakpoints only triggered when the cache isn't used
if state:
state._inspect('vex_lift', BP_BEFORE, mem_read_address=addr, mem_read_length=size)
# phase 4: get bytes
if insn_bytes is not None:
buff, size = insn_bytes, len(insn_bytes)
else:
buff, size = self._load_bytes(addr, size, state, clemory)
if not buff or size == 0:
raise SimEngineError("No bytes in memory for block starting at %#x." % addr)
# phase 5: call into pyvex
# l.debug("Creating pyvex.IRSB of arch %s at %#x", arch.name, addr)
try:
for subphase in range(2):
irsb = pyvex.lift(buff, addr + thumb, arch,
max_bytes=size,
max_inst=num_inst,
bytes_offset=thumb,
traceflags=traceflags,
opt_level=opt_level,
strict_block_end=strict_block_end,
skip_stmts=skip_stmts,
collect_data_refs=collect_data_refs,
)
if subphase == 0 and irsb.statements is not None:
# check for possible stop points
stop_point = self._first_stoppoint(irsb, extra_stop_points)
if stop_point is not None:
size = stop_point - addr
continue
if use_cache:
self._block_cache[cache_key] = irsb
if state:
state._inspect('vex_lift', BP_AFTER, mem_read_address=addr, mem_read_length=size)
return irsb
# phase x: error handling
except pyvex.PyVEXError as e:
l.debug("VEX translation error at %#x", addr)
if isinstance(buff, bytes):
l.debug('Using bytes: %r', buff)
else:
l.debug("Using bytes: %r", pyvex.ffi.buffer(buff, size))
raise SimTranslationError("Unable to translate bytecode") from e
def _load_bytes(self, addr, max_size, state=None, clemory=None):
if not clemory:
if state is None:
raise SimEngineError('state and clemory cannot both be None in _load_bytes().')
if o.ABSTRACT_MEMORY in state.options:
# abstract memory
clemory = state.memory.regions['global'].memory.mem._memory_backer
else:
# symbolic memory
clemory = state.memory.mem._memory_backer
buff, size = b"", 0
# Load from the clemory if we can
smc = self._support_selfmodifying_code
if state and not smc:
try:
p = state.memory.permissions(addr)
if p.symbolic:
smc = True
else:
smc = claripy.is_true(p & 2 != 0)
except: # pylint: disable=bare-except
smc = True # I don't know why this would ever happen, we checked this right?
if (not smc or not state) and isinstance(clemory, cle.Clemory):
try:
start, backer = next(clemory.backers(addr))
except StopIteration:
pass
else:
if start <= addr:
offset = addr - start
buff = pyvex.ffi.from_buffer(backer) + offset
size = len(backer) - offset
# If that didn't work, try to load from the state
if size == 0 and state:
fallback = True
if addr in state.memory and addr + max_size - 1 in state.memory:
try:
buff = state.solver.eval(state.memory.load(addr, max_size, inspect=False), cast_to=bytes)
size = max_size
fallback = False
except SimError:
l.warning("Cannot load bytes at %#x. Fallback to the slow path.", addr)
if fallback:
buff_lst = [ ]
symbolic_warned = False
for i in range(max_size):
if addr + i in state.memory:
try:
byte = state.memory.load(addr + i, 1, inspect=False)
if byte.symbolic and not symbolic_warned:
symbolic_warned = True
l.warning("Executing symbolic code at %#x", addr + i)
buff_lst.append(state.solver.eval(byte))
except SimError:
break
else:
break
buff = bytes(buff_lst)
size = len(buff)
size = min(max_size, size)
return buff, size
def _first_stoppoint(self, irsb, extra_stop_points=None):
"""
Enumerate the imarks in the block. If any of them (after the first one) are at a stop point, returns the address
of the stop point. None is returned otherwise.
"""
if extra_stop_points is None and self.project is None:
return None
first_imark = True
for stmt in irsb.statements:
if type(stmt) is pyvex.stmt.IMark: # pylint: disable=unidiomatic-typecheck
addr = stmt.addr + stmt.delta
if not first_imark:
if self.__is_stop_point(addr, extra_stop_points):
# could this part be moved by pyvex?
return addr
if stmt.delta != 0 and self.__is_stop_point(stmt.addr, extra_stop_points):
return addr
first_imark = False
return None
def __is_stop_point(self, addr, extra_stop_points=None):
if self.project is not None and addr in self.project._sim_procedures:
return True
elif extra_stop_points is not None and addr in extra_stop_points:
return True
return False
def __getstate__(self):
ostate = super().__getstate__()
s = {
'_use_cache': self._use_cache,
'_default_opt_level': self._default_opt_level,
'_support_selfmodifying_code': self._support_selfmodifying_code,
'_single_step': self._single_step,
'_cache_size': self._cache_size,
'default_strict_block_end': self.default_strict_block_end
}
return (s, ostate)
def __setstate__(self, state):
s, ostate = state
self._use_cache = s['_use_cache']
self._default_opt_level = s['_default_opt_level']
self._support_selfmodifying_code = s['_support_selfmodifying_code']
self._single_step = s['_single_step']
self._cache_size = s['_cache_size']
self.default_strict_block_end = s['default_strict_block_end']
# rebuild block cache
self._initialize_block_cache()
super().__setstate__(ostate)
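# A minimal end-to-end sketch of this lifter in action (editor's
# illustration; assumes angr is installed and the binary path exists):
if __name__ == '__main__':
    import angr
    proj = angr.Project('/bin/true', auto_load_libs=False)
    irsb = proj.factory.block(proj.entry).vex  # lift_vex runs under the hood
    irsb.pp()                                  # pretty-print the lifted IRSB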
|
{
"content_hash": "f42b6f4189178f4519c547b4eb3d6089",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 120,
"avg_line_length": 40.44086021505376,
"alnum_prop": 0.5291810688646637,
"repo_name": "schieb/angr",
"id": "a3ef409c97fee5d0ceabaeaad429a5122129eb8a",
"size": "15044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/engines/vex/lifter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39522"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "739"
},
{
"name": "Python",
"bytes": "4987778"
}
],
"symlink_target": ""
}
|
"""Unit tests for the stub provider.
"""
import unittest
from dsub.providers import stub
def raw_ops(tasklist):
"""Convert returned operations to raw operations."""
return [t.raw_task_data() for t in tasklist]
class TestGetJob(unittest.TestCase):
def test_get_success(self):
prov = stub.StubJobProvider()
job_suc = {'job-id': 'job_suc', 'status': ('SUCCESS', '123')}
job_fail = {'job-id': 'job_fail', 'status': ('FAILURE', '123')}
prov.set_operations([job_suc, job_fail])
tasks = prov.lookup_job_tasks(['SUCCESS'])
self.assertEqual(raw_ops(tasks), [job_suc])
def test_get_several(self):
prov = stub.StubJobProvider()
job_suc = {'job-id': 'job_suc', 'status': ('SUCCESS', '123')}
job_fail = {'job-id': 'job_fail', 'status': ('FAILURE', '123')}
job_run = {'job-id': 'job_run', 'status': ('RUNNING', '123')}
prov.set_operations([job_suc, job_fail, job_run])
tasks = prov.lookup_job_tasks(['SUCCESS', 'FAILURE'])
self.assertEqual(raw_ops(tasks), [job_suc, job_fail])
def test_get_star(self):
prov = stub.StubJobProvider()
job_suc = {'job-id': 'job_suc', 'status': ('SUCCESS', '123')}
job_fail = {'job-id': 'job_fail', 'status': ('FAILURE', '123')}
prov.set_operations([job_suc, job_fail])
tasks = prov.lookup_job_tasks('*')
self.assertEqual(raw_ops(tasks), [job_suc, job_fail])
def test_get_star_list(self):
prov = stub.StubJobProvider()
job_suc = {'job-id': 'job_suc', 'status': ('SUCCESS', '123')}
job_fail = {'job-id': 'job_fail', 'status': ('FAILURE', '123')}
prov.set_operations([job_suc, job_fail])
tasks = prov.lookup_job_tasks(['*'])
self.assertEqual(raw_ops(tasks), [job_suc, job_fail])
def test_get_none(self):
prov = stub.StubJobProvider()
job_suc = {'job-id': 'job_suc', 'status': ('SUCCESS', '123')}
job_fail = {'job-id': 'job_fail', 'status': ('FAILURE', '123')}
prov.set_operations([job_suc, job_fail])
tasks = prov.lookup_job_tasks(None)
self.assertEqual(raw_ops(tasks), [job_suc, job_fail])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "0c5d3d4e8ee782594dcdcf62a65fe610",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 67,
"avg_line_length": 36.189655172413794,
"alnum_prop": 0.6107670319199618,
"repo_name": "mbookman/task-submission-tools",
"id": "ea68135899c5af59a751c3426550ca7f7cbd648d",
"size": "2695",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/unit/test_stub_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "122311"
},
{
"name": "Shell",
"bytes": "85205"
}
],
"symlink_target": ""
}
|
from os.path import isfile, join
from platformio.commands.ci import cli as cmd_ci
from platformio.commands.lib.command import cli as cmd_lib
def test_ci_empty(clirunner):
result = clirunner.invoke(cmd_ci)
assert result.exit_code != 0
assert "Invalid value: Missing argument 'src'" in result.output
def test_ci_boards(clirunner, validate_cliresult):
result = clirunner.invoke(
cmd_ci,
[
join("examples", "wiring-blink", "src", "main.cpp"),
"-b",
"uno",
"-b",
"leonardo",
],
)
validate_cliresult(result)
def test_ci_build_dir(clirunner, tmpdir_factory, validate_cliresult):
build_dir = str(tmpdir_factory.mktemp("ci_build_dir"))
result = clirunner.invoke(
cmd_ci,
[
join("examples", "wiring-blink", "src", "main.cpp"),
"-b",
"uno",
"--build-dir",
build_dir,
],
)
validate_cliresult(result)
assert not isfile(join(build_dir, "platformio.ini"))
def test_ci_keep_build_dir(clirunner, tmpdir_factory, validate_cliresult):
build_dir = str(tmpdir_factory.mktemp("ci_build_dir"))
result = clirunner.invoke(
cmd_ci,
[
join("examples", "wiring-blink", "src", "main.cpp"),
"-b",
"uno",
"--build-dir",
build_dir,
"--keep-build-dir",
],
)
validate_cliresult(result)
assert isfile(join(build_dir, "platformio.ini"))
# 2nd attempt
result = clirunner.invoke(
cmd_ci,
[
join("examples", "wiring-blink", "src", "main.cpp"),
"-b",
"metro",
"--build-dir",
build_dir,
"--keep-build-dir",
],
)
validate_cliresult(result)
assert "board: uno" in result.output
assert "board: metro" in result.output
def test_ci_keep_build_dir_single_src_dir(
clirunner, tmpdir_factory, validate_cliresult
):
build_dir = str(tmpdir_factory.mktemp("ci_build_dir"))
# Run two times to detect possible "AlreadyExists" errors
for _ in range(2):
result = clirunner.invoke(
cmd_ci,
[
join("examples", "wiring-blink", "src"),
"-b",
"uno",
"--build-dir",
build_dir,
"--keep-build-dir",
],
)
validate_cliresult(result)
def test_ci_keep_build_dir_nested_src_dirs(
clirunner, tmpdir_factory, validate_cliresult
):
build_dir = str(tmpdir_factory.mktemp("ci_build_dir"))
# Split default Arduino project in two parts
src_dir1 = tmpdir_factory.mktemp("src_1")
src_dir1.join("src1.cpp").write(
"""
#include <Arduino.h>
void setup() {}
"""
)
src_dir2 = tmpdir_factory.mktemp("src_2")
src_dir2.join("src2.cpp").write(
"""
#include <Arduino.h>
void loop() {}
"""
)
src_dir1 = str(src_dir1)
src_dir2 = str(src_dir2)
# Run two times to detect possible "AlreadyExists" errors
for _ in range(2):
result = clirunner.invoke(
cmd_ci,
[
src_dir1,
src_dir2,
"-b",
"teensy40",
"--build-dir",
build_dir,
"--keep-build-dir",
],
)
validate_cliresult(result)
def test_ci_project_conf(clirunner, validate_cliresult):
project_dir = join("examples", "wiring-blink")
result = clirunner.invoke(
cmd_ci,
[
join(project_dir, "src", "main.cpp"),
"--project-conf",
join(project_dir, "platformio.ini"),
],
)
validate_cliresult(result)
assert "uno" in result.output
def test_ci_lib_and_board(clirunner, tmpdir_factory, validate_cliresult):
storage_dir = str(tmpdir_factory.mktemp("lib"))
result = clirunner.invoke(
cmd_lib, ["--storage-dir", storage_dir, "install", "1@2.3.2"]
)
validate_cliresult(result)
result = clirunner.invoke(
cmd_ci,
[
join(
storage_dir,
"OneWire",
"examples",
"DS2408_Switch",
"DS2408_Switch.pde",
),
"-l",
join(storage_dir, "OneWire"),
"-b",
"uno",
],
)
validate_cliresult(result)
|
{
"content_hash": "b75afe0b88d5e34d35ef1c9f5efb0864",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 74,
"avg_line_length": 25.134078212290504,
"alnum_prop": 0.5183374083129584,
"repo_name": "platformio/platformio",
"id": "01ac9c3764cba82f37cbe0838d03ac613511613e",
"size": "5110",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/commands/test_ci.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "1826"
},
{
"name": "Makefile",
"bytes": "356"
},
{
"name": "Processing",
"bytes": "101"
},
{
"name": "Python",
"bytes": "333618"
},
{
"name": "Smarty",
"bytes": "45408"
}
],
"symlink_target": ""
}
|
"""Utilities for VariableMgr."""
from __future__ import print_function
import collections as pycoll
import operator
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradients_impl
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
  increments `loss_scale_normal_steps`. However, once `loss_scale_normal_steps`
  reaches the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
  if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
    loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
  Note that variable creation only happens when building the model graph on the
  first device (see how it sets the 'reuse' parameter in
  VariableMgr.*.create_outer_variable_scope()). That means that, for all other
  devices, the variable scope will reuse the variables created before, which
  requires that we set the caching_device correctly, as otherwise it may not be
  able to find the previously created variable and will create a new one. In
  turn, this requires that, when building the model graph on different devices,
  variables with the same name have the same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDevicSetter.
Args:
worker_device: the device to use for computer ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
This class represents a variable through a staging buffer. Reads from this
variable directly gets from the staging buffer. Updates are stacked into
another staging buffer, and will be processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
name: currently ignored; names of ops and the StagingArea are
        computed without using this passed name.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = tf.contrib.staging.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter to this server only
# once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = tf.contrib.staging.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
      # For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables is writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nans or infs.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf indicates whether the grads have nans or infs.
"""
grads = [g for g, _ in grad_and_vars]
if any(isinstance(g, tf.IndexedSlices) for g in grads):
# TODO(reedwm): All-reduce IndexedSlices more effectively.
grad = gradients_impl._AggregateIndexedSlicesGradients(grads) # pylint: disable=protected-access
else:
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.scalar_mul(1.0 / len(grads), grad)
v = grad_and_vars[0][1]
if check_inf_nan:
with tf.name_scope('check_for_inf_and_nan'):
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
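# Pure-Python model of the loss-scaling policy implemented above with tf.cond
# (editor's illustration of the control flow, not part of the benchmark code):
def _sketch_step_loss_scale(loss_scale, normal_steps, inc_every_n, grads_bad):
    if grads_bad:                    # infs/nans: halve the scale, reset counter
        return loss_scale / 2., 0
    if normal_steps < inc_every_n:   # normal step: just count
        return loss_scale, normal_steps + 1
    return loss_scale * 2., 0        # threshold reached: double, reset counter
if __name__ == '__main__':
    scale, steps = 128., 0
    for bad in [False] * 5 + [True]:
        scale, steps = _sketch_step_loss_scale(scale, steps, 4, bad)
    print(scale)  # five good steps double once (256.0); the bad step halves to 128.0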
|
{
"content_hash": "11fbdf854259beaa6ee444aa03082e90",
"timestamp": "",
"source": "github",
"line_count": 520,
"max_line_length": 107,
"avg_line_length": 38.86538461538461,
"alnum_prop": 0.6877288471053934,
"repo_name": "mlperf/training_results_v0.5",
"id": "62ebc5d8ce9a8874157444728480695ef132feb6",
"size": "20887",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v3.8/ssd-tpuv3-8/code/ssd/model/staging/models/rough/nmt_gpu/variable_mgr/variable_mgr_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('structure', '0008_auto_20181212_1536'),
]
operations = [
migrations.AddField(
model_name='structure',
name='stats_text',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.StatsText'),
),
]
|
{
"content_hash": "0398e6377249b08c2c7fdb4b25b5effc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 118,
"avg_line_length": 26.176470588235293,
"alnum_prop": 0.6314606741573033,
"repo_name": "cmunk/protwis",
"id": "4f906aeadcaff4f3d423e0b78315568ba0567d10",
"size": "494",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "structure/migrations/0009_structure_stats_text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "139292"
},
{
"name": "HTML",
"bytes": "2035504"
},
{
"name": "JavaScript",
"bytes": "2234465"
},
{
"name": "Python",
"bytes": "3400624"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
}
|
import sparkpost
from .exceptions import SparkPostAPIException
class RequestsTransport(object):
def __init__(self):
import requests
self.sess = requests.Session()
def request(self, method, uri, headers, **kwargs):
response = self.sess.request(method, uri, headers=headers, **kwargs)
if response.status_code == 204:
return True
if not response.ok:
raise SparkPostAPIException(response)
if 'results' in response.json():
return response.json()['results']
return response.json()
class Resource(object):
key = ""
def __init__(self, base_uri, api_key, transport_class=RequestsTransport):
self.base_uri = base_uri
self.api_key = api_key
self.transport = transport_class()
@property
def uri(self):
return "%s/%s" % (self.base_uri, self.key)
def request(self, method, uri, **kwargs):
headers = {
'User-Agent': 'python-sparkpost/' + sparkpost.__version__,
'Content-Type': 'application/json',
'Authorization': self.api_key
}
response = self.transport.request(method, uri, headers=headers,
**kwargs)
return response
def get(self):
raise NotImplementedError
def list(self):
raise NotImplementedError
def create(self):
raise NotImplementedError
def update(self):
raise NotImplementedError
def delete(self):
raise NotImplementedError
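# Hypothetical subclass sketch (editor's illustration; 'metrics' is an
# invented endpoint name) showing how a concrete resource builds on the
# base classes above:
class ExampleMetrics(Resource):
    key = 'metrics'
    def list(self):
        # GET <base_uri>/metrics and return the parsed 'results' payload
        return self.request('GET', self.uri)
# usage (API key is a placeholder):
#   metrics = ExampleMetrics('https://api.sparkpost.com/api/v1', 'MY_API_KEY')
#   rows = metrics.list()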
|
{
"content_hash": "87ef7f6f22eb73ed5cc55f2a2ab10c19",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 27.280701754385966,
"alnum_prop": 0.5954983922829582,
"repo_name": "thonkify/thonkify",
"id": "4c3c4672ab01608df1499e7ed1eef71d12a76915",
"size": "1555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/sparkpost/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10460214"
},
{
"name": "Shell",
"bytes": "1470"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: slxos_command
version_added: "2.6"
author: "Lindsay Hill (@LindsayHill)"
short_description: Run commands on remote devices running Extreme Networks SLX-OS
description:
- Sends arbitrary commands to an SLX node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(slxos_config) to configure SLX-OS devices.
notes:
- Tested against SLX-OS 17s.1.02
- If a command sent to the device requires answering a prompt, it is possible
to pass a dict containing I(command), I(answer) and I(prompt). See examples.
options:
commands:
description:
- List of commands to send to the remote SLX-OS device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
        - Specifies the number of retries a command should be tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show version on remote devices
slxos_command:
commands: show version
- name: run show version and check to see if output contains SLX
slxos_command:
commands: show version
wait_for: result[0] contains SLX
- name: run multiple commands on remote nodes
slxos_command:
commands:
- show version
- show interfaces
- name: run multiple commands and evaluate the output
slxos_command:
commands:
- show version
- show interface status
wait_for:
- result[0] contains SLX
- result[1] contains Eth
- name: run command that requires answering a prompt
slxos_command:
commands:
- command: 'clear sessions'
prompt: 'This operation will logout all the user sessions. Do you want to continue (yes/no)?:'
answer: y
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import re
import time
from ansible.module_utils.network.slxos.slxos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
__metaclass__ = type
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for item in list(commands):
configure_type = re.match(r'conf(?:\w*)(?:\s+(\w+))?', item['command'])
if module.check_mode:
if configure_type and configure_type.group(1) not in ('confirm', 'replace', 'revert', 'network'):
module.fail_json(
msg='slxos_command does not support running config mode '
'commands. Please use slxos_config instead'
)
if not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
commands.remove(item)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list'),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
|
{
"content_hash": "477166ca2e0343b177107fbaa8fcd554",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 109,
"avg_line_length": 32.56363636363636,
"alnum_prop": 0.6365159128978225,
"repo_name": "thaim/ansible",
"id": "ea6cb5c968e981e5a631b862f415b247ef8c3c1c",
"size": "7322",
"binary": false,
"copies": "70",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/slxos/slxos_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from __future__ import division
import tensorflow as tf
class CaptionGenerator(object):
def __init__(self, word_to_idx, dim_feature=[196, 512], dim_embed=512, dim_hidden=1024, n_time_step=16,
prev2out=True, ctx2out=True, alpha_c=0.0, selector=True, dropout=True):
"""
Args:
word_to_idx: word-to-index mapping dictionary.
dim_feature: (optional) Dimension of vggnet19 conv5_3 feature vectors.
dim_embed: (optional) Dimension of word embedding.
dim_hidden: (optional) Dimension of all hidden state.
n_time_step: (optional) Time step size of LSTM.
prev2out: (optional) previously generated word to hidden state. (see Eq (7) for explanation)
ctx2out: (optional) context to hidden state (see Eq (7) for explanation)
alpha_c: (optional) Doubly stochastic regularization coefficient. (see Section (4.2.1) for explanation)
selector: (optional) gating scalar for context vector. (see Section (4.2.1) for explanation)
dropout: (optional) If true then dropout layer is added.
"""
self.word_to_idx = word_to_idx
        self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.prev2out = prev2out
self.ctx2out = ctx2out
self.alpha_c = alpha_c
self.selector = selector
self.dropout = dropout
self.V = len(word_to_idx)
self.L = dim_feature[0]
self.D = dim_feature[1]
self.M = dim_embed
self.H = dim_hidden
self.T = n_time_step
self._start = word_to_idx['<START>']
self._null = word_to_idx['<NULL>']
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
# Place holder for features and captions
self.features = tf.placeholder(tf.float32, [None, self.L, self.D])
self.captions = tf.placeholder(tf.int32, [None, self.T + 1])
def _get_initial_lstm(self, features):
with tf.variable_scope('initial_lstm'):
features_mean = tf.reduce_mean(features, 1)
w_h = tf.get_variable('w_h', [self.D, self.H], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.H], initializer=self.const_initializer)
h = tf.nn.tanh(tf.matmul(features_mean, w_h) + b_h)
w_c = tf.get_variable('w_c', [self.D, self.H], initializer=self.weight_initializer)
b_c = tf.get_variable('b_c', [self.H], initializer=self.const_initializer)
c = tf.nn.tanh(tf.matmul(features_mean, w_c) + b_c)
return c, h
def _word_embedding(self, inputs, reuse=False):
with tf.variable_scope('word_embedding', reuse=reuse):
w = tf.get_variable('w', [self.V, self.M], initializer=self.emb_initializer)
x = tf.nn.embedding_lookup(w, inputs, name='word_vector') # (N, T, M) or (N, M)
return x
def _project_features(self, features):
with tf.variable_scope('project_features'):
w = tf.get_variable('w', [self.D, self.D], initializer=self.weight_initializer)
features_flat = tf.reshape(features, [-1, self.D])
features_proj = tf.matmul(features_flat, w)
features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
return features_proj
def _attention_layer(self, features, features_proj, h, reuse=False):
with tf.variable_scope('attention_layer', reuse=reuse):
w = tf.get_variable('w', [self.H, self.D], initializer=self.weight_initializer)
b = tf.get_variable('b', [self.D], initializer=self.const_initializer)
w_att = tf.get_variable('w_att', [self.D, 1], initializer=self.weight_initializer)
h_att = tf.nn.relu(features_proj + tf.expand_dims(tf.matmul(h, w), 1) + b) # (N, L, D)
out_att = tf.reshape(tf.matmul(tf.reshape(h_att, [-1, self.D]), w_att), [-1, self.L]) # (N, L)
alpha = tf.nn.softmax(out_att)
context = tf.reduce_sum(features * tf.expand_dims(alpha, 2), 1, name='context') #(N, D)
return context, alpha
def _selector(self, context, h, reuse=False):
with tf.variable_scope('selector', reuse=reuse):
w = tf.get_variable('w', [self.H, 1], initializer=self.weight_initializer)
b = tf.get_variable('b', [1], initializer=self.const_initializer)
beta = tf.nn.sigmoid(tf.matmul(h, w) + b, 'beta') # (N, 1)
context = tf.mul(beta, context, name='selected_context')
return context, beta
def _decode_lstm(self, x, h, context, dropout=False, reuse=False):
with tf.variable_scope('logits', reuse=reuse):
w_h = tf.get_variable('w_h', [self.H, self.M], initializer=self.weight_initializer)
b_h = tf.get_variable('b_h', [self.M], initializer=self.const_initializer)
w_out = tf.get_variable('w_out', [self.M, self.V], initializer=self.weight_initializer)
b_out = tf.get_variable('b_out', [self.V], initializer=self.const_initializer)
if dropout:
h = tf.nn.dropout(h, 0.5)
h_logits = tf.matmul(h, w_h) + b_h
if self.ctx2out:
w_ctx2out = tf.get_variable('w_ctx2out', [self.D, self.M], initializer=self.weight_initializer)
h_logits += tf.matmul(context, w_ctx2out)
if self.prev2out:
h_logits += x
h_logits = tf.nn.tanh(h_logits)
if dropout:
h_logits = tf.nn.dropout(h_logits, 0.5)
out_logits = tf.matmul(h_logits, w_out) + b_out
return out_logits
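    # Note on the decoder above: with prev2out and ctx2out enabled this is the
    # "deep output layer" of Eq (7) in Xu et al. (Show, Attend and Tell):
    #     p(y_t) ~ softmax(W_out * tanh(E y_{t-1} + W_h h_t + W_ctx z_t))
    # where E y_{t-1} is the word embedding x and z_t is the attended context.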
def _batch_norm(self, x, mode='train', name=None):
return tf.contrib.layers.batch_norm(inputs=x,
decay=0.95,
center=True,
scale=True,
is_training=(mode=='train'),
updates_collections=None,
scope=(name+'batch_norm'))
def build_model(self):
features = self.features
captions = self.captions
batch_size = tf.shape(features)[0]
captions_in = captions[:, :self.T]
captions_out = captions[:, 1:]
mask = tf.to_float(tf.not_equal(captions_out, self._null))
# batch normalize feature vectors
features = self._batch_norm(features, mode='train', name='conv_features')
c, h = self._get_initial_lstm(features=features)
x = self._word_embedding(inputs=captions_in)
features_proj = self._project_features(features=features)
loss = 0.0
alpha_list = []
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H)
for t in range(self.T):
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x[:,t,:], context]), state=[c, h])
logits = self._decode_lstm(x[:,t,:], h, context, dropout=self.dropout, reuse=(t!=0))
loss += tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, captions_out[:, t]) * mask[:, t])
if self.alpha_c > 0:
alphas = tf.transpose(tf.pack(alpha_list), (1, 0, 2)) # (N, T, L)
alphas_all = tf.reduce_sum(alphas, 1) # (N, L)
alpha_reg = self.alpha_c * tf.reduce_sum((16./196 - alphas_all) ** 2)
loss += alpha_reg
return loss / tf.to_float(batch_size)
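    # Note on the regularizer above: with alpha_c > 0, build_model() adds the
    # doubly stochastic attention penalty from Section (4.2.1) of Xu et al.:
    #     alpha_c * sum_i (T/L - sum_t alpha_{t,i})**2
    # with T/L hard-coded as 16./196 (n_time_step=16 over L=196 locations),
    # nudging every feature location to receive equal attention over time.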
def build_sampler(self, max_len=20):
features = self.features
# batch normalize feature vectors
features = self._batch_norm(features, mode='test', name='conv_features')
c, h = self._get_initial_lstm(features=features)
features_proj = self._project_features(features=features)
sampled_word_list = []
alpha_list = []
beta_list = []
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=self.H)
for t in range(max_len):
if t == 0:
x = self._word_embedding(inputs=tf.fill([tf.shape(features)[0]], self._start))
else:
x = self._word_embedding(inputs=sampled_word, reuse=True)
context, alpha = self._attention_layer(features, features_proj, h, reuse=(t!=0))
alpha_list.append(alpha)
if self.selector:
context, beta = self._selector(context, h, reuse=(t!=0))
beta_list.append(beta)
with tf.variable_scope('lstm', reuse=(t!=0)):
_, (c, h) = lstm_cell(inputs=tf.concat(1, [x, context]), state=[c, h])
logits = self._decode_lstm(x, h, context, reuse=(t!=0))
sampled_word = tf.argmax(logits, 1)
sampled_word_list.append(sampled_word)
alphas = tf.transpose(tf.pack(alpha_list), (1, 0, 2)) # (N, T, L)
betas = tf.transpose(tf.squeeze(beta_list), (1, 0)) # (N, T)
sampled_captions = tf.transpose(tf.pack(sampled_word_list), (1, 0)) # (N, max_len)
return alphas, betas, sampled_captions
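# Example usage (a sketch; the variable-reuse step between build_model() and
# build_sampler() is an assumption -- in the original show-attend-and-tell
# code it lives in a separate solver/training loop, not in this file):
#
#   model = CaptionGenerator(word_to_idx, dim_feature=[196, 512], n_time_step=16)
#   loss = model.build_model()
#   tf.get_variable_scope().reuse_variables()
#   alphas, betas, sampled_captions = model.build_sampler(max_len=20)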
|
{
"content_hash": "55db9a2fb04df8b454840ca71d0009f9",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 122,
"avg_line_length": 47.189320388349515,
"alnum_prop": 0.559510338442547,
"repo_name": "infilect/ml-course1",
"id": "99e299036771f60146e5593f28f1030a0c4ed638",
"size": "10377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "week4/show-attend-and-tell/core/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113724"
},
{
"name": "HTML",
"bytes": "6268"
},
{
"name": "JavaScript",
"bytes": "31494"
},
{
"name": "Jupyter Notebook",
"bytes": "96338082"
},
{
"name": "Python",
"bytes": "2140030"
},
{
"name": "Shell",
"bytes": "11364"
}
],
"symlink_target": ""
}
|
import re
import fnmatch
import supybot.conf as conf
import supybot.utils as utils
import supybot.ircdb as ircdb
from supybot.commands import *
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
class User(callbacks.Plugin):
def _checkNotChannel(self, irc, msg, password=' '):
if password and irc.isChannel(msg.args[0]):
raise callbacks.Error, conf.supybot.replies.requiresPrivacy()
def list(self, irc, msg, args, optlist, glob):
"""[--capability=<capability>] [<glob>]
Returns the valid registered usernames matching <glob>. If <glob> is
not given, returns all registered usernames.
"""
predicates = []
for (option, arg) in optlist:
if option == 'capability':
def p(u, cap=arg):
try:
return u._checkCapability(cap)
except KeyError:
return False
predicates.append(p)
if glob:
r = re.compile(fnmatch.translate(glob), re.I)
def p(u):
return r.match(u.name) is not None
predicates.append(p)
users = []
for u in ircdb.users.itervalues():
for predicate in predicates:
if not predicate(u):
break
else:
users.append(u.name)
if users:
utils.sortBy(str.lower, users)
irc.reply(format('%L', users))
else:
if predicates:
irc.reply('There are no matching registered users.')
else:
irc.reply('There are no registered users.')
list = wrap(list, [getopts({'capability':'capability'}),
additional('glob')])
def register(self, irc, msg, args, name, password):
"""<name> <password>
Registers <name> with the given password <password> and the current
hostmask of the person registering. You shouldn't register twice; if
you're not recognized as a user but you've already registered, use the
hostmask add command to add another hostmask to your already-registered
user, or use the identify command to identify just for a session.
This command (and all other commands that include a password) must be
sent to the bot privately, not in a channel.
"""
addHostmask = True
try:
ircdb.users.getUserId(name)
irc.error('That name is already assigned to someone.', Raise=True)
except KeyError:
pass
if ircutils.isUserHostmask(name):
irc.errorInvalid('username', name,
'Hostmasks are not valid usernames.', Raise=True)
try:
u = ircdb.users.getUser(msg.prefix)
if u._checkCapability('owner'):
addHostmask = False
else:
irc.error('Your hostmask is already registered to %s' % u.name)
return
except KeyError:
pass
user = ircdb.users.newUser()
user.name = name
user.setPassword(password)
if addHostmask:
user.addHostmask(msg.prefix)
ircdb.users.setUser(user)
irc.replySuccess()
register = wrap(register, ['private', 'something', 'something'])
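    # Example (sent in a private message to the bot, per the docstring above):
    #   register alice s3cretpass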
def unregister(self, irc, msg, args, user, password):
"""<name> [<password>]
Unregisters <name> from the user database. If the user giving this
command is an owner user, the password is not necessary.
"""
try:
caller = ircdb.users.getUser(msg.prefix)
isOwner = caller._checkCapability('owner')
except KeyError:
caller = None
isOwner = False
if not conf.supybot.databases.users.allowUnregistration():
if not caller or not isOwner:
self.log.warning('%s tried to unregister user %s.',
msg.prefix, user.name)
irc.error('This command has been disabled. You\'ll have to '
'ask the owner of this bot to unregister your user.',
Raise=True)
if isOwner or user.checkPassword(password):
ircdb.users.delUser(user.id)
irc.replySuccess()
else:
irc.error(conf.supybot.replies.incorrectAuthentication())
unregister = wrap(unregister, ['private', 'otherUser',
additional('anything')])
def changename(self, irc, msg, args, user, newname, password):
"""<name> <new name> [<password>]
Changes your current user database name to the new name given.
<password> is only necessary if the user isn't recognized by hostmask.
If you include the <password> parameter, this message must be sent
to the bot privately (not on a channel).
"""
try:
id = ircdb.users.getUserId(newname)
irc.error(format('%q is already registered.', newname))
return
except KeyError:
pass
if user.checkHostmask(msg.prefix) or user.checkPassword(password):
user.name = newname
ircdb.users.setUser(user)
irc.replySuccess()
changename = wrap(changename, ['private', 'otherUser', 'something',
additional('something', '')])
class set(callbacks.Commands):
def password(self, irc, msg, args, user, password, newpassword):
"""<name> <old password> <new password>
Sets the new password for the user specified by <name> to <new
password>. Obviously this message must be sent to the bot
privately (not in a channel). If the requesting user is an owner
user (and the user whose password is being changed isn't that same
owner user), then <old password> needn't be correct.
"""
try:
u = ircdb.users.getUser(msg.prefix)
except KeyError:
u = None
if user.checkPassword(password) or \
(u and u._checkCapability('owner') and not u == user):
user.setPassword(newpassword)
ircdb.users.setUser(user)
irc.replySuccess()
else:
irc.error(conf.supybot.replies.incorrectAuthentication())
password = wrap(password, ['otherUser', 'something', 'something'])
def secure(self, irc, msg, args, user, password, value):
"""<password> [<True|False>]
Sets the secure flag on the user of the person sending the message.
Requires that the person's hostmask be in the list of hostmasks for
that user in addition to the password being correct. When the
secure flag is set, the user *must* identify before he can be
recognized. If a specific True/False value is not given, it
inverts the current value.
"""
if value is None:
value = not user.secure
if user.checkPassword(password) and \
user.checkHostmask(msg.prefix, useAuth=False):
user.secure = value
ircdb.users.setUser(user)
irc.reply('Secure flag set to %s' % value)
else:
irc.error(conf.supybot.replies.incorrectAuthentication())
secure = wrap(secure, ['private', 'user', 'something',
additional('boolean')])
def username(self, irc, msg, args, hostmask):
"""<hostmask|nick>
Returns the username of the user specified by <hostmask> or <nick> if
the user is registered.
"""
if ircutils.isNick(hostmask):
try:
hostmask = irc.state.nickToHostmask(hostmask)
except KeyError:
irc.error('I haven\'t seen %s.' % hostmask, Raise=True)
try:
user = ircdb.users.getUser(hostmask)
irc.reply(user.name)
except KeyError:
irc.error('I don\'t know who that is.')
username = wrap(username, [first('nick', 'hostmask')])
class hostmask(callbacks.Commands):
def hostmask(self, irc, msg, args, nick):
"""[<nick>]
Returns the hostmask of <nick>. If <nick> isn't given, return the
hostmask of the person giving the command.
"""
if not nick:
nick = msg.nick
irc.reply(irc.state.nickToHostmask(nick))
hostmask = wrap(hostmask, [additional('seenNick')])
def list(self, irc, msg, args, name):
"""[<name>]
Returns the hostmasks of the user specified by <name>; if <name>
isn't specified, returns the hostmasks of the user calling the
command.
"""
def getHostmasks(user):
hostmasks = map(repr, user.hostmasks)
if hostmasks:
hostmasks.sort()
return format('%L', hostmasks)
else:
                    return format('%s has no registered hostmasks.', user)
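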
try:
user = ircdb.users.getUser(msg.prefix)
if name:
if name != user.name and \
not ircdb.checkCapability(msg.prefix, 'owner'):
irc.error('You may only retrieve your own hostmasks.',
Raise=True)
else:
try:
user = ircdb.users.getUser(name)
irc.reply(getHostmasks(user))
except KeyError:
irc.errorNoUser()
else:
irc.reply(getHostmasks(user))
except KeyError:
irc.errorNotRegistered()
list = wrap(list, ['private', additional('something')])
def add(self, irc, msg, args, user, hostmask, password):
"""[<name>] [<hostmask>] [<password>]
Adds the hostmask <hostmask> to the user specified by <name>. The
<password> may only be required if the user is not recognized by
hostmask. <password> is also not required if an owner user is
giving the command on behalf of some other user. If <hostmask> is
not given, it defaults to your current hostmask. If <name> is not
given, it defaults to your currently identified name. This message
must be sent to the bot privately (not on a channel) since it may
contain a password.
"""
if not hostmask:
hostmask = msg.prefix
if not ircutils.isUserHostmask(hostmask):
irc.errorInvalid('hostmask', hostmask,
'Make sure your hostmask includes a nick, '
'then an exclamation point (!), then a user, '
'then an at symbol (@), then a host. Feel '
'free to use wildcards (* and ?, which work '
'just like they do on the command line) in '
'any of these parts.',
Raise=True)
try:
otherId = ircdb.users.getUserId(hostmask)
if otherId != user.id:
irc.error('That hostmask is already registered.',
Raise=True)
except KeyError:
pass
if not user.checkPassword(password) and \
not user.checkHostmask(msg.prefix):
try:
u = ircdb.users.getUser(msg.prefix)
except KeyError:
irc.error(conf.supybot.replies.incorrectAuthentication(),
Raise=True)
if not u._checkCapability('owner'):
irc.error(conf.supybot.replies.incorrectAuthentication(),
Raise=True)
try:
user.addHostmask(hostmask)
except ValueError, e:
irc.error(str(e), Raise=True)
try:
ircdb.users.setUser(user)
except ValueError, e:
irc.error(str(e), Raise=True)
irc.replySuccess()
add = wrap(add, ['private', first('otherUser', 'user'),
optional('something'), additional('something', '')])
def remove(self, irc, msg, args, user, hostmask, password):
"""<name> <hostmask> [<password>]
Removes the hostmask <hostmask> from the record of the user
specified by <name>. If the hostmask given is 'all' then all
hostmasks will be removed. The <password> may only be required if
the user is not recognized by his hostmask. This message must be
sent to the bot privately (not on a channel) since it may contain a
password.
"""
if not user.checkPassword(password) and \
not user.checkHostmask(msg.prefix):
u = ircdb.users.getUser(msg.prefix)
if not u._checkCapability('owner'):
irc.error(conf.supybot.replies.incorrectAuthentication())
return
try:
s = ''
if hostmask == 'all':
user.hostmasks.clear()
s = 'All hostmasks removed.'
else:
user.removeHostmask(hostmask)
except KeyError:
irc.error('There was no such hostmask.')
return
ircdb.users.setUser(user)
irc.replySuccess(s)
remove = wrap(remove, ['private', 'otherUser', 'something',
additional('something', '')])
def capabilities(self, irc, msg, args, user):
"""[<name>]
Returns the capabilities of the user specified by <name>; if <name>
isn't specified, returns the capabilities of the user calling the
command.
"""
try:
u = ircdb.users.getUser(msg.prefix)
except KeyError:
irc.errorNotRegistered()
else:
if u == user or u._checkCapability('owner'):
irc.reply('[%s]' % '; '.join(user.capabilities), private=True)
else:
irc.error(conf.supybot.replies.incorrectAuthentication(),
Raise=True)
capabilities = wrap(capabilities, [first('otherUser', 'user')])
def identify(self, irc, msg, args, user, password):
"""<name> <password>
Identifies the user as <name>. This command (and all other
commands that include a password) must be sent to the bot privately,
not in a channel.
"""
if user.checkPassword(password):
try:
user.addAuth(msg.prefix)
ircdb.users.setUser(user, flush=False)
irc.replySuccess()
except ValueError:
irc.error('Your secure flag is true and your hostmask '
'doesn\'t match any of your known hostmasks.')
else:
self.log.warning('Failed identification attempt by %s (password '
'did not match for %s).', msg.prefix, user.name)
irc.error(conf.supybot.replies.incorrectAuthentication())
identify = wrap(identify, ['private', 'otherUser', 'something'])
def unidentify(self, irc, msg, args, user):
"""takes no arguments
Un-identifies you. Note that this may not result in the desired
effect of causing the bot not to recognize you anymore, since you may
have added hostmasks to your user that can cause the bot to continue to
recognize you.
"""
user.clearAuth()
ircdb.users.setUser(user)
irc.replySuccess('If you remain recognized after giving this command, '
'you\'re being recognized by hostmask, rather than '
'by password. You must remove whatever hostmask is '
'causing you to be recognized in order not to be '
'recognized.')
unidentify = wrap(unidentify, ['user'])
def whoami(self, irc, msg, args):
"""takes no arguments
Returns the name of the user calling the command.
"""
try:
user = ircdb.users.getUser(msg.prefix)
irc.reply(user.name)
except KeyError:
irc.reply('I don\'t recognize you.')
whoami = wrap(whoami)
def stats(self, irc, msg, args):
"""takes no arguments
Returns some statistics on the user database.
"""
users = 0
owners = 0
admins = 0
hostmasks = 0
for user in ircdb.users.itervalues():
users += 1
hostmasks += len(user.hostmasks)
try:
if user._checkCapability('owner'):
owners += 1
elif user._checkCapability('admin'):
admins += 1
except KeyError:
pass
irc.reply(format('I have %s registered users '
'with %s registered hostmasks; '
'%n and %n.',
users, hostmasks,
(owners, 'owner'), (admins, 'admin')))
stats = wrap(stats)
Class = User
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
{
"content_hash": "3b8b1f62d701c27af9a4449c2aed6e88",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 79,
"avg_line_length": 41.4953488372093,
"alnum_prop": 0.5346634534551364,
"repo_name": "jrabbit/ubotu-fr",
"id": "ad04768aa5cf0e325b63d027f41db99e57ecdb2d",
"size": "19432",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "plugins/User/plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3850777"
}
],
"symlink_target": ""
}
|
"""
WSGI config for bigpandamon project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
Further documentation: http://thecodeship.com/deployment/deploy-django-apache-virtualenv-and-mod_wsgi/
"""
import os
import sys
import site
from os.path import join, pardir, abspath, dirname, split
### dummy settings_bigpandamon settings file with VIRTUALENV_PATH, WSGI_PATH
baseSettingsPath = '/data/bigpandamon_settings'
sys.path.append(baseSettingsPath)
#virtualenvPath = '/data/virtualenv/django1.6.1__python2.6.6__jedimon'
#virtualenvPath = '/data/virtualenv/django1.6.1__python2.6.6__atlas'
virtualenvPath = '/data/wenaus/virtualenv/twrpm'
path = virtualenvPath + '/pythonpath'
try:
# from settings_bigpandamon_jedimon import VIRTUALENV_PATH
# from settings_bigpandamon_jedimon import WSGI_PATH
from settings_bigpandamon_twrpm import VIRTUALENV_PATH
from settings_bigpandamon_twrpm import WSGI_PATH
virtualenvPath = VIRTUALENV_PATH
path = WSGI_PATH
except ImportError:
print "Something went wrong with import of WSGI_PATH from settings."
print "Staying with default path: %s" % path
# Add the site-packages of the chosen virtualenv to work with
site.addsitedir(virtualenvPath + '/lib/python2.7/site-packages')
# Add the app's directory to the PYTHONPATH
sys.path.append(path)
sys.path.append(path + '/pythonpath')
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bigpandamon.settings")
#os.environ["DJANGO_SETTINGS_MODULE"] = "atlas.settings"
# django settings module
DJANGO_SETTINGS_MODULE = '%s.%s' % (split(abspath(dirname(__file__)))[1], 'settings')
# pythonpath dirs
PYTHONPATH = [
join(dirname(__file__), pardir),
]
# inject few paths to pythonpath
for p in PYTHONPATH:
if p not in sys.path:
sys.path.insert(0, p)
os.environ['DJANGO_SETTINGS_MODULE'] = DJANGO_SETTINGS_MODULE
# Activate your virtual env
activate_env = os.path.expanduser(virtualenvPath + '/bin/activate_this.py')
execfile(activate_env, dict(__file__=activate_env))
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
## Apply WSGI middleware here.
## from helloworld.wsgi import HelloWorldApplication
## application = HelloWorldApplication(application)
#
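# Example Apache mod_wsgi stanza this file is typically paired with (a sketch
# only; the alias and script locations below are placeholders, not taken from
# this repository):
#
#   WSGIDaemonProcess bigpandamon python-home=/data/wenaus/virtualenv/twrpm
#   WSGIProcessGroup bigpandamon
#   WSGIScriptAlias / /path/to/core/wsgi.py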
|
{
"content_hash": "beb342752b8ec8d93e2f487d1b56e7d6",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 92,
"avg_line_length": 31.12,
"alnum_prop": 0.7536418166238218,
"repo_name": "Foorth/panda-bigmon-core",
"id": "840bcf748d713f02a736ceedd9542ce1759cc62e",
"size": "2334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423502"
},
{
"name": "HTML",
"bytes": "573831"
},
{
"name": "JavaScript",
"bytes": "1149497"
},
{
"name": "Python",
"bytes": "1026573"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
import itertools
import logging
from multiprocessing.dummy import Pool as ThreadPool
from elasticsearch import helpers
from django.core.management.base import BaseCommand, CommandError
from django.db import connection, transaction
from imageledger import models, search
console = logging.StreamHandler()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
MAX_CONNECTION_RETRIES = 10
RETRY_WAIT = 5 # Number of seconds to wait before retrying
DEFAULT_CHUNK_SIZE = 1000
DEFAULT_NUM_ITERATIONS = 100
class Command(BaseCommand):
can_import_settings = True
requires_migrations_checks = True
def add_arguments(self, parser):
parser.add_argument("--verbose",
action="store_true",
default=False,
help="Be very chatty and run logging at DEBUG")
parser.add_argument("--chunk-size",
dest="chunk_size",
default=DEFAULT_CHUNK_SIZE,
type=int,
help="The number of records to batch process at once")
parser.add_argument("--with-fingerprinting",
dest="with_fingerprinting",
action="store_true",
help="Whether to run the expensive perceptual hash routine as part of syncing")
parser.add_argument("--num-iterations",
dest="num_iterations",
default=DEFAULT_NUM_ITERATIONS,
type=int,
help="The number of times to loop through `chunk_size` records")
def handle(self, *args, **options):
if options['verbose']:
log.addHandler(console)
log.setLevel(logging.DEBUG)
self.sync_all_images(chunk_size=options['chunk_size'],
with_fingerprinting=options['with_fingerprinting'],
num_iterations=options['num_iterations'])
def sync_all_images(self, chunk_size=DEFAULT_CHUNK_SIZE, with_fingerprinting=False, num_iterations=DEFAULT_NUM_ITERATIONS):
"""Sync all of the images, sorting from least-recently-synced"""
with ThreadPool(4) as pool:
starts = [i * chunk_size for i in range(0, num_iterations)]
pool.starmap(do_sync, zip(starts,
itertools.repeat(chunk_size, len(starts)),
itertools.repeat(with_fingerprinting, len(starts))))
def do_sync(start, chunk_size, with_fingerprinting):
end = start + chunk_size
log.info("Starting sync in range from %d to %d...", start, end)
imgs = models.Image.objects.all().order_by('-last_synced_with_source')[start:end]
for img in imgs:
img.sync(attempt_perceptual_hash=with_fingerprinting)
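# Example invocation (a sketch; the command name 'syncer' comes from this
# file's location under management/commands/):
#   python manage.py syncer --chunk-size 500 --num-iterations 10 --verbose
#   python manage.py syncer --with-fingerprinting  # adds the slow perceptual hash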
|
{
"content_hash": "25f4e3be76397ad59bee0dc2835371b1",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 127,
"avg_line_length": 41.885714285714286,
"alnum_prop": 0.5931105047748977,
"repo_name": "creativecommons/open-ledger",
"id": "d4c46b9c5cd5226011aa93973dc8b475664cde0f",
"size": "2932",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "imageledger/management/commands/syncer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88244"
},
{
"name": "HTML",
"bytes": "34125"
},
{
"name": "JavaScript",
"bytes": "61370"
},
{
"name": "Python",
"bytes": "1370212"
}
],
"symlink_target": ""
}
|
import cStringIO
import base64
# try to import the PIL Image
try:
from PIL import Image
except ImportError:
import Image
# Flask imports:
from flask import Flask, request, request_finished, json, abort, make_response, Response, jsonify
# facerec imports
# facerec imports:
import sys
sys.path.append("../../..")
from facerec.model import PredictableModel
from facerec.lbp import ExtendedLBP
from facerec.feature import SpatialHistogram
from facerec.distance import ChiSquareDistance
from facerec.classifier import NearestNeighbor
# logging
import logging
from logging.handlers import RotatingFileHandler
# the webserver recognition module
import recognition
# The main application:
app = Flask(__name__)
# This is a list of errors the Webservice returns. You can come up
# with new error codes and plug them into the API.
#
# An example JSON response for an error looks like this:
#
# { "status" : failed, "message" : "IMAGE_DECODE_ERROR", "code" : 10 }
#
# If there are multiple errors, only the first error is considered.
IMAGE_DECODE_ERROR = 10
IMAGE_RESIZE_ERROR = 11
PREDICTION_ERROR = 12
SERVICE_TEMPORARY_UNAVAILABLE = 20
UNKNOWN_ERROR = 21
INVALID_FORMAT = 30
INVALID_API_KEY = 31
INVALID_API_TOKEN = 32
MISSING_ARGUMENTS = 40
errors = {
IMAGE_DECODE_ERROR : "IMAGE_DECODE_ERROR",
IMAGE_RESIZE_ERROR : "IMAGE_RESIZE_ERROR",
SERVICE_TEMPORARY_UNAVAILABLE : "SERVICE_TEMPORARILY_UNAVAILABLE",
PREDICTION_ERROR : "PREDICTION_ERROR",
UNKNOWN_ERROR : "UNKNOWN_ERROR",
INVALID_FORMAT : "INVALID_FORMAT",
INVALID_API_KEY : "INVALID_API_KEY",
INVALID_API_TOKEN : "INVALID_API_TOKEN",
MISSING_ARGUMENTS : "MISSING_ARGUMENTS"
}
# Setup the logging for the server, so we can log all exceptions
# away. We also want to acquire a logger for the facerec framework,
# so we can be sure, that all logging goes into one place.
LOG_FILENAME = 'serverlog.log'
LOG_BACKUP_COUNT = 5
LOG_FILE_SIZE_BYTES = 50 * 1024 * 1024
def init_logger(app):
handler = RotatingFileHandler(LOG_FILENAME, maxBytes=LOG_FILE_SIZE_BYTES, backupCount=LOG_BACKUP_COUNT)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
loggers = [app.logger, logging.getLogger('facerec')]
for logger in loggers:
logger.addHandler(handler)
# Bring the model variable into global scope. This might be
# dangerous in Flask; I am still trying to figure out which is the
# best practice here.
# Initializes the Flask application, which is going to
# add the loggers, load the initial facerec model and
# all of this.
def init_app(app):
init_logger(app)
init_app(app)
@app.before_request
def log_request():
app.logger.debug("Request: %s %s", request.method, request.url)
# The WebAppException might be useful. It enables us to
# throw exceptions at any place in the application and give the user
# a custom error code.
class WebAppException(Exception):
def __init__(self, error_code, exception, status_code=None):
Exception.__init__(self)
self.status_code = 400
self.exception = exception
self.error_code = error_code
try:
self.message = errors[self.error_code]
        except KeyError:
self.error_code = UNKNOWN_ERROR
self.message = errors[self.error_code]
if status_code is not None:
self.status_code = status_code
def to_dict(self):
rv = dict()
rv['status'] = 'failed'
rv['code'] = self.error_code
rv['message'] = self.message
return rv
# Wow, a decorator! This enables us to catch Exceptions
# in a method and raise a new WebAppException with the
# original Exception included. This is a quick and dirty way
# to minimize error handling code in our server.
class ThrowsWebAppException(object):
def __init__(self, error_code, status_code=None):
self.error_code = error_code
self.status_code = status_code
def __call__(self, function):
def returnfunction(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e:
raise WebAppException(self.error_code, e)
return returnfunction
# Register an error handler on the WebAppException, so we
# can return the error as JSON back to the User. At the same
# time you should do some logging, so it doesn't pass by
# silently.
@app.errorhandler(WebAppException)
def handle_exception(error):
app.logger.exception(error.exception)
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
# Now finally add the methods needed for our FaceRecognition API!
# Right now there is no rate limiting, no auth tokens and so on.
#
@ThrowsWebAppException(error_code = IMAGE_DECODE_ERROR)
def read_image(base64_image):
""" Decodes Base64 image data, reads it with PIL and converts it into grayscale.
Args:
base64_image [string] A Base64 encoded image (all types PIL supports).
"""
enc_data = base64.b64decode(base64_image)
file_like = cStringIO.StringIO(enc_data)
im = Image.open(file_like)
im = im.convert("L")
return im
def preprocess_image(image_data):
image = read_image(image_data)
return image
# Get the prediction from the global model.
@ThrowsWebAppException(error_code = PREDICTION_ERROR)
def get_prediction(image_data):
image = preprocess_image(image_data)
prediction = model.predict(image)
return prediction
# Now add the API endpoints for recognizing, learning and
# so on. If you want to use this in any public setup, you
# should add rate limiting, auth tokens and so on.
@app.route('/api/recognize', methods=['GET', 'POST'])
def identify():
if request.headers['Content-Type'] == 'application/json':
try:
image_data = request.json['image']
        except Exception:
raise WebAppException(error_code=MISSING_ARGUMENTS)
prediction = get_prediction(image_data)
response = jsonify(name = prediction)
return response
else:
raise WebAppException(error_code=INVALID_FORMAT)
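# Example request against the endpoint above (a sketch; the payload is a
# placeholder, not a real Base64-encoded image):
#
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"image": "<base64-encoded image data>"}' \
#        http://localhost:5000/api/recognize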
# And now let's do this!
if __name__ == '__main__':
# A long description:
long_description = ("server.py is a simple facerec webservice. It provides "
"you with a simple RESTful API to recognize faces from a "
"computed model. Please don't use this server in a production "
"environment, as it provides no security and there might be "
"ugly concurrency issues with the global state of the model." )
print "=== Description ==="
print long_description
# Parse the command line:
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("-t", "--train", action="store", dest="dataset", default=None,
help="Calculates a new model from a given CSV file. CSV format: <person>;</path/to/image/folder>.", required=False)
parser.add_argument("-a", "--address", action="store", dest="host", default="0.0.0.0",
help="Sets the endpoint for this server.", required=False)
parser.add_argument("-p", "--port", action="store", dest="port", default=5000,
help="Sets the port for this server.", required=False)
parser.add_argument('model_filename', nargs='?', help="Filename of the model to use or store")
# Print Usage:
print "=== Usage ==="
parser.print_help()
# Parse the Arguments:
args = parser.parse_args()
# Uh, this is ugly...
global model
# If a DataSet is given, we want to work with it:
if args.dataset:
# Learn the new model with the dataset given:
model = recognition.get_model_from_csv(filename=args.dataset,out_model_filename=args.model_filename)
else:
model = recognition.load_model_file(args.model_filename)
# Finally start the server:
print "=== Server Log (also in %s) ===" % (LOG_FILENAME)
app.run(host=args.host, port=args.port, debug=True, use_reloader=False, threaded=False)
|
{
"content_hash": "a2b6e2dfe28a6f018d7e60ab79db3189",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 123,
"avg_line_length": 35.70044052863436,
"alnum_prop": 0.6859575518262586,
"repo_name": "idf/FaceReader",
"id": "fee9528068754f9ce1675bdf374f2fe602f8e479",
"size": "9743",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "facerec_py/apps/webapp/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "4151"
},
{
"name": "Python",
"bytes": "251899"
}
],
"symlink_target": ""
}
|
import base64
import pytest
from werkzeug.test import Client
from util import dev_login
def test_get_roles(client: Client) -> None:
with dev_login(client, 'admin'):
resp = client.get('/api/v1/role')
assert resp.status_code == 200
assert 'roles' in resp.json
assert isinstance(resp.json['roles'], list)
def test_get_roles_unauthorized(client: Client) -> None:
with dev_login(client, 'user'):
resp = client.get('/api/v1/role')
assert resp.status_code == 403
@pytest.mark.parametrize(('user', 'role', 'expected'), [
('admin', 'blacklisted', True),
('facstaff', 'blacklisted', False),
('admin', 'whitelisted', True),
('facstaff', 'whitelisted', True),
('user', 'whitelisted', False),
])
def test_get_role_text(client: Client, user: str, role: str, expected: bool) -> None:
with dev_login(client, user):
resp = client.get(f'/api/v1/role/{role}/text')
if expected:
assert resp.status_code == 200
assert 'text' in resp.json
assert isinstance(resp.json['text'], dict)
else:
assert resp.status_code == 403
@pytest.mark.parametrize(('user', 'role', 'expected'), [
('admin', 'blacklisted', True),
('facstaff', 'whitelisted', True),
('user', 'whitelisted', False),
('facstaff', 'blacklisted', False),
])
def test_get_role_entities(client: Client, user: str, role: str, expected: bool) -> None:
with dev_login(client, user):
resp = client.get(f'/api/v1/role/{role}/entity')
if expected:
assert resp.status_code == 200
assert 'entities' in resp.json
assert isinstance(resp.json['entities'], list)
else:
assert resp.status_code == 403
@pytest.mark.parametrize(('role', 'entity', 'expected'), [
('whitelisted', 'DEV_ADMIN', True),
('blocked_url', 'https://google.com/something', True),
('blocked_url', '!!$*#$*#(*#', False),
])
def test_validate_entity(client: Client, role: str, entity: str, expected: bool) -> None:
entity_b32 = str(base64.b32encode(bytes(entity, 'utf8')), 'utf8')
with dev_login(client, 'admin'):
resp = client.get(f'/api/v1/role/{role}/validate_entity/{entity_b32}')
assert resp.status_code == 200
assert resp.json['valid'] is expected
@pytest.mark.parametrize(('user', 'role'), [
('facstaff', 'blacklisted'),
('user', 'whitelisted'),
])
def test_validate_entity_unauthorized(client: Client, user: str, role: str) -> None:
entity = 'entity'
entity_b32 = str(base64.b32encode(bytes(entity, 'utf8')), 'utf8')
with dev_login(client, user):
resp = client.get(f'/api/v1/role/{role}/validate_entity/{entity_b32}')
assert resp.status_code == 403
@pytest.mark.parametrize(('user', 'role', 'entity'), [
('admin', 'blacklisted', 'DEV_USER'),
('facstaff', 'whitelisted', 'DEV_PWR_USER'),
])
def test_grant_revoke_role(client: Client, user: str, role: str, entity: str) -> None:
entity_b32 = str(base64.b32encode(bytes(entity, 'utf8')), 'utf8')
with dev_login(client, user):
# Grant the role to the entity
resp = client.put(f'/api/v1/role/{role}/entity/{entity_b32}', json={})
assert resp.status_code == 204
# Check that the entity has the role
resp = client.get(f'/api/v1/role/{role}/entity')
assert resp.status_code == 200
assert any(ent['entity'] == entity for ent in resp.json['entities'])
# Revoke the role from the entity
resp = client.delete(f'/api/v1/role/{role}/entity/{entity_b32}')
assert resp.status_code == 204
# Check that the entity no longer has the role
resp = client.get(f'/api/v1/role/{role}/entity')
assert resp.status_code == 200
assert not any(ent['entity'] == entity for ent in resp.json['entities'])
@pytest.mark.parametrize(('user', 'role', 'entity'), [
('facstaff', 'blacklisted', 'DEV_USER'),
('user', 'whitelisted', 'DEV_FACSTAFF'),
])
def test_grant_revoke_role_unauthorized(client: Client, user: str, role: str, entity: str) -> None:
entity_b32 = str(base64.b32encode(bytes(entity, 'utf8')), 'utf8')
with dev_login(client, user):
# Check that we cannot grant the role
resp = client.put(f'/api/v1/role/{role}/entity/{entity_b32}', json={})
assert resp.status_code == 403
# Check that we cannot revoke the role
resp = client.delete(f'/api/v1/role/{role}/entity/{entity_b32}')
assert resp.status_code == 403
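# Note: entities are base32-encoded before being interpolated into the URL so
# that values containing '/', '!' or '@' (URLs, hostmasks) survive routing.
# A sketch of the round trip used throughout this module:
#   entity_b32 = str(base64.b32encode(bytes('DEV_USER', 'utf8')), 'utf8')
#   assert base64.b32decode(entity_b32).decode('utf8') == 'DEV_USER'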
|
{
"content_hash": "bb8d5bff6ad092bc4ca8c72280355598",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 99,
"avg_line_length": 35.71875,
"alnum_prop": 0.6122047244094488,
"repo_name": "oss/shrunk",
"id": "95c58646d6e20401330f193ecfd35fc94541b02a",
"size": "4572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/tests/test_api_role.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1058"
},
{
"name": "HTML",
"bytes": "5703"
},
{
"name": "Haskell",
"bytes": "2553"
},
{
"name": "JavaScript",
"bytes": "15039"
},
{
"name": "Less",
"bytes": "7542"
},
{
"name": "Python",
"bytes": "238286"
},
{
"name": "Shell",
"bytes": "6516"
},
{
"name": "TypeScript",
"bytes": "198320"
}
],
"symlink_target": ""
}
|
import logging
import os
import subprocess
import threading
import time
import uuid
from devil.utils import reraiser_thread
from pylib import constants
_MINIUMUM_TIMEOUT = 3.0
_PER_LINE_TIMEOUT = .002 # Should be able to process 500 lines per second.
_PROCESS_START_TIMEOUT = 10.0
_MAX_RESTARTS = 10 # Should be plenty unless tool is crashing on start-up.
class Deobfuscator:
def __init__(self, mapping_path):
script_path = os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'android',
'stacktrace', 'java_deobfuscate.py')
cmd = [script_path, mapping_path]
# Allow only one thread to call TransformLines() at a time.
self._lock = threading.Lock()
# Ensure that only one thread attempts to kill self._proc in Close().
self._close_lock = threading.Lock()
self._closed_called = False
# Assign to None so that attribute exists if Popen() throws.
self._proc = None
# Start process eagerly to hide start-up latency.
self._proc_start_time = time.time()
self._proc = subprocess.Popen(cmd,
bufsize=1,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
close_fds=True)
def IsClosed(self):
return self._closed_called or self._proc.returncode is not None
def IsBusy(self):
return self._lock.locked()
def IsReady(self):
return not self.IsClosed() and not self.IsBusy()
def TransformLines(self, lines):
"""Deobfuscates obfuscated names found in the given lines.
If anything goes wrong (process crashes, timeout, etc), returns |lines|.
Args:
lines: A list of strings without trailing newlines.
Returns:
A list of strings without trailing newlines.
"""
if not lines:
return []
# Deobfuscated stacks contain more frames than obfuscated ones when method
# inlining occurs. To account for the extra output lines, keep reading until
# this eof_line token is reached.
eof_line = uuid.uuid4().hex
out_lines = []
def deobfuscate_reader():
while True:
line = self._proc.stdout.readline()
# Return an empty string at EOF (when stdin is closed).
if not line:
break
line = line[:-1]
if line == eof_line:
break
out_lines.append(line)
if self.IsBusy():
logging.warning('deobfuscator: Having to wait for Java deobfuscation.')
# Allow only one thread to operate at a time.
with self._lock:
if self.IsClosed():
if not self._closed_called:
logging.warning('deobfuscator: Process exited with code=%d.',
self._proc.returncode)
self.Close()
return lines
# TODO(agrieve): Can probably speed this up by only sending lines through
# that might contain an obfuscated name.
reader_thread = reraiser_thread.ReraiserThread(deobfuscate_reader)
reader_thread.start()
try:
self._proc.stdin.write('\n'.join(lines))
self._proc.stdin.write('\n{}\n'.format(eof_line))
self._proc.stdin.flush()
time_since_proc_start = time.time() - self._proc_start_time
timeout = (max(0, _PROCESS_START_TIMEOUT - time_since_proc_start) +
max(_MINIUMUM_TIMEOUT, len(lines) * _PER_LINE_TIMEOUT))
reader_thread.join(timeout)
if self.IsClosed():
logging.warning(
'deobfuscator: Close() called by another thread during join().')
return lines
if reader_thread.is_alive():
logging.error('deobfuscator: Timed out.')
self.Close()
return lines
return out_lines
except IOError:
logging.exception('deobfuscator: Exception during java_deobfuscate')
self.Close()
return lines
def Close(self):
with self._close_lock:
needs_closing = not self.IsClosed()
self._closed_called = True
if needs_closing:
self._proc.stdin.close()
self._proc.kill()
self._proc.wait()
def __del__(self):
# self._proc is None when Popen() fails.
if not self._closed_called and self._proc:
logging.error('deobfuscator: Forgot to Close()')
self.Close()
class DeobfuscatorPool:
# As of Sep 2017, each instance requires about 500MB of RAM, as measured by:
# /usr/bin/time -v build/android/stacktrace/java_deobfuscate.py \
# out/Release/apks/ChromePublic.apk.mapping
def __init__(self, mapping_path, pool_size=4):
self._mapping_path = mapping_path
self._pool = [Deobfuscator(mapping_path) for _ in range(pool_size)]
# Allow only one thread to select from the pool at a time.
self._lock = threading.Lock()
self._num_restarts = 0
def TransformLines(self, lines):
with self._lock:
assert self._pool, 'TransformLines() called on a closed DeobfuscatorPool.'
# De-obfuscation is broken.
if self._num_restarts == _MAX_RESTARTS:
raise Exception('Deobfuscation seems broken.')
# Restart any closed Deobfuscators.
for i, d in enumerate(self._pool):
if d.IsClosed():
logging.warning('deobfuscator: Restarting closed instance.')
self._pool[i] = Deobfuscator(self._mapping_path)
self._num_restarts += 1
if self._num_restarts == _MAX_RESTARTS:
logging.warning('deobfuscator: MAX_RESTARTS reached.')
selected = next((x for x in self._pool if x.IsReady()), self._pool[0])
# Rotate the order so that next caller will not choose the same one.
self._pool.remove(selected)
self._pool.append(selected)
return selected.TransformLines(lines)
def Close(self):
with self._lock:
for d in self._pool:
d.Close()
self._pool = None
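# Example usage (a sketch; the mapping path and input lines are illustrative):
#   pool = DeobfuscatorPool('out/Release/apks/ChromePublic.apk.mapping')
#   try:
#       readable_lines = pool.TransformLines(obfuscated_logcat_lines)
#   finally:
#       pool.Close()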
|
{
"content_hash": "81db4ec2b7d4dfaba1548e4ec20c9e8c",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 80,
"avg_line_length": 33.94252873563219,
"alnum_prop": 0.6244497121571283,
"repo_name": "scheib/chromium",
"id": "6fe86f408d5d89d04009c1523048f717a70d5545",
"size": "6069",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build/android/pylib/symbols/deobfuscator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Adapter classes and utilities for use with Reactive interfaces"""
from __future__ import absolute_import
import itertools
import re
import weakref
import six
import charms.reactive as reactive
import charms.reactive.bus
import charmhelpers.contrib.hahelpers.cluster as ch_cluster
import charmhelpers.contrib.network.ip as ch_ip
import charmhelpers.contrib.openstack.utils as ch_utils
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as ch_host
import charms_openstack.ip as os_ip
import charms_openstack.os_release_data as os_release_data
ADDRESS_TYPES = os_ip.ADDRESS_MAP.keys()
# handle declarative adapter properties using a decorator and simple functions
# Hold the custom adapter properties somewhere!
_custom_adapter_properties = {}
def adapter_property(interface_name):
"""Decorator to take the interface name and add a custom property.
These are used to generate custom Adapter classes automatically for the
charm author which are then plugged into the class. The adapter class is
built using a different function.
:param interface_name: the name of the interface to add the property to
"""
def wrapper(f):
property_name = f.__name__
if interface_name not in _custom_adapter_properties:
_custom_adapter_properties[interface_name] = {}
if property_name in _custom_adapter_properties[interface_name]:
raise RuntimeError(
"Property name '{}' used more than once for '{} interface?"
.format(property_name, interface_name))
_custom_adapter_properties[interface_name][property_name] = f
return f
return wrapper
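# Example (a sketch; the interface name and the property body are assumptions,
# not taken from any particular charm):
#   @adapter_property('identity-service')
#   def region(interface):
#       return hookenv.config('region')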
# declaring custom configuration properties:
# Hold the custom configuration adapter properties somewhere!
_custom_config_properties = {}
def config_property(f):
"""Decorator to add a custom configuration property.
These are used to generate a custom ConfigurationAdapter for use when
automatically creating a Charm class
:param f: the function passed as part of the @decorator syntax
"""
property_name = f.__name__
if property_name in _custom_config_properties:
raise RuntimeError(
"Property name '{}' used more than once for configuration?"
.format(property_name))
_custom_config_properties[property_name] = f
return f
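# Example (a sketch; the property body is an assumption):
#   @config_property
#   def public_fqdn(config):
#       return '{}.example.com'.format(config.charm_instance.name)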
##
class OpenStackRelationAdapter(object):
"""
Base adapter class for all OpenStack related adapters.
"""
interface_type = None
"""
The generic type of the interface the adapter is wrapping.
"""
def __init__(self, relation=None, accessors=None, relation_name=None):
"""Class will usually be initialised using the 'relation' option to
        pass in an instance of an interface class. If there is no relation
class yet available then 'relation_name' can be used instead.
:param relation: Instance of an interface class
:param accessors: List of accessible interfaces properties
:param relation_name: String name of relation
"""
self.relation = relation
if relation and relation_name:
            raise ValueError('Cannot specify relation and relation_name')
if relation:
self.accessors = accessors or []
self._setup_properties()
else:
self._relation_name = relation_name
@property
def relation_name(self):
"""
Name of the relation this adapter is handling.
"""
if self.relation:
return self.relation.relation_name
else:
return self._relation_name
def _setup_properties(self):
"""
        Set up property-based accessors for an interface's
        auto accessors.
Note that the accessor is dynamic as each access calls the underlying
getattr() for each property access.
"""
self.accessors.extend(self.relation.auto_accessors)
for field in self.accessors:
meth_name = field.replace('-', '_')
# Get the relation property dynamically
# Note the additional lambda name: is to create a closure over
# meth_name so that a new 'name' gets created for each loop,
# otherwise the same variable meth_name is referenced in each of
# the internal lambdas. i.e. this is (lambda x: ...)(value)
setattr(self.__class__,
meth_name,
(lambda name: property(
lambda self: getattr(
self.relation, name)()))(meth_name))
class RabbitMQRelationAdapter(OpenStackRelationAdapter):
"""
Adapter for the RabbitMQRequires relation interface.
"""
interface_type = "messaging"
def __init__(self, relation):
add_accessors = ['vhost', 'username']
super(RabbitMQRelationAdapter, self).__init__(relation, add_accessors)
@property
def host(self):
"""
Hostname that should be used to access RabbitMQ.
"""
if self.vip:
return self.vip
else:
return self.private_address
@property
def hosts(self):
"""
Comma separated list of hosts that should be used
to access RabbitMQ.
"""
hosts = self.relation.rabbitmq_hosts()
if len(hosts) > 1:
return ','.join(hosts)
else:
return None
@property
def ssl_data_complete(self):
return self.relation.ssl_data_complete()
@property
def ssl_ca_file(self):
return '/var/lib/charm/{}/rabbit-client-ca.pem'.format(
hookenv.service_name())
class PeerHARelationAdapter(OpenStackRelationAdapter):
"""
Adapter for cluster relation of nodes of the same service
"""
interface_type = "cluster"
def __init__(self, relation=None, relation_name=None):
"""Map of local units addresses for each address type
:param relation: Instance of openstack-ha relation
:param relation_name: Name of relation if openstack-ha relation is
not available e.g. 'cluster'
NOTE: This excludes private-address
@return dict of backends and networks for local unit e.g.
{'this_unit_admin_addr': {
'backends': {
'this_unit-1': 'this_unit_admin_addr'},
'network': 'this_unit_admin_addr/admin_netmask'},
'this_unit_internal_addr': {
'backends': {
'this_unit-1': 'this_unit_internal_addr'},
'network': 'this_unit_internal_addr/internal_netmask'},
'this_unit_public_addr': {
'backends': {
'this_unit-1': 'this_unit_public_addr'},
'network': 'this_unit_public_addr/public_netmask'}}
"""
super(PeerHARelationAdapter, self).__init__(
relation=relation,
relation_name=relation_name)
self.config = hookenv.config()
self.api_config_adapter = APIConfigurationAdapter()
self.local_address = self.api_config_adapter.local_address
self.local_unit_name = self.api_config_adapter.local_unit_name
self.cluster_hosts = {}
if relation:
self.add_network_split_addresses()
self.add_default_addresses()
@property
def internal_addresses(self):
"""Return list of internal addresses of this unit and peers
Return list of internal addresses of this unit and peers. If no
internal address cidr has been set return private addresses.
@return list [ip1, ip2, ...]
"""
cfg_opt = os_ip.ADDRESS_MAP[os_ip.INTERNAL]['config']
int_net = self.config.get(cfg_opt)
laddr = ch_ip.get_address_in_network(int_net) or self.local_address
try:
hosts = sorted(
list(self.cluster_hosts[laddr]['backends'].values()))
except KeyError:
hosts = [laddr]
return hosts
@property
def single_mode_map(self):
"""Return map of local addresses only if this is a single node cluster
@return dict of local address info e.g.
{'cluster_hosts':
{'this_unit_private_addr': {
'backends': {
'this_unit-1': 'this_unit_private_addr'},
'network': 'this_unit_private_addr/private_netmask'},
'internal_addresses': ['intaddr']}
"""
relation_info = {}
try:
cluster_relid = hookenv.relation_ids('cluster')[0]
if not hookenv.related_units(relid=cluster_relid):
relation_info = {
'cluster_hosts': self.local_default_addresses(),
'internal_addresses': self.internal_addresses,
}
net_split = self.local_network_split_addresses()
for key in net_split.keys():
relation_info['cluster_hosts'][key] = net_split[key]
except IndexError:
pass
return relation_info
def local_network_split_addresses(self):
"""Map of local units addresses for each address type
NOTE: This excludes private-address
@return dict of backends and networks for local unit e.g.
{'this_unit_admin_addr': {
'backends': {
'this_unit-1': 'this_unit_admin_addr'},
'network': 'this_unit_admin_addr/admin_netmask'},
'this_unit_internal_addr': {
'backends': {
'this_unit-1': 'this_unit_internal_addr'},
'network': 'this_unit_internal_addr/internal_netmask'},
'this_unit_public_addr': {
'backends': {
'this_unit-1': 'this_unit_public_addr'},
'network': 'this_unit_public_addr/public_netmask'}}
"""
config = hookenv.config()
_cluster_hosts = {}
for addr_type in ADDRESS_TYPES:
cfg_opt = os_ip.ADDRESS_MAP[addr_type]['config']
laddr = ch_ip.get_address_in_network(config.get(cfg_opt))
if laddr:
netmask = ch_ip.get_netmask_for_address(laddr)
_cluster_hosts[laddr] = {
'network': "{}/{}".format(laddr, netmask),
'backends': {self.local_unit_name: laddr}}
return _cluster_hosts
def local_default_addresses(self):
"""Map of local units private address
@return dict of private address info local unit e.g.
{'this_unit_private_addr': {
'backends': {
'this_unit-1': 'this_unit_private_addr'},
'network': 'this_unit_private_addr/private_netmask'}}
"""
netmask = ch_ip.get_netmask_for_address(self.local_address)
_local_map = {
self.local_address: {
'network': "{}/{}".format(self.local_address, netmask),
'backends': {self.local_unit_name: self.local_address}}}
return _local_map
def add_network_split_addresses(self):
"""Populate cluster_hosts with addresses of this unit and its
peers on each address type
@return None
"""
for addr_type in ADDRESS_TYPES:
cfg_opt = os_ip.ADDRESS_MAP[addr_type]['config']
laddr = ch_ip.get_address_in_network(self.config.get(cfg_opt))
if laddr:
self.cluster_hosts[laddr] = \
self.local_network_split_addresses()[laddr]
key = '{}-address'.format(
os_ip.ADDRESS_MAP[addr_type]['binding'])
for _unit, _laddr in self.relation.ip_map(address_key=key):
if _laddr:
self.cluster_hosts[laddr]['backends'][_unit] = _laddr
def add_default_addresses(self):
"""Populate cluster_hosts with private-address of this unit and its
peers
@return None
"""
self.cluster_hosts[self.local_address] = \
self.local_default_addresses()[self.local_address]
for _unit, _laddr in self.relation.ip_map():
self.cluster_hosts[self.local_address]['backends'][_unit] = _laddr
class DatabaseRelationAdapter(OpenStackRelationAdapter):
"""
Adapter for the Database relation interface.
"""
interface_type = "database"
def __init__(self, relation):
add_accessors = ['password', 'username', 'database']
super(DatabaseRelationAdapter, self).__init__(relation, add_accessors)
@property
def host(self):
"""
        Hostname that should be used to access the database.
"""
return self.relation.db_host()
@property
def type(self):
return 'mysql'
def get_uri(self, prefix=None):
if prefix:
uri = 'mysql://{}:{}@{}/{}'.format(
self.relation.username(prefix=prefix),
self.relation.password(prefix=prefix),
self.host,
self.relation.database(prefix=prefix),
)
else:
uri = 'mysql://{}:{}@{}/{}'.format(
self.username,
self.password,
self.host,
self.database,
)
try:
if self.ssl_ca:
uri = '{}?ssl_ca={}'.format(uri, self.ssl_ca)
if self.ssl_cert:
uri = ('{}&ssl_cert={}&ssl_key={}'
.format(uri, self.ssl_cert, self.ssl_key))
except AttributeError:
# ignore ssl_ca or ssl_cert if not available
pass
return uri
@property
def uri(self):
return self.get_uri()
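    # Example of the URI shape produced above (values are placeholders):
    #   mysql://keystone:s3cret@10.0.0.10/keystone?ssl_ca=/path/to/ca.pem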
def make_default_options(base_cls=None, charm_instance=None):
"""Create a default, customised ConfigurationAdapter, or derived class
(based on the base_cls) using any custom properties that might have been
made.
    If base_cls is None, the default ConfigurationAdapter will be used.
:param base_cls: a ConfigurationAdapter or derived class
:param charm_instance: the charm instance to plug into the options.
"""
return make_default_configuration_adapter_class(
base_cls=base_cls,
custom_properties=_custom_config_properties)(
charm_instance=charm_instance)
def make_default_configuration_adapter_class(base_cls=None,
custom_properties=None):
"""Create a default configuration adapter, using the base type specified
and any customer configuration properties.
This is called by the charm creation metaclass when 'bringing' up the class
    if no configuration adapter has been specified in the adapters_class.
:param base_cls: a ConfigurationAdapter derived class; or None
:param custom_properties: the name:function for the properties to set.
"""
base_cls = base_cls or ConfigurationAdapter
# if there are no custom properties, just return the base_cls
if not custom_properties:
return base_cls
# turns the functions into properties on the class
properties = {n: property(f) for n, f in six.iteritems(custom_properties)}
# build a custom class with the custom properties
return type('DefaultConfigurationAdapter', (base_cls, ), properties)
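# e.g. (a sketch; usable only inside a hook context, since the constructor
# reads hookenv.config()):
#   cls = make_default_configuration_adapter_class(
#       None, {'debug_level': lambda self: 'DEBUG'})
#   cls().debug_level  # -> 'DEBUG', alongside the copied juju config options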
class ConfigurationAdapter(object):
"""
Configuration Adapter which provides python based access
to all configuration options for the current charm.
It also holds a weakref to the instance of the OpenStackCharm derived class
that it is associated with. This is so that methods on the configuration
adapter can query the charm class for global config (e.g. service_name).
The configuration items from Juju are copied over and the '-' are replaced
with '_'. This allows them to be used directly on the instance.
"""
def __init__(self, charm_instance=None):
"""Create a ConfigurationAdapter (or derived) class.
:param charm_instance: the instance of the OpenStackCharm derived
class.
"""
self._charm_instance_weakref = None
if charm_instance is not None:
self._charm_instance_weakref = weakref.ref(charm_instance)
# copy over (statically) the items of the charms Juju configuration
_config = hookenv.config()
for k, v in six.iteritems(_config):
k = k.replace('-', '_')
setattr(self, k, v)
@property
def charm_instance(self):
"""Return the reference to the charm_instance or return None"""
if self._charm_instance_weakref:
return self._charm_instance_weakref()
return None
class APIConfigurationAdapter(ConfigurationAdapter):
"""This configuration adapter extends the base class and adds properties
    common across most OpenStack API services.
"""
def __init__(self, port_map=None, service_name=None, charm_instance=None):
"""
        Note passing port_map and service_name is deprecated, but supported
        for backwards compatibility. The port_map and service_name can be
        obtained from the self.charm_instance weak reference.
:param port_map: Map containing service names and the ports used e.g.
port_map = {
'svc1': {
'admin': 9001,
'public': 9001,
'internal': 9001,
},
'svc2': {
'admin': 9002,
'public': 9002,
'internal': 9002,
},
}
:param service_name: Name of service being deployed
:param charm_instance: a charm instance that will be passed to the base
constructor
"""
super(APIConfigurationAdapter, self).__init__(
charm_instance=charm_instance)
if port_map is not None:
hookenv.log(
"DEPRECATION: should not use port_map parameter in "
"APIConfigurationAdapter.__init__()", level=hookenv.WARNING)
self.port_map = port_map
elif self.charm_instance is not None:
self.port_map = self.charm_instance.api_ports
else:
self.port_map = None
if service_name is not None:
hookenv.log(
"DEPRECATION: should not use service_name parameter in "
"APIConfigurationAdapter.__init__()", level=hookenv.WARNING)
self.service_name = service_name
elif self.charm_instance is not None:
self.service_name = self.charm_instance.name
else:
self.service_name = None
self.__network_addresses = None
@property
def network_addresses(self):
"""Return the network_addresses as a property for a consuming template.
See APIConfigurationAdapter.get_network_addresses() for detail on the
return type.
"""
# cache and lazy resolve the network addresses - also helps with unit
# testing
if self.__network_addresses is None:
self.__network_addresses = self.get_network_addresses()
return self.__network_addresses
@property
def external_ports(self):
"""Return ports the service will be accessed on
        The self.port_map is a dictionary of dictionaries, where the ports are
two levels deep (the leaves). This returns a set() of those ports.
@return set of ports service can be accessed on
"""
        # chain together the values of each second-level dictionary to
        # extract the set of ports.
return set(itertools.chain(*map(lambda x: x.values(),
self.port_map.values())))
@property
def ipv6_mode(self):
"""Return if charm should enable IPv6
@return True if user has requested ipv6 support otherwise False
"""
return getattr(self, 'prefer_ipv6', False)
@property
def local_address(self):
"""Return remotely accessible address of charm (not localhost)
        @return remotely accessible IP address of the unit
"""
if self.ipv6_mode:
addr = ch_ip.get_ipv6_addr(exc_list=[self.vip])[0]
else:
addr = ch_utils.get_host_ip(
hookenv.unit_get('private-address'))
return addr
@property
def local_unit_name(self):
"""
@return local unit name
"""
return hookenv.local_unit().replace('/', '-')
@property
def local_host(self):
"""Return localhost address depending on whether IPv6 is enabled
@return localhost ip address
"""
return 'ip6-localhost' if self.ipv6_mode else '127.0.0.1'
@property
def haproxy_host(self):
"""Return haproxy bind address depending on whether IPv6 is enabled
@return address
"""
return '::' if self.ipv6_mode else '0.0.0.0'
@property
def haproxy_stat_port(self):
"""Port to listen on to access haproxy statistics
@return port
"""
return '8888'
@property
def haproxy_stat_password(self):
"""Password for accessing haproxy statistics
@return password
"""
return charms.reactive.bus.get_state('haproxy.stat.password')
@property
def service_ports(self):
"""Dict of service names and the ports they listen on
@return {'svc1': ['portA', 'portB'], 'svc2': ['portC', 'portD'], ...}
"""
service_ports = {}
if self.port_map:
for service in self.port_map.keys():
port_types = sorted(list(self.port_map[service].keys()))
for port_type in port_types:
listen_port = self.port_map[service][port_type]
key = '{}_{}'.format(service, port_type)
used_ports = [v[0] for v in service_ports.values()]
if listen_port in used_ports:
hookenv.log("Not adding haproxy listen stanza for {} "
"port is already in use".format(key),
level=hookenv.WARNING)
continue
service_ports[key] = [
self.port_map[service][port_type],
ch_cluster.determine_apache_port(
self.port_map[service][port_type],
singlenode_mode=True)]
return service_ports
@property
def apache_enabled(self):
"""Whether apache is being used for this service
        @return True if apache2 is being used for this service
"""
return charms.reactive.bus.get_state('ssl.enabled')
def determine_service_port(self, port):
"""Calculate port service should use given external port
Haproxy fronts connections for a service and may pass connections to
        Apache for SSL termination. If Apache is being used:
Haproxy listens on N
Apache listens on N-10
Service listens on N-20
else
Haproxy listens on N
Service listens on N-10
:param int port: port service uses for external connections
@return int port: port backend service should use
"""
i = 10
if self.apache_enabled:
i = 20
return (port - i)
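    # For example (illustrative numbers): with Apache/SSL enabled an external
    # port of 9001 maps to a backend service port of 8981 (9001 - 20); without
    # Apache it would be 8991 (9001 - 10).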
@property
def service_listen_info(self):
"""Dict of service names and attributes for backend to listen on
@return {
'svc1': {
'proto': 'http',
'ip': '10.0.0.10',
'port': '8080',
                'url': 'http://10.0.0.10:8080'},
'svc2': {
'proto': 'https',
'ip': '10.0.0.20',
'port': '8443',
                'url': 'https://10.0.0.20:8443'},
...
"""
info = {}
ip = self.local_host if self.apache_enabled else self.local_address
if self.port_map:
for service in self.port_map.keys():
key = service.replace('-', '_')
info[key] = {
'proto': 'http',
'ip': ip,
'port': self.determine_service_port(
self.port_map[service]['admin'])}
for port_type in self.port_map[service].keys():
port_key = '{}_port'.format(port_type)
info[key][port_key] = self.determine_service_port(
self.port_map[service][port_type])
info[key]['url'] = '{proto}://{ip}:{port}'.format(**info[key])
return info
@property
def external_endpoints(self):
"""Dict of service names and attributes that clients use to connect
@return {
'svc1': {
'proto': 'http',
'ip': '10.0.0.10',
'port': '8080',
                'url': 'http://10.0.0.10:8080'},
'svc2': {
'proto': 'https',
'ip': '10.0.0.20',
'port': '8443',
                'url': 'https://10.0.0.20:8443'},
...
"""
info = {}
# Bug #1640393. Return self.local_address if vip is undefined, None or
# an empty string.
ip = getattr(self, 'vip', None) or self.local_address
proto = 'https' if self.apache_enabled else 'http'
if self.port_map:
for service in self.port_map.keys():
key = service.replace('-', '_')
info[key] = {
'proto': proto,
'ip': ip,
'port': self.port_map[service]['admin']}
info[key]['url'] = '{proto}://{ip}:{port}'.format(**info[key])
return info
def get_network_addresses(self):
"""For each network configured, return corresponding address and vip
(if available).
Returns a list of tuples of the form:
[(address_in_net_a, vip_in_net_a),
(address_in_net_b, vip_in_net_b),
...]
or, if no vip(s) available:
[(address_in_net_a, address_in_net_a),
(address_in_net_b, address_in_net_b),
...]
"""
addresses = []
for net_type in ADDRESS_TYPES:
net_cfg_opt = os_ip.ADDRESS_MAP[net_type]['config'].replace('-',
'_')
config_cidr = getattr(self, net_cfg_opt, None)
addr = ch_ip.get_address_in_network(
config_cidr,
hookenv.unit_get('private-address'))
addresses.append(
(addr, os_ip.resolve_address(endpoint_type=net_type)))
return sorted(addresses)
@property
def endpoints(self):
"""List of endpoint information.
Endpoint information used to configure apache
Client -> endpoint -> address:ext_port -> local:int_port
        NOTE: the endpoint may be a vip
returns [
(address1, endpoint1, ext_port1, int_port1),
(address2, endpoint2, ext_port2, int_port2)
...
]
"""
endpoints = []
for address, endpoint in sorted(set(self.network_addresses)):
for api_port in self.external_ports:
ext_port = ch_cluster.determine_apache_port(
api_port,
singlenode_mode=True)
int_port = ch_cluster.determine_api_port(
api_port,
singlenode_mode=True)
portmap = (address, endpoint, int(ext_port), int(int_port))
endpoints.append(portmap)
return endpoints
@property
def ext_ports(self):
""" List of endpoint ports
@returns List of ports
"""
eps = [ep[2] for ep in self.endpoints]
return sorted(list(set(eps)))
@property
    def use_memcache(self):
        """Whether memcache should be used with this OpenStack release.
        @return True if the installed release is Mitaka or later
        """
        release = ch_utils.get_os_codename_install_source(
            self.openstack_origin)
        if release not in os_release_data.KNOWN_RELEASES:
            raise ValueError("Unknown release {}".format(release))
return (os_release_data.KNOWN_RELEASES.index(release) >=
os_release_data.KNOWN_RELEASES.index('mitaka'))
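    # For example: if the configured origin resolves to 'mitaka' or any later
    # release in KNOWN_RELEASES then use_memcache is True; for an earlier
    # release such as 'liberty' it is False.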
@property
def memcache_server(self):
if ch_host.lsb_release()['DISTRIB_RELEASE'] > '14.04':
memcache_server = '::1'
else:
memcache_server = 'ip6-localhost'
return memcache_server
@property
def memcache_host(self):
return '[::1]'
@property
def memcache_port(self):
return '11211'
@property
def memcache_url(self):
return 'inet6:{}:{}'.format(self.memcache_host, self.memcache_port)
def make_default_relation_adapter(base_cls, relation, properties):
"""Create a default relation adapter using a base class, and custom
properties for various relations that may have been defined as custom
properties.
    This mixes the declarative 'custom' properties with the default classes
to provide a class that manages the relation for the charm.
This mixes the associated RelationAdapter class with the custom relations.
:param base_cls: the class to use as the base for the properties
:param relation: the relation we want the properties for
:param properties: {key: function} functions to make custom properties
"""
# Just return the base_cls if there's nothing to modify
if not properties:
return base_cls
# convert the functions into properties
props = {n: property(f) for n, f in six.iteritems(properties)}
# turn 'my-Something_interface' into 'MySomethingInterface'
    # future proof in case other chars come in which can't be in a Python
    # class name.
relation = re.sub(r'[^a-zA-Z_-]', '', relation)
parts = relation.replace('-', '_').lower().split('_')
header = ''.join([s.capitalize() for s in parts])
name = "{}RelationAdapterModified".format(header)
# and make the class
return type(name, (base_cls,), props)
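# Sketch of the naming scheme above (hypothetical relation name): a relation
# called 'shared-db' would yield a class named 'SharedDbRelationAdapterModified'
# derived from the supplied base_cls, with each custom function exposed as a
# property on it.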
class OpenStackRelationAdapters(object):
"""
Base adapters class for OpenStack Charms, used to aggregate
the relations associated with a particular charm so that their
properties can be accessed using dot notation, e.g:
adapters.amqp.private_address
"""
relation_adapters = {}
"""
Dictionary mapping relation names to adapter classes, e.g:
relation_adapters = {
'amqp': RabbitMQRelationAdapter,
}
By default, relations will be wrapped in an OpenStackRelationAdapter.
    Each derived class can define its OWN relation_adapters, and these will
    overlay those of the classes further back in the class hierarchy,
    according to the mro() of the class.
"""
def __init__(self, relations, options=None, options_instance=None,
charm_instance=None):
"""
:param relations: List of instances of relation classes
:param options: Configuration class to use (DEPRECATED)
:param options_instance: Instance of Configuration class to use
:param charm_instance: optional charm_instance that is captured as a
weakref for use on the adapter.
"""
self._charm_instance_weakref = None
if charm_instance is not None:
self._charm_instance_weakref = weakref.ref(charm_instance)
self._relations = set()
if options is not None:
hookenv.log("The 'options' argument is deprecated please use "
"options_instance instead.", level=hookenv.WARNING)
self.options = options()
elif options_instance is not None:
self.options = options_instance
else:
            # Create a default, customised ConfigurationAdapter.  If the
            # APIConfigurationAdapter is needed as a base, then it must be
            # passed as an instance via options_instance.  First, pull the
            # configuration class from the charm instance (if it's available).
base_cls = None
if self.charm_instance:
base_cls = getattr(self.charm_instance, 'configuration_class',
base_cls)
self.options = make_default_options(base_cls, self.charm_instance)
self._relations.add('options')
# walk the mro() from object to this class to build up the _adapters
        # ensure that all of the relations have their '-' turned into a '_'
        # so that everything is consistent in the class.
self._adapters = {}
for cls in reversed(self.__class__.mro()):
self._adapters.update(
{k.replace('-', '_'): v
for k, v in six.iteritems(
getattr(cls, 'relation_adapters', {}))})
# now we have to add in any customisations to those adapters
for relation, properties in six.iteritems(_custom_adapter_properties):
relation = relation.replace('-', '_')
try:
cls = self._adapters[relation]
except KeyError:
cls = OpenStackRelationAdapter
self._adapters[relation] = make_default_relation_adapter(
cls, relation, properties)
self.add_relations(relations)
@property
def charm_instance(self):
"""Return the reference to the charm_instance or return None"""
if self._charm_instance_weakref:
return self._charm_instance_weakref()
return None
def __iter__(self):
"""
Iterate over the relations presented to the charm.
"""
for relation in self._relations:
yield relation, getattr(self, relation)
def add_relations(self, relations):
"""Add the relations to this adapters instance for use as a context.
:params relations: list of RAW reactive relation instances.
"""
for relation in relations:
self.add_relation(relation)
def add_relation(self, relation):
"""Add the relation to this adapters instance for use as a context.
:param relation: a RAW reactive relation instance
"""
adapter_name, adapter = self.make_adapter(relation)
setattr(self, adapter_name, adapter)
self._relations.add(adapter_name)
def make_adapter(self, relation):
"""Make an adapter from a reactive relation.
This returns the relation_name and the adapter instance based on the
registered custom adapter classes and any customised properties on
those adapter classes.
:param relation: a RelationBase derived reactive relation
:returns (string, OpenstackRelationAdapter-derived): see above.
"""
relation_name = relation.relation_name.replace('-', '_')
try:
adapter = self._adapters[relation_name](relation)
except KeyError:
adapter = OpenStackRelationAdapter(relation)
return relation_name, adapter
class OpenStackAPIRelationAdapters(OpenStackRelationAdapters):
relation_adapters = {
'amqp': RabbitMQRelationAdapter,
'shared_db': DatabaseRelationAdapter,
'cluster': PeerHARelationAdapter,
}
def __init__(self, relations, options=None, options_instance=None,
charm_instance=None):
"""
:param relations: List of instances of relation classes
:param options: Configuration class to use (DEPRECATED)
:param options_instance: Instance of Configuration class to use
:param charm_instance: an instance of the charm class
"""
super(OpenStackAPIRelationAdapters, self).__init__(
relations,
options=options,
options_instance=options_instance,
charm_instance=charm_instance)
if 'cluster' not in self._relations:
# cluster has not been passed through already, so try to resolve it
# automatically when it is accessed.
self.__resolved_cluster = None
# add a property for the cluster to resolve it
self._relations.add('cluster')
setattr(self.__class__, 'cluster',
property(lambda x: x.__cluster()))
def __cluster(self):
"""The cluster relations is auto added onto adapters instance"""
if not self.__resolved_cluster:
self.__resolved_cluster = self.__resolve_cluster()
return self.__resolved_cluster
def __resolve_cluster(self):
""" Resolve what the cluster adapter is.
        LY: The cluster interface only gets initialised if there is more
        than one unit in a cluster; however, a cluster of one unit is valid
        for the OpenStack API charms.  So, create and populate the 'cluster'
        namespace with data for a single unit if there are no peers.
:returns: cluster adapter or None
"""
smm = PeerHARelationAdapter(relation_name='cluster').single_mode_map
if smm:
return smm
else:
# LY: Automatically add the cluster relation if it exists and
# has not been passed through.
cluster_rel = reactive.RelationBase.from_state('cluster.connected')
if cluster_rel:
return PeerHARelationAdapter(relation=cluster_rel)
return None
|
{
"content_hash": "459eb8b2ae5464a7a8b613e5ee4be9f6",
"timestamp": "",
"source": "github",
"line_count": 1029,
"max_line_length": 79,
"avg_line_length": 37.207968901846456,
"alnum_prop": 0.57740225141693,
"repo_name": "ajkavanagh/charms.openstack",
"id": "12e1629f2af78577b03c2ea13de257fba42440b0",
"size": "38862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "charms_openstack/adapters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "344"
},
{
"name": "Python",
"bytes": "326700"
}
],
"symlink_target": ""
}
|
import sys
try:
sys.getsizeof
except AttributeError:
print('SKIP')
raise SystemExit
print(sys.getsizeof(1) >= 2)
print(sys.getsizeof("") >= 2)
print(sys.getsizeof((1, 2)) >= 2)
print(sys.getsizeof([1, 2]) >= 2)
print(sys.getsizeof({1: 2}) >= 2)
class A:
pass
print(sys.getsizeof(A()) > 0)
try:
assert sys.getsizeof(set()) >= 2
except NameError:
pass
# Only test deque if we have it
try:
from ucollections import deque
assert sys.getsizeof(deque((), 1)) > 0
except ImportError:
pass
|
{
"content_hash": "5cf5456d3c453efa5fba3578e64f0b61",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 42,
"avg_line_length": 18.03448275862069,
"alnum_prop": 0.6443594646271511,
"repo_name": "pfalcon/micropython",
"id": "32d891418365ae240d7f5299538cc3dbba05b5ec",
"size": "592",
"binary": false,
"copies": "1",
"ref": "refs/heads/pfalcon",
"path": "tests/basics/sys_getsizeof.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "10582"
},
{
"name": "C",
"bytes": "14095787"
},
{
"name": "C++",
"bytes": "588783"
},
{
"name": "CMake",
"bytes": "876"
},
{
"name": "JavaScript",
"bytes": "5792"
},
{
"name": "Makefile",
"bytes": "153731"
},
{
"name": "Objective-C",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "1060906"
},
{
"name": "Shell",
"bytes": "16846"
}
],
"symlink_target": ""
}
|
import sys, os, re, subprocess, codecs, optparse
CMD_PYTHON = sys.executable
QOOXDOO_PATH = '../../../thirdparty/qooxdoo/qooxdoo-2.1.1-sdk'
QX_PYLIB = "tool/pylib"
##
# A derived OptionParser class that ignores unknown options (The parent
# class raises in those cases, and stops further processing).
# We need this, as we are only interested in -c/--config on this level, and
# want to ignore potential other options.
#
class IgnoringUnknownOptionParser(optparse.OptionParser):
##
# <rargs> is the raw argument list. The original _process_args mutates
# rargs, processing options into <values> and copying interspersed args
# into <largs>. This overridden version ignores unknown or ambiguous
# options.
def _process_args(self, largs, rargs, values):
while rargs:
try:
optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.AmbiguousOptionError):
pass
def parseArgs():
parser = IgnoringUnknownOptionParser(add_help_option=False)
parser.add_option(
"-c", "--config", dest="config", metavar="CFGFILE",
default="config.json", help="path to configuration file"
)
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true",
default=False, help="run in verbose mode"
)
(options, args) = parser.parse_args(sys.argv[1:])
return options, args
ShellOptions, ShellArgs = parseArgs()
# this is from misc.json, duplicated for decoupling
_eolComment = re.compile(r'(?<![a-zA-Z]:)//.*$', re.M) # double $ for string.Template
_mulComment = re.compile(r'/\*.*?\*/', re.S)
def stripComments(s):
b = _eolComment.sub('',s)
b = _mulComment.sub('',b)
return b
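# For example (illustrative input): the lookbehind in _eolComment protects
# protocol-style '//' that follows a letter and a colon, so a line such as
#   '"QOOXDOO_PATH" : "http://example.com/qx" // local sdk'
# loses only the trailing comment, not the URL.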
def getQxPath():
path = QOOXDOO_PATH
# OS env takes precedence
    if "QOOXDOO_PATH" in os.environ:
path = os.environ["QOOXDOO_PATH"]
# else use QOOXDOO_PATH from config.json
else:
config_file = ShellOptions.config
if os.path.exists(config_file):
# try json parsing with qx json
if not path.startswith('${'): # template macro has been resolved
sys.path.insert(0, os.path.join(path, QX_PYLIB))
try:
from misc import json
got_json = True
except:
got_json = False
got_path = False
if got_json:
config_str = codecs.open(config_file, "r", "utf-8").read()
#config_str = stripComments(config_str) # not necessary under demjson
config = json.loads(config_str)
p = config.get("let")
if p:
p = p.get("QOOXDOO_PATH")
if p:
path = p
got_path = True
# regex parsing - error prone
if not got_path:
qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?')
conffile = codecs.open(config_file, "r", "utf-8")
aconffile = conffile.readlines()
for line in aconffile:
mo = qpathr.search(line)
if mo:
path = mo.group(1)
break # assume first occurrence is ok
path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path))
return path
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir
qxpath = getQxPath()
REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py')
if not os.path.exists(REAL_GENERATOR):
print "Cannot find real generator script under: \"%s\"; aborting" % REAL_GENERATOR
sys.exit(1)
elif ShellOptions.verbose:
print "\nInvoking real generator under %s ..." % REAL_GENERATOR
argList = []
argList.append(CMD_PYTHON)
argList.append(REAL_GENERATOR)
argList.extend(sys.argv[1:])
if sys.platform == "win32":
argList1=[]
for arg in argList:
if arg.find(' ')>-1:
argList1.append('"%s"' % arg)
else:
argList1.append(arg)
argList = argList1
else:
argList = ['"%s"' % x for x in argList] # quote argv elements
cmd = " ".join(argList)
retval = subprocess.call(cmd, shell=True)
sys.exit(retval)
|
{
"content_hash": "60c3b0803185fde3bda95ad7b6c568d8",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 94,
"avg_line_length": 35.04838709677419,
"alnum_prop": 0.5883571099861942,
"repo_name": "ms123s/simpl4-src",
"id": "683adedd35dcfb0ac1d4b149fb05deeec663967c",
"size": "5011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/website/generate.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3887"
},
{
"name": "CSS",
"bytes": "735606"
},
{
"name": "Groovy",
"bytes": "512961"
},
{
"name": "HTML",
"bytes": "3285460"
},
{
"name": "Java",
"bytes": "4169538"
},
{
"name": "JavaScript",
"bytes": "33927529"
},
{
"name": "Python",
"bytes": "14350"
},
{
"name": "Shell",
"bytes": "72131"
},
{
"name": "Smarty",
"bytes": "19765"
},
{
"name": "XSLT",
"bytes": "579304"
}
],
"symlink_target": ""
}
|
"""
Helpers for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
import collections
import logging
from homeassistant.core import callback
from .const import (
CLUSTER_TYPE_IN,
CLUSTER_TYPE_OUT,
DATA_ZHA,
DATA_ZHA_GATEWAY,
DEFAULT_BAUDRATE,
RadioType,
)
from .registries import BINDABLE_CLUSTERS
_LOGGER = logging.getLogger(__name__)
ClusterPair = collections.namedtuple("ClusterPair", "source_cluster target_cluster")
async def safe_read(
cluster, attributes, allow_cache=True, only_cache=False, manufacturer=None
):
"""Swallow all exceptions from network read.
    If we throw during initialization, setup fails. It is better to have an
    entity that exists but may be in the wrong state than no entity at all.
    This method should probably only be used during initialization.
"""
try:
result, _ = await cluster.read_attributes(
attributes,
allow_cache=allow_cache,
only_cache=only_cache,
manufacturer=manufacturer,
)
return result
except Exception: # pylint: disable=broad-except
return {}
async def check_zigpy_connection(usb_path, radio_type, database_path):
"""Test zigpy radio connection."""
if radio_type == RadioType.ezsp.name:
import bellows.ezsp
from bellows.zigbee.application import ControllerApplication
radio = bellows.ezsp.EZSP()
elif radio_type == RadioType.xbee.name:
import zigpy_xbee.api
from zigpy_xbee.zigbee.application import ControllerApplication
radio = zigpy_xbee.api.XBee()
elif radio_type == RadioType.deconz.name:
import zigpy_deconz.api
from zigpy_deconz.zigbee.application import ControllerApplication
radio = zigpy_deconz.api.Deconz()
elif radio_type == RadioType.zigate.name:
import zigpy_zigate.api
from zigpy_zigate.zigbee.application import ControllerApplication
radio = zigpy_zigate.api.ZiGate()
try:
await radio.connect(usb_path, DEFAULT_BAUDRATE)
controller = ControllerApplication(radio, database_path)
await asyncio.wait_for(controller.startup(auto_form=True), timeout=30)
await controller.shutdown()
except Exception: # pylint: disable=broad-except
return False
return True
def convert_ieee(ieee_str):
"""Convert given ieee string to EUI64."""
from zigpy.types import EUI64, uint8_t
if ieee_str is None:
return None
return EUI64([uint8_t(p, base=16) for p in ieee_str.split(":")])
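# For example (illustrative address): convert_ieee("00:0d:6f:00:0a:90:69:e7")
# builds an EUI64 from the colon-separated hex octets, while
# convert_ieee(None) simply returns None.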
def get_attr_id_by_name(cluster, attr_name):
"""Get the attribute id for a cluster attribute by its name."""
return next(
(
attrid
for attrid, (attrname, datatype) in cluster.attributes.items()
if attr_name == attrname
),
None,
)
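# For example (hypothetical cluster): if cluster.attributes maps
# 0x0000 -> ("zcl_version", <datatype>), then
# get_attr_id_by_name(cluster, "zcl_version") returns 0x0000.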
async def get_matched_clusters(source_zha_device, target_zha_device):
"""Get matched input/output cluster pairs for 2 devices."""
source_clusters = source_zha_device.async_get_std_clusters()
target_clusters = target_zha_device.async_get_std_clusters()
clusters_to_bind = []
for endpoint_id in source_clusters:
for cluster_id in source_clusters[endpoint_id][CLUSTER_TYPE_OUT]:
if cluster_id not in BINDABLE_CLUSTERS:
continue
for t_endpoint_id in target_clusters:
if cluster_id in target_clusters[t_endpoint_id][CLUSTER_TYPE_IN]:
cluster_pair = ClusterPair(
source_cluster=source_clusters[endpoint_id][CLUSTER_TYPE_OUT][
cluster_id
],
target_cluster=target_clusters[t_endpoint_id][CLUSTER_TYPE_IN][
cluster_id
],
)
clusters_to_bind.append(cluster_pair)
return clusters_to_bind
@callback
def async_is_bindable_target(source_zha_device, target_zha_device):
"""Determine if target is bindable to source."""
source_clusters = source_zha_device.async_get_std_clusters()
target_clusters = target_zha_device.async_get_std_clusters()
for endpoint_id in source_clusters:
for t_endpoint_id in target_clusters:
matches = set(
source_clusters[endpoint_id][CLUSTER_TYPE_OUT].keys()
).intersection(target_clusters[t_endpoint_id][CLUSTER_TYPE_IN].keys())
if any(bindable in BINDABLE_CLUSTERS for bindable in matches):
return True
return False
async def async_get_zha_device(hass, device_id):
"""Get a ZHA device for the given device registry id."""
device_registry = await hass.helpers.device_registry.async_get_registry()
registry_device = device_registry.async_get(device_id)
zha_gateway = hass.data[DATA_ZHA][DATA_ZHA_GATEWAY]
ieee_address = list(list(registry_device.identifiers)[0])[1]
ieee = convert_ieee(ieee_address)
return zha_gateway.devices[ieee]
class LogMixin:
"""Log helper."""
def log(self, level, msg, *args):
"""Log with level."""
raise NotImplementedError
def debug(self, msg, *args):
"""Debug level log."""
return self.log(logging.DEBUG, msg, *args)
def info(self, msg, *args):
"""Info level log."""
return self.log(logging.INFO, msg, *args)
def warning(self, msg, *args):
"""Warning method log."""
return self.log(logging.WARNING, msg, *args)
def error(self, msg, *args):
"""Error level log."""
return self.log(logging.ERROR, msg, *args)
|
{
"content_hash": "6226adfa6827434131523ead8ebb83c9",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 87,
"avg_line_length": 33.23699421965318,
"alnum_prop": 0.6417391304347826,
"repo_name": "Cinntax/home-assistant",
"id": "b07658e72d01ebacea3f02268b83633a3227ab97",
"size": "5750",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
import os
import ConfigParser
import argparse
from apscheduler.scheduler import Scheduler
from datetime import datetime
from modules.light import read_light
from modules.soil_moisture import get_moisture
from modules.soil_temperature import get_celsius
from modules.temperature_and_humidity import get_temperature_and_humidity
from modules.controller import Controller
from api import publish_data, publish_image, get_triggers
from output import write_data, write_image
parser = argparse.ArgumentParser(description="Farmy Raspberry Pi Client")
parser.add_argument('--mode', dest='mode', type=str, default='once', help="Set mode. 'hold' or 'once'")
parser.add_argument('--config', dest='config_path', type=str, required=True)
config = ConfigParser.ConfigParser()
def fetch_data(file_path, dht_pin, plant_id, api_key):
print("Fetch Data {}".format(datetime.now()))
data = dict(
light=read_light(),
soil_moisture=get_moisture(),
soil_temperature=get_celsius(),
)
temperature, humidity = get_temperature_and_humidity(dht_pin)
if temperature is not None:
data.update(dict(
temperature=temperature,
humidity=humidity
))
now = datetime.now()
data.update(dict(
ts=int((now - datetime.fromtimestamp(0)).total_seconds()),
dt=now.strftime("%Y/%m/%d %H:%M:%S")
))
print(data)
write_data(data, file_path)
publish_data(data, plant_id, api_key)
def fetch_image(file_path, camera_type, plant_id, api_key):
light = read_light()
if light <= 5:
print('Too dark to take photo.')
return
if camera_type == 'web':
from modules.camera.webcam import take_picture_web
image_raw = take_picture_web()
elif camera_type == 'pi':
from modules.camera.pi import take_picture_pi
image_raw = take_picture_pi()
else:
raise ValueError("camera_type `{}` invalid".format(camera_type))
write_image(image_raw, file_path)
print('Take Picture by {}. Save to {}'.format(camera_type, file_path))
publish_image(image_raw, plant_id, api_key)
def trigger(pump_pin, led_pin, plant_id, api_key):
pump_controller = Controller(pump_pin)
led_controller = Controller(led_pin)
triggers = get_triggers(plant_id, api_key)
for trigger_data in triggers:
action = trigger_data['action']
if trigger_data['controller'] == 'pump':
getattr(pump_controller, action)()
else:
getattr(led_controller, action)()
def check_device(camera_type):
if camera_type == "web":
from modules.camera.webcam import web_camera
if web_camera is None:
raise ValueError("web camera not found.")
elif camera_type == "pi":
from modules.camera.pi import pi_camera
if pi_camera is None:
raise ValueError("Pi Camera not found.")
else:
raise ValueError("camera_type `{}` invalid".format(camera_type))
def main():
args = parser.parse_args()
mode = args.mode
config_path = args.config_path
config.read(config_path)
camera_type = config.get('device', 'camera_type')
file_path = config.get('log', 'file_path')
dht_pin = config.getint('device', 'dht_pin')
plant_id = config.get('api', 'plant_id')
api_key = config.get('api', 'api_key')
pump_pin = config.getint('device', 'pump_pin')
led_pin = config.getint('device', 'led_pin')
check_device(camera_type)
print("Farmy device init.")
image_path = os.path.join(file_path, 'photos')
if not os.path.exists(image_path):
os.makedirs(image_path)
if mode == 'hold':
print('Starting...')
sched = Scheduler()
sched.start()
        sched.add_cron_job(fetch_data, minute="*/10",
                           args=[file_path, dht_pin, plant_id, api_key])  # runs every 10 minutes
        sched.add_cron_job(fetch_image, minute="*/30",
                           args=[image_path, camera_type, plant_id, api_key])  # runs every 30 minutes
        sched.add_cron_job(trigger, minute="*/10",
                           args=[pump_pin, led_pin, plant_id, api_key])  # runs every 10 minutes
trigger(pump_pin, led_pin, plant_id, api_key)
raw_input("Press enter to exit the program\n")
elif mode == 'once':
fetch_data(file_path, dht_pin, plant_id, api_key)
        fetch_image(image_path, camera_type, plant_id, api_key)
trigger(pump_pin, led_pin, plant_id, api_key)
else:
print('`--mode` option invalid. `hold` or `once`')
if __name__ == "__main__":
main()
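# Example invocation (paths here are assumptions for illustration):
#   python farmy.py --mode once --config /home/pi/farmy/config.ini
#   python farmy.py --mode hold --config /home/pi/farmy/config.ini
# 'once' takes a single reading/photo and exits; 'hold' schedules the cron
# jobs above and keeps running until enter is pressed.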
|
{
"content_hash": "2109f02ea3720f51a3fa28641fca24d9",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 103,
"avg_line_length": 35.38461538461539,
"alnum_prop": 0.6308695652173913,
"repo_name": "farmy-maker/farmy-py",
"id": "b1ea2b94032e054cb75dd5e2657dd1c8f510e393",
"size": "4616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "farmy/farmy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12080"
}
],
"symlink_target": ""
}
|
try:
import sys
import os
from Players.Audio import * # @UnusedWildImport
except ImportError, error:
print >> sys.stderr, "Erro ao importar o modulo", error
os._exit(1)
class ChannelSom:
"""Classe ChannelSom é uma classe para simular/tratar/lidar com canais de Som
:version: 224
:author: Felipe Miranda
"""
def __init__(self):
"""Construtor da classe
"""
self.channels = {}
self._loadChannels()
self.playerAudio = Audio(os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel1.mp3"), 0)
def _loadChannels(self):
"""Método que faz o mapeamento canal/caminho do arquivo de Som
"""
try:
if (not os.path.isfile(os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel1.mp3")) or
not os.path.isfile(os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel2.mp3")) or
not os.path.isfile(os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel3.mp3"))) :
raise IOError("Arquivos de audio inexistentes")
self.channels["1"] = os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel1.mp3")
self.channels["2"] = os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel2.mp3")
self.channels["3"] = os.path.abspath("Comodo/Channel/Files/SOM/Audios/channel3.mp3")
except IOError:
try:
if (not os.path.isfile(os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel1.mp3")) or
not os.path.isfile(os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel2.mp3")) or
not os.path.isfile(os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel3.mp3"))) :
raise IOError("Arquivos de audio inexistentes")
self.channels["1"] = os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel1.mp3")
self.channels["2"] = os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel2.mp3")
self.channels["3"] = os.path.abspath("trunk/Comodo/Channel/Files/SOM/Audios/channel3.mp3")
            except IOError, err:
                print >> sys.stderr, err
                os._exit(1)
def setChannel(self, channel):
"""Método modificador de canal do Som
:Param channel: Canal do Som
:Type channel: String
"""
self.playerAudio.audio(self.channels.get(channel))
def getPlayerAudio(self):
"""Método acessador de player de audio
:Return: Reprodutor de audio
:Rtype: Objeto Audio
"""
return self.playerAudio
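# Illustrative usage sketch (assumes the Audio helper switches tracks via the
# audio() call used in setChannel above):
#   som = ChannelSom()
#   som.setChannel("2")  # plays Comodo/Channel/Files/SOM/Audios/channel2.mp3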
|
{
"content_hash": "fcbff390cc2dbab973eb68c5856caa51",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 113,
"avg_line_length": 42.317460317460316,
"alnum_prop": 0.6125281320330083,
"repo_name": "felipelindemberg/ControleMultimidiaUniversal",
"id": "98e99835bf72490319b1a2d502fceb934a54ba53",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python_Controle_Multimidia_Universal/trunk/Comodo/Channel/ChannelSom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "23844"
},
{
"name": "Python",
"bytes": "118689"
},
{
"name": "Shell",
"bytes": "11400"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from django.conf.urls import patterns, url
from .webhook import GitHubIntegrationsWebhookEndpoint
urlpatterns = patterns(
'',
url(r'^webhook/$', GitHubIntegrationsWebhookEndpoint.as_view()),
)
|
{
"content_hash": "fba1c5bcde7982aec023500dfa8d27dc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 68,
"avg_line_length": 25.9,
"alnum_prop": 0.7606177606177607,
"repo_name": "looker/sentry",
"id": "b51cbae85e9332451889f3d4e7ccd66aa1823095",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/github/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
import os, sys, urllib, urllib2, socket
import xbmc, xbmcvfs, xbmcgui, xbmcaddon
import CommonFunctions
import re
import ftplib
import shutil
import time
import datetime
from datetime import date
# Minimal code to import bossanova808 common code
ADDON = xbmcaddon.Addon()
CWD = ADDON.getAddonInfo('path')
RESOURCES_PATH = xbmc.translatePath( os.path.join( CWD, 'resources' ))
LIB_PATH = xbmc.translatePath(os.path.join( RESOURCES_PATH, "lib" ))
sys.path.append( LIB_PATH )
from b808common import *
#import the tables that map conditions to icon number and short days to long days
from utilities import *
#parseDOM setup
common = CommonFunctions
common.plugin = ADDONNAME + "-" + VERSION
dbg = False # Set to false if you don't want debugging
dbglevel = 3
#Handy Strings
WEATHER_WINDOW = xbmcgui.Window(12600)
WEATHERZONE_URL = 'http://www.weatherzone.com.au'
FTPSTUB = "ftp://anonymous:someone%40somewhere.com@ftp.bom.gov.au//anon/gen/radar_transparencies/"
HTTPSTUB = "http://www.bom.gov.au/products/radar_transparencies/"
RADAR_BACKGROUNDS_PATH = ""
LOOP_IMAGES_PATH = ""
TEMPUNIT = unicode(xbmc.getRegion('tempunit'),encoding='utf-8')
WEATHER_ICON = xbmc.translatePath('special://temp/weather/%s.png').decode("utf-8")
# this is fetchPage from parseDOM...
# added emergency latin-1 decoding for weird char issues on Weatherzone
def fetchPage(params={}):
get = params.get
link = get("link")
ret_obj = {}
if get("post_data"):
log("called for : " + repr(params['link']))
else:
log("called for : " + repr(params))
if not link or int(get("error", "0")) > 2:
log("giving up")
ret_obj["status"] = 500
return ret_obj
if get("post_data"):
if get("hide_post_data"):
log("Posting data", 2)
else:
log("Posting data: " + urllib.urlencode(get("post_data")), 2)
request = urllib2.Request(link, urllib.urlencode(get("post_data")))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
else:
log("Got request", 2)
request = urllib2.Request(link)
if get("headers"):
for head in get("headers"):
request.add_header(head[0], head[1])
request.add_header('User-Agent', USERAGENT)
if get("cookie"):
request.add_header('Cookie', get("cookie"))
if get("refering"):
request.add_header('Referer', get("refering"))
try:
log("connecting to server...", 1)
con = urllib2.urlopen(request)
ret_obj["header"] = con.info()
ret_obj["new_url"] = con.geturl()
if get("no-content", "false") == u"false" or get("no-content", "false") == "false":
inputdata = con.read()
#data_type = chardet.detect(inputdata)
#inputdata = inputdata.decode(data_type["encoding"]
try:
ret_obj["content"] = inputdata.decode("utf-8")
except:
try:
ret_obj["content"] = inputdata.decode("latin-1")
except:
raise
con.close()
log("Done")
ret_obj["status"] = 200
return ret_obj
except urllib2.HTTPError, e:
err = str(e)
log("HTTPError : " + err)
log("HTTPError - Headers: " + str(e.headers) + " - Content: " + e.fp.read())
params["error"] = str(int(get("error", "0")) + 1)
ret = fetchPage(params)
if not "content" in ret and e.fp:
ret["content"] = e.fp.read()
return ret
ret_obj["status"] = 500
return ret_obj
except urllib2.URLError, e:
err = str(e)
log("URLError : " + err)
time.sleep(3)
params["error"] = str(int(get("error", "0")) + 1)
ret_obj = fetchPage(params)
return ret_obj
################################################################################
# blank out all the window properties
def clearProperties():
log("Clearing Properties")
try:
setProperty(WEATHER_WINDOW, 'Weather.IsFetched',"false")
setProperty(WEATHER_WINDOW, 'Current.IsFetched',"false")
setProperty(WEATHER_WINDOW, 'Today.IsFetched' ,"false")
setProperty(WEATHER_WINDOW, 'Daily.IsFetched' ,"false")
setProperty(WEATHER_WINDOW, 'Radar')
setProperty(WEATHER_WINDOW, 'Video.1')
#now clear all the XBMC current weather properties
setProperty(WEATHER_WINDOW, 'Current.Condition')
setProperty(WEATHER_WINDOW, 'Current.ConditionLong')
setProperty(WEATHER_WINDOW, 'Current.Temperature')
setProperty(WEATHER_WINDOW, 'Current.Wind')
setProperty(WEATHER_WINDOW, 'Current.WindDirection')
setProperty(WEATHER_WINDOW, 'Current.WindDegree')
setProperty(WEATHER_WINDOW, 'Current.WindGust')
setProperty(WEATHER_WINDOW, 'Current.Pressure')
setProperty(WEATHER_WINDOW, 'Current.FireDanger')
setProperty(WEATHER_WINDOW, 'Current.FireDangerText')
setProperty(WEATHER_WINDOW, 'Current.Visibility')
setProperty(WEATHER_WINDOW, 'Current.Humidity')
setProperty(WEATHER_WINDOW, 'Current.FeelsLike')
setProperty(WEATHER_WINDOW, 'Current.DewPoint')
setProperty(WEATHER_WINDOW, 'Current.UVIndex')
setProperty(WEATHER_WINDOW, 'Current.OutlookIcon', "na.png")
setProperty(WEATHER_WINDOW, 'Current.ConditionIcon', "na.png")
setProperty(WEATHER_WINDOW, 'Current.FanartCode')
setProperty(WEATHER_WINDOW, 'Current.Sunrise')
setProperty(WEATHER_WINDOW, 'Current.Sunset')
setProperty(WEATHER_WINDOW, 'Today.Sunrise')
setProperty(WEATHER_WINDOW, 'Today.Sunset')
setProperty(WEATHER_WINDOW, 'Today.moonphase')
setProperty(WEATHER_WINDOW, 'Current.RainSince9')
setProperty(WEATHER_WINDOW, 'Current.RainLastHr')
setProperty(WEATHER_WINDOW, 'Current.Precipitation')
setProperty(WEATHER_WINDOW, 'Current.ChancePrecipitation')
setProperty(WEATHER_WINDOW, 'Current.SolarRadiation')
#and all the properties for the forecast
for count in range(0,7):
setProperty(WEATHER_WINDOW, 'Day%i.Title' % count)
setProperty(WEATHER_WINDOW, 'Day%i.RainChance' % count)
setProperty(WEATHER_WINDOW, 'Day%i.RainChanceAmount' % count)
setProperty(WEATHER_WINDOW, 'Day%i.ChancePrecipitation' % count)
setProperty(WEATHER_WINDOW, 'Day%i.Precipitation' % count)
setProperty(WEATHER_WINDOW, 'Day%i.HighTemp' % count)
setProperty(WEATHER_WINDOW, 'Day%i.LowTemp' % count)
setProperty(WEATHER_WINDOW, 'Day%i.HighTemperature' % count)
setProperty(WEATHER_WINDOW, 'Day%i.LowTemperature' % count)
setProperty(WEATHER_WINDOW, 'Day%i.Outlook' % count)
setProperty(WEATHER_WINDOW, 'Day%i.LongOutlookDay' % count)
setProperty(WEATHER_WINDOW, 'Day%i.OutlookIcon' % count, "na.png")
setProperty(WEATHER_WINDOW, 'Day%i.ConditionIcon' % count, "na.png")
setProperty(WEATHER_WINDOW, 'Day%i.FanartCode' % count)
setProperty(WEATHER_WINDOW, 'Day.%i.ShortDate' % count)
setProperty(WEATHER_WINDOW, 'Day.%i.ShortDay' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.Title' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.RainChance' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.RainChanceAmount' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.ChancePrecipitation' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.Precipitation' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.HighTemp' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.LowTemp' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.HighTemperature' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.LowTemperature' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.Outlook' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.LongOutlookDay' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.OutlookIcon' % count, "na.png")
setProperty(WEATHER_WINDOW, 'Daily.%i.ConditionIcon' % count, "na.png")
setProperty(WEATHER_WINDOW, 'Daily.%i.FanartCode' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.ShortDate' % count)
setProperty(WEATHER_WINDOW, 'Daily.%i.ShortDay' % count)
except Exception as inst:
log("********** OzWeather Couldn't clear all the properties, sorry!!", inst)
################################################################################
# set the location and radar code properties
def refresh_locations():
log("Refreshing locations from settings")
location_set1 = ADDON.getSetting('Location1')
location_set2 = ADDON.getSetting('Location2')
location_set3 = ADDON.getSetting('Location3')
locations = 0
if location_set1 != '':
locations += 1
setProperty(WEATHER_WINDOW, 'Location1', location_set1)
else:
setProperty(WEATHER_WINDOW, 'Location1', '')
if location_set2 != '':
locations += 1
setProperty(WEATHER_WINDOW, 'Location2', location_set2)
else:
setProperty(WEATHER_WINDOW, 'Location2', '')
if location_set3 != '':
locations += 1
setProperty(WEATHER_WINDOW, 'Location3', location_set3)
else:
setProperty(WEATHER_WINDOW, 'Location3', '')
setProperty(WEATHER_WINDOW, 'Locations', str(locations))
log("Refreshing radar locations from settings")
radar_set1 = ADDON.getSetting('Radar1')
radar_set2 = ADDON.getSetting('Radar2')
radar_set3 = ADDON.getSetting('Radar3')
radars = 0
if radar_set1 != '':
radars += 1
setProperty(WEATHER_WINDOW, 'Radar1', radar_set1)
else:
setProperty(WEATHER_WINDOW, 'Radar1', '')
if radar_set2 != '':
radars += 1
setProperty(WEATHER_WINDOW, 'Radar2', radar_set2)
else:
setProperty(WEATHER_WINDOW, 'Radar2', '')
if radar_set3 != '':
radars += 1
setProperty(WEATHER_WINDOW, 'Radar3', radar_set3)
else:
setProperty(WEATHER_WINDOW, 'Radar3', '')
    setProperty(WEATHER_WINDOW, 'Radars', str(radars))
################################################################################
# The main forecast retrieval function
# Does either a basic forecast or a more extended forecast with radar etc.
# if the appropriate setting is set
def forecast(url, radarCode):
log("Called forecast()")
#pull in the paths
global RADAR_BACKGROUNDS_PATH, LOOP_IMAGES_PATH
#make sure updates look neat
clearProperties()
    #check if we're doing just a basic data update or data and images
extendedFeatures = ADDON.getSetting('ExtendedFeaturesToggle')
log("Getting weather from " + url + ", Extended features = " + str(extendedFeatures))
#ok now we want to build the radar images first, looks neater
if extendedFeatures == "true":
log("Extended feature powers -> activate!")
#strings to store the paths we will use
RADAR_BACKGROUNDS_PATH = xbmc.translatePath("special://profile/addon_data/weather.ozweather/radarbackgrounds/" + radarCode + "/");
LOOP_IMAGES_PATH = xbmc.translatePath("special://profile/addon_data/weather.ozweather/currentloop/" + radarCode + "/");
log("Build radar images")
buildImages(radarCode)
setProperty(WEATHER_WINDOW, 'Radar', radarCode)
#and now get and set all the temperatures etc.
log("Get the forecast data from weatherzone.com.au: " + url)
try:
        #parseDOM's fetchPage chokes if there is any latin-1 encoded stuff, but try it first anyway
data = common.fetchPage({"link":url})
except:
        #if that fails try our local version as a fallback
try:
data = fetchPage({"link":url})
except Exception as inst:
log("Error, couldn't fetchPage weather page from WeatherZone [" + url + "]- error: ", inst)
try:
data = {}
data["content"] = urllib2.urlopen(url)
except:
log("Error, couldn't urlopen weather page from WeatherZone [" + url + "]- error: ", inst)
if data != '' and data is not None:
try:
propertiesPDOM(data["content"], extendedFeatures)
except Exception as inst:
log("Error, there is no content returned it seems? Error: ", inst)
setProperty(WEATHER_WINDOW, "Weather.IsFetched", "false")
else:
log("Weatherzone returned empty data??!")
setProperty(WEATHER_WINDOW, "Weather.IsFetched", "false")
################################################################################
# Downloads a radar background given a BOM radar code like IDR023 & filename
# Converts the image from indexed colour to RGBA colour
def downloadBackground(radarCode, fileName):
global RADAR_BACKGROUNDS_PATH, LOOP_IMAGES_PATH
outFileName = fileName
#the legend file doesn't have the radar code in the filename
if fileName == "IDR.legend.0.png":
outFileName = "legend.png"
else:
#append the radar code
fileName = radarCode + "." + fileName
#are the backgrounds stale?
if xbmcvfs.exists( RADAR_BACKGROUNDS_PATH + outFileName ):
fileCreation = os.path.getmtime( RADAR_BACKGROUNDS_PATH + outFileName)
now = time.time()
weekAgo = now - 7*60*60*24 # Number of seconds in a week
#log ("filec " + str(fileCreation) + " dayAgo " + str(dayAgo))
if fileCreation < weekAgo:
log("Background older than one week - let's refresh - " + outFileName)
os.remove(RADAR_BACKGROUNDS_PATH + outFileName)
#download the backgrounds only if we don't have them yet
if not xbmcvfs.exists( RADAR_BACKGROUNDS_PATH + outFileName ):
log("Downloading missing background image...." + outFileName)
#import PIL only if we need it so the add on can be run for data only
#on platforms without PIL
#log("Importing PIL as extra features are activated.")
from PIL import Image
#ok get ready to retrieve some images
image = urllib.URLopener()
#the legend image showing the rain scale
try:
imageFileIndexed = RADAR_BACKGROUNDS_PATH + "idx." + fileName
imageFileRGB = RADAR_BACKGROUNDS_PATH + outFileName
try:
image.retrieve(FTPSTUB + fileName, imageFileIndexed )
except:
log("ftp failed, let's try http instead...")
try:
image.retrieve(HTTPSTUB + fileName, imageFileIndexed )
except:
log("http failed too.. sad face :( ")
#jump to the outer exception
raise
#got here, we must have an image
log("Downloaded background texture...now converting from indexed to RGB - " + fileName)
im = Image.open( imageFileIndexed )
rgbimg = im.convert('RGBA')
rgbimg.save(imageFileRGB, "PNG")
os.remove(imageFileIndexed)
except Exception as inst:
log("Error, couldn't retrieve " + fileName + " - error: ", inst)
#ok try and get it via http instead?
#try REALLY hard to get at least the background image
try:
#ok so something is wrong with image conversion - probably a PIL issue, so let's just get a minimal BG image
if "background.png" in fileName:
if not '00004' in fileName:
image.retrieve(FTPSTUB + fileName, imageFileRGB )
else:
#national radar loop uses a different BG for some reason...
image.retrieve(FTPSTUB + 'IDE00035.background.png', imageFileRGB )
except Exception as inst2:
log("No, really, -> Error, couldn't retrieve " + fileName + " - error: ", inst2)
def prepareBackgrounds(radarCode):
log("Called prepareBackgrounds() with radarCode [" + radarCode + "]")
downloadBackground(radarCode, "IDR.legend.0.png")
downloadBackground(radarCode, "background.png")
#these images don't exist for the national radar, so don't try and get them
if radarCode != "IDR00004":
downloadBackground(radarCode, "locations.png")
downloadBackground(radarCode, "range.png")
downloadBackground(radarCode, "topography.png")
downloadBackground(radarCode, "catchments.png")
################################################################################
# Builds the radar images given a BOM radar code like IDR023
# the radar images are downloaded with each update (~60kb each time)
def buildImages(radarCode):
log("Called buildImages with radarCode: " + radarCode + " and loop path " + LOOP_IMAGES_PATH + " and radar path " + RADAR_BACKGROUNDS_PATH)
#remove the temporary files - we only want fresh radar files
#this results in maybe ~60k used per update.
if os.path.exists( LOOP_IMAGES_PATH ):
log("os.path Removing previous radar files")
shutil.rmtree( LOOP_IMAGES_PATH , ignore_errors=True)
#we need make the directories to store stuff if they don't exist
    #delay hack is here to make sure the OS has actually released the handle
#from the rmtree call above before we try and make the directory
if not os.path.exists( RADAR_BACKGROUNDS_PATH ):
attempts = 0
success = False
while not success and (attempts < 20):
try:
os.makedirs( RADAR_BACKGROUNDS_PATH )
success = True
log("Successfully created " + RADAR_BACKGROUNDS_PATH)
except:
attempts += 1
time.sleep(0.1)
if not success:
log("ERROR: Failed to create directory for radar background images!")
return
if not os.path.exists( LOOP_IMAGES_PATH ):
attempts = 0
success = False
while not success and (attempts < 20):
try:
os.makedirs( LOOP_IMAGES_PATH )
success = True
log("Successfully created " + LOOP_IMAGES_PATH)
except:
attempts += 1
time.sleep(0.1)
if not success:
log("ERROR: Failed to create directory for loop images!")
return
log("Prepare the backgrounds if necessary...")
prepareBackgrounds(radarCode)
    #Ok so we have the backgrounds... now it is time to get the loop
#first we retrieve a list of the available files via ftp
#ok get ready to retrieve some images
log("Download the radar loop")
image = urllib.URLopener()
files = []
log("Log in to BOM FTP")
ftp = ftplib.FTP("ftp.bom.gov.au")
ftp.login("anonymous", "anonymous@anonymous.org")
ftp.cwd("/anon/gen/radar/")
log("Get files list")
#connected, so let's get the list
try:
files = ftp.nlst()
except ftplib.error_perm, resp:
if str(resp) == "550 No files found":
log("No files in BOM ftp directory!")
else:
log("Something wrong in the ftp bit of radar images")
log("Download the files...")
#ok now we need just the matching radar files...
loopPicNames = []
for f in files:
if radarCode in f:
loopPicNames.append(f)
#download the actual images, might as well get the longest loop they have
for f in loopPicNames:
#ignore the composite gif...
if f[-3:] == "png":
imageToRetrieve = "ftp://anonymous:someone%40somewhere.com@ftp.bom.gov.au//anon/gen/radar/" + f
log("Retrieving radar image: " + imageToRetrieve)
try:
image.retrieve(imageToRetrieve, LOOP_IMAGES_PATH + "/" + f )
except Exception as inst:
log("Failed to retrieve radar image: " + imageToRetrieve + ", oh well never mind!", inst )
################################################################################
# this is the main scraper function that uses parseDOM to scrape the
# data from the weatherzone site.
def propertiesPDOM(page, extendedFeatures):
log("Use PDOM to pull weather forecast data")
####CURRENT DATA
try:
#pull data from the current observations table
ret = common.parseDOM(page, "div", attrs = { "class": "details_lhs" })
observations = common.parseDOM(ret, "td", attrs = { "class": "hilite" })
        #old style website parsing
if not observations:
observations = common.parseDOM(ret, "td", attrs = { "class": "hilite bg_yellow" })
#Observations now looks like - ['18.3°C', '4.7°C', '18.3°C', '41%', 'SSW 38km/h', '48km/h', '1015.7hPa', '-', '0.0mm / -']
log("Current Conditions Retrieved: " + str(observations))
temperature = str(int(round(float(observations[0].strip( '°C' )))))
dewPoint = str(int(round(float(observations[1].strip( '°C' )))))
feelsLike = str(int(round(float(observations[2].strip( '°C' )))))
humidity = observations[3].strip('%')
windTemp = observations[4].partition(' ')
try:
pressure = str(int(round(float(observations[6].strip('hPa')))))
except:
pressure = "n/a"
fireDanger = observations[7]
fireDangerFloat = float(fireDanger)
if 0.0 <= fireDangerFloat <= 11.99:
fireDangerText = "Low - Moderate"
elif 12.0 <= fireDangerFloat <= 24.99:
fireDangerText = "High"
elif 25.0 <= fireDangerFloat <= 49.99:
fireDangerText = "Very High"
elif 50.0 <= fireDangerFloat <= 74.99:
fireDangerText = "Severe"
elif 75.0 <= fireDangerFloat <= 99.99:
fireDangerText = "Extreme"
elif fireDangerFloat >= 100.0:
fireDangerText = "Catastrophic"
else:
fireDangerText = "?"
#make this an int for space reasons
fireDanger = str(int(round(float(observations[7]))))
log("pressure " + pressure)
rainSince = observations[8].partition('/')
log("Rain Since: " + str(rainSince))
rainSince9 = str(rainSince[0].strip())
rainLastHr = str(rainSince[2].strip())
windDirection = windTemp[0]
windSpeed = windTemp[2].strip( 'km/h')
windGusts = observations[5].strip('km/h')
#there's no UV so we get that from the forecast, see below
except Exception as inst:
log("********** OzWeather Couldn't Parse Observations Data, sorry!!", inst)
setProperty(WEATHER_WINDOW, 'Current.Condition', "Error parsing observations!")
setProperty(WEATHER_WINDOW, 'Current.ConditionLong', "Error - Couldn't retrieve current weather data from WeatherZone - this is usually just a temporary problem with their server and with any luck they'll fix it soon!")
setProperty(WEATHER_WINDOW, "Weather.IsFetched", "false")
####END CURRENT DATA
try:
        #pull data from the astrological table
ret = common.parseDOM(page, "div", attrs = { "class": "details_rhs" })
observations = common.parseDOM(ret, "td", attrs = { "class": "hilite" })
#old style website parsing
if not observations:
observations = common.parseDOM(ret, "td", attrs = { "class": "hilite bg_yellow" })
log("Astrological Retrieved: " + str(observations))
sunrise = str(observations[0])
sunset = str(observations[1])
except Exception as inst:
log("********** OzWeather Couldn't Parse Astrological Data, sorry!!", inst)
####FORECAST DATA
try:
#pull the basic data from the forecast table
ret = common.parseDOM(page, "table", attrs = { "id": "forecast-table" })
#old style website
if not ret:
ret = common.parseDOM(page, "div", attrs = { "class": "boxed_blue_nopad" })
#create lists of each of the maxes, mins, and descriptions
#Get the days UV in text form like 'Extreme' and number '11'
UVchunk = common.parseDOM(ret, "td", attrs = { "style": "text-align: center;" })
shortDesc = common.parseDOM(ret, "td", attrs = { "class": "bg_yellow" })
shortDesc = common.parseDOM(ret, "span", attrs = { "style": "font-size: 0.9em;" })
else:
trs = common.parseDOM(ret, "tr")
# log("TRS is: " + str(trs))
UVchunk = common.parseDOM(trs[6], "td", attrs = { "style": "text-align: center;" })
shortDesc = common.parseDOM(trs[1], "td", attrs = { })
shortDesc = common.parseDOM(shortDesc, "span", attrs = { })
#create lists of each of the maxes, mins, and descriptions
#Get the days UV in text form like 'Extreme' and number '11'
# log("UVchunk is: " + str(UVchunk))
UVtext = common.parseDOM(UVchunk, "span")
UVnumber = common.parseDOM(UVchunk, "span", ret = "title")
UV = UVtext[0] + ' (' + UVnumber[0] + ')'
#get the 7 day max min forecasts
maxMin = common.parseDOM(ret, "td")
#log( "maxmin is " + str(maxMin))
maxList = stripList(maxMin[7:14],'°C')
minList = stripList(maxMin[14:21],'°C')
rainChanceList = stripList(maxMin[21:28],'')
rainAmountList = stripList(maxMin[28:35],'')
# log (str(rainChanceList) + str(rainAmountList))
#and the short forecasts
shortDesc = shortDesc[0:7]
log(" shortDesc is " + str(shortDesc))
for count, desc in enumerate(shortDesc):
    #title-case each description (this also normalises words like
    #'ThunderStorms' and 'windy') and strip any stray '-<br />' markup,
    #which .title() will have capitalised to '-<Br />'
    shortDesc[count] = desc.title().replace('-<Br />', '').replace('-<br />', '')
#log the collected data, helpful for finding errors
log("Collected data: shortDesc [" + str(shortDesc) + "] maxList [" + str(maxList) +"] minList [" + str(minList) + "]")
#and the names of the days
days = common.parseDOM(ret, "span", attrs = { "style": "font-size: larger;" })
days = common.parseDOM(ret, "span", attrs = { "class": "bold" })
days = days[0:7]
for count, day in enumerate(days):
days[count] = DAYS[day]
#get the longer current forecast for the day
# or just use the short one if this is disabled in settings
if extendedFeatures == "true":
longDayCast = common.parseDOM(page, "div", attrs = { "class": "top_left" })
longDayCast = common.parseDOM(longDayCast, "p" )
longDayCast = common.stripTags(longDayCast[0])
longDayCast = longDayCast.replace( '\t','')
longDayCast = longDayCast.replace( '\r',' ')
longDayCast = longDayCast.replace( '&','&')
longDayCast = longDayCast[:-1]
else:
longDayCast = shortDesc[0]
#if for some reason the codes change return a neat 'na' response
try:
weathercode = WEATHER_CODES[shortDesc[0]]
except:
weathercode = 'na'
except Exception as inst:
log("********** OzWeather Couldn't Parse Forecast Data, sorry!!", inst)
setProperty(WEATHER_WINDOW, 'Current.Condition', "Error parsing data!")
setProperty(WEATHER_WINDOW, 'Current.ConditionLong', "Error - Couldn't retrieve forecast weather data from WeatherZone - this is usually just a temporary problem with their server and with any luck they'll fix it soon!")
setProperty(WEATHER_WINDOW, "Weather.IsFetched", "false")
#END FORECAST DATA
#moonphase
try:
ret = common.parseDOM(page, "table", attrs = { "class": "astronomy" })
#pull the moon phase image title out of the astronomy table
#log("ret is " + str(ret))
moonChunk = common.parseDOM(ret, "td", attrs = { "align":"center", "valign":"middle"})
#log("moonChunk is " + str(moonChunk))
moonPhase = common.parseDOM(moonChunk, "img", ret="title")[0]
#log("&&&&& " + str(moonChunk))
log("Moonphase is: " + str(moonPhase[0]))
except Exception as inst:
log("OzWeather Couldn't Find a Moonphase, sorry!", inst)
moonPhase = ""
#ABC VIDEO URL
# note date and quality level variables...
#view source on http://www.abc.net.au/news/abcnews24/weather-in-90-seconds/ and find mp4 to see this list,
#the end of the URL can change regularly
# {'url': 'http://mpegmedia.abc.net.au/news/news24/weather/video/201403/WINs_Weather1_0703_1000k.mp4', 'contentType': 'video/mp4', 'codec': 'AVC', 'bitrate': '928', 'width': '1024', 'height': '576', 'filesize': '11657344'}
# {'url': 'http://mpegmedia.abc.net.au/news/news24/weather/video/201403/WINs_Weather1_0703_256k.mp4', 'contentType': 'video/mp4', 'codec': 'AVC', 'bitrate': '170', 'width': '320', 'height': '180', 'filesize': '2472086'}
# {'url': 'http://mpegmedia.abc.net.au/news/news24/weather/video/201403/WINs_Weather1_0703_512k.mp4', 'contentType': 'video/mp4', 'codec': 'AVC', 'bitrate': '400', 'width': '512', 'height': '288', 'filesize': '5328218'}
# {'url': 'http://mpegmedia.abc.net.au/news/news24/weather/video/201403/WINs_Weather1_0703_trw.mp4', 'contentType': 'video/mp4', 'codec': 'AVC', 'bitrate': '1780', 'width': '1280', 'height': '720', 'filesize': '21599356'}
#Other URLs - should match any of these
#http%3A//mpegmedia.abc.net.au/news/news24/wins/201409/WINm_Update1_0909_VSB03WF2_512k.mp4&
# http://mpegmedia.abc.net.au/news/news24/wins/201409/WINs_Weather2_0209_trw.mp4
#Thus
#//mpegmedia.abc.net.au/news/news24/wins/(.+?)/WIN(.*?)_512k.mp4
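#A quick illustration of that pattern against the first sample URL above:
#  re.findall(pattern_video, "http%3A//mpegmedia.abc.net.au/news/news24/wins/201409/WINm_Update1_0909_VSB03WF2_512k.mp4")
#  returns [('201409', 'm_Update1_0909_VSB03WF2')]
#so video[0][0] is the date folder and video[0][1] the clip name fragment
#used to rebuild the URL below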
try:
log("Trying to get ABC weather video URL")
abcURL = "http://www.abc.net.au/news/abcnews24/weather-in-90-seconds/"
req = urllib2.Request(abcURL)
response = urllib2.urlopen(req)
htmlSource = str(response.read())
pattern_video = "//mpegmedia.abc.net.au/news/news24/wins/(.+?)/WIN(.*?)_512k.mp4"
video = re.findall( pattern_video, htmlSource )
log("Video url parts: " + str(video))
try:
qual = ADDON.getSetting("ABCQuality")
if qual=="Best":
qual="trw"
url = "http://mpegmedia.abc.net.au/news/news24/wins/"+ video[0][0] + "/WIN" + video[0][1] + "_" + qual + ".mp4"
log("Built url " + url)
setProperty(WEATHER_WINDOW, 'Video.1',url)
except Exception as inst:
log("Couldn't get ABC video URL from page", inst)
except Exception as inst:
log("********** Couldn't get ABC video page", inst)
#END ABC VIDEO URL
# set all the XBMC window properties.
# wrap it in a try: in case something goes wrong, it's better than crashing out...
#SET PROPERTIES
try:
#now set all the XBMC current weather properties
setProperty(WEATHER_WINDOW, 'WeatherProviderLogo' , xbmc.translatePath(os.path.join(CWD, 'resources', 'banner.png')))
setProperty(WEATHER_WINDOW, 'WeatherProvider' , 'Bureau of Meteorology Australia (via WeatherZone)')
setProperty(WEATHER_WINDOW, 'WeatherVersion' , ADDONNAME + "-" + VERSION)
setProperty(WEATHER_WINDOW, 'Current.Condition' , shortDesc[0])
setProperty(WEATHER_WINDOW, 'Current.ShortOutlook' , shortDesc[0])
setProperty(WEATHER_WINDOW, 'Current.ConditionLong' , longDayCast)
setProperty(WEATHER_WINDOW, 'Current.Temperature' , temperature)
setProperty(WEATHER_WINDOW, 'Current.WindGust' , windGusts)
setProperty(WEATHER_WINDOW, 'Current.Wind' , windSpeed)
setProperty(WEATHER_WINDOW, 'Current.WindDegree' , windDirection)
setProperty(WEATHER_WINDOW, 'Current.WindDirection' , windDirection)
setProperty(WEATHER_WINDOW, 'Current.Pressure' , pressure)
setProperty(WEATHER_WINDOW, 'Current.FireDanger' , fireDanger)
setProperty(WEATHER_WINDOW, 'Current.FireDangerText' , fireDangerText)
setProperty(WEATHER_WINDOW, 'Current.Humidity' , humidity)
setProperty(WEATHER_WINDOW, 'Current.FeelsLike' , feelsLike)
setProperty(WEATHER_WINDOW, 'Current.DewPoint' , dewPoint)
setProperty(WEATHER_WINDOW, 'Current.UVIndex' , UV)
setProperty(WEATHER_WINDOW, 'Current.Sunrise' , sunrise)
setProperty(WEATHER_WINDOW, 'Current.Sunset' , sunset)
setProperty(WEATHER_WINDOW, 'Current.Precipitation' , rainSince9)
setProperty(WEATHER_WINDOW, 'Current.RainSince9' , rainSince9)
setProperty(WEATHER_WINDOW, 'Current.RainLastHr' , rainLastHr)
setProperty(WEATHER_WINDOW, 'Current.OutlookIcon' , '%s.png' % weathercode)
setProperty(WEATHER_WINDOW, 'Current.ConditionIcon' , '%s.png' % weathercode)
setProperty(WEATHER_WINDOW, 'Current.FanartCode' , weathercode)
setProperty(WEATHER_WINDOW, 'Current.IsFetched' , "true")
setProperty(WEATHER_WINDOW, 'Today.IsFetched' , "true")
setProperty(WEATHER_WINDOW, 'Today.Sunrise' , sunrise)
setProperty(WEATHER_WINDOW, 'Today.Sunset' , sunset)
setProperty(WEATHER_WINDOW, 'Today.moonphase' , moonPhase)
#we only have one long description available so set it here instead of in the loop
setProperty(WEATHER_WINDOW, 'Daily.0.LongOutlookDay' , longDayCast)
#and all the properties for the forecast
for count, desc in enumerate(shortDesc):
try:
weathercode = WEATHER_CODES[shortDesc[count]]
except:
weathercode = 'na'
day = days[count]
tdate = datetime.date.today() #establishes current date
futureDate = tdate + datetime.timedelta(days=count) #establishes the future dates one at a time
newdatetuple = time.strptime(str(futureDate),'%Y-%m-%d')#creates a time tuple of that future date
goodshortDate = time.strftime('%d %b', newdatetuple) #sets the format of the time tuple, taken from this table http://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
#trim leading zero if present
if goodshortDate.startswith("0"):
goodshortDate = goodshortDate[1:]
shortDay = str(time.strftime('%a', newdatetuple)).upper()
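#e.g. a futureDate of 2014-03-08 yields goodshortDate '8 Mar' (after the
#leading zero is trimmed) and shortDay 'SAT'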
#these are the old style labels, use a range of 0 to 6
setProperty(WEATHER_WINDOW, 'Day%i.ShortDate' % count, str(goodshortDate))
setProperty(WEATHER_WINDOW, 'Day%i.ShortDay' % count, shortDay)
setProperty(WEATHER_WINDOW, 'Day%i.Title' % count, day)
setProperty(WEATHER_WINDOW, 'Day%i.ChancePrecipitation' % count, rainChanceList[count])
setProperty(WEATHER_WINDOW, 'Day%i.Precipitation' % count, common.replaceHTMLCodes(rainAmountList[count]))
setProperty(WEATHER_WINDOW, 'Day%i.RainChance' % count, rainChanceList[count])
setProperty(WEATHER_WINDOW, 'Day%i.RainChanceAmount' % count, common.replaceHTMLCodes(rainAmountList[count]))
setProperty(WEATHER_WINDOW, 'Day%i.HighTemperature' % count, maxList[count])
setProperty(WEATHER_WINDOW, 'Day%i.HighTemp' % count, maxList[count])
setProperty(WEATHER_WINDOW, 'Day%i.LowTemperature' % count, minList[count])
setProperty(WEATHER_WINDOW, 'Day%i.LowTemp' % count, minList[count])
setProperty(WEATHER_WINDOW, 'Day%i.Outlook' % count, desc)
setProperty(WEATHER_WINDOW, 'Day%i.OutlookIcon' % count, '%s.png' % weathercode)
setProperty(WEATHER_WINDOW, 'Day%i.ConditionIcon' % count, '%s.png' % weathercode)
setProperty(WEATHER_WINDOW, 'Day%i.FanartCode' % count, weathercode)
#the new Daily labels run from 1 to 7
setProperty(WEATHER_WINDOW, 'Daily.%i.ShortDate' % (count + 1), str(goodshortDate))
setProperty(WEATHER_WINDOW, 'Daily.%i.ShortDay' % (count + 1), shortDay)
setProperty(WEATHER_WINDOW, 'Daily.%i.Title' % (count + 1), day)
setProperty(WEATHER_WINDOW, 'Daily.%i.ChancePrecipitation' % (count + 1), rainChanceList[count])
setProperty(WEATHER_WINDOW, 'Daily.%i.Precipitation' % (count + 1), common.replaceHTMLCodes(rainAmountList[count]))
setProperty(WEATHER_WINDOW, 'Daily.%i.RainChance' % (count + 1), rainChanceList[count])
setProperty(WEATHER_WINDOW, 'Daily.%i.RainChanceAmount' % (count + 1), common.replaceHTMLCodes(rainAmountList[count]))
setProperty(WEATHER_WINDOW, 'Daily.%i.HighTemperature' % (count + 1), maxList[count] + TEMPUNIT)
setProperty(WEATHER_WINDOW, 'Daily.%i.HighTemp' % (count + 1), maxList[count] + TEMPUNIT)
setProperty(WEATHER_WINDOW, 'Daily.%i.LowTemperature' % (count + 1), minList[count] + TEMPUNIT)
setProperty(WEATHER_WINDOW, 'Daily.%i.LowTemp' % (count + 1), minList[count] + TEMPUNIT)
setProperty(WEATHER_WINDOW, 'Daily.%i.Outlook' % (count + 1), desc)
setProperty(WEATHER_WINDOW, 'Daily.%i.OutlookIcon' % (count + 1), WEATHER_ICON % weathercode)
setProperty(WEATHER_WINDOW, 'Daily.%i.ConditionIcon' % (count + 1), WEATHER_ICON % weathercode)
setProperty(WEATHER_WINDOW, 'Daily.%i.FanartCode' % (count + 1), weathercode)
setProperty(WEATHER_WINDOW, 'Forecast.IsFetched' , "true")
setProperty(WEATHER_WINDOW, 'Forecast.City' , ADDON.getSetting('Location%s' % sys.argv[1]))
setProperty(WEATHER_WINDOW, 'Forecast.Country' , "Australia")
setProperty(WEATHER_WINDOW, 'Forecast.Updated' , time.strftime("%d/%m/%Y %H:%M"))
setProperty(WEATHER_WINDOW, 'Daily.IsFetched' , "true")
#Ok, if we got here we're done
setProperty(WEATHER_WINDOW, "Weather.IsFetched", "true")
except Exception as inst:
log("********** OzWeather Couldn't set all the properties, sorry!!", inst)
##############################################
### NOW ACTUALLY RUN THIS PUPPY - this is main() in the old language...
footprints()
socket.setdefaulttimeout(100)
#we're being called from the settings section where the user enters their postcodes
if sys.argv[1].startswith('Location'):
keyboard = xbmc.Keyboard('', LANGUAGE(32195), False)
keyboard.doModal()
if (keyboard.isConfirmed() and keyboard.getText() != ''):
text = keyboard.getText()
log("Doing locations search for " + text)
#need to submit the postcode to the weatherzone search
searchURL = WEATHERZONE_URL + '/search/'
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
host = 'www.weatherzone.com.au'
headers = { 'User-Agent' : user_agent, 'Host' : host }
values = {'q' : text, 't' : '3' }
data = urllib.urlencode(values)
req = urllib2.Request(searchURL, data, headers)
response = urllib2.urlopen(req)
resultPage = str(response.read())
#was there only one match? If so it returns the page for that match so we need to check the URL
responseurl = response.geturl()
log("Response page url: " + responseurl)
if not responseurl.endswith('weatherzone.com.au/search/'):
#we were redirected to an actual result page
locationName = common.parseDOM(resultPage, "h1", attrs = { "class": "local" })
#old style website
if not locationName:
locationName = common.parseDOM(resultPage, "h1", attrs = { "class": "unenclosed" })
locationName = locationName[0].split('Weather')
locations = [locationName[0] + ', ' + text]
locationids = [responseurl]
log("Single result " + str(locations) + " URL " + str(locationids))
else:
#we got back a page to choose a more specific location
try:
locations=[]
locationids=[]
#initialise these so the error log in the except clause below can't raise NameError
middle = []
skimmed = []
skimmed = common.parseDOM(resultPage, "ul", attrs = { "class": "typ2" })
#old style website parsing
if not skimmed:
middle = common.parseDOM(resultPage, "div", attrs = { "id": "structure_middle" })
skimmed = common.parseDOM(middle, "ul", attrs = { "class": "typ2" })
#ok now get two lists - one of the friendly names
#and a matching one of the URLs to store
locations = common.parseDOM(skimmed[0], "a")
templocs = common.parseDOM(skimmed[0], "a", ret="href")
#build the full urls
locationids = []
for count, loc in enumerate(templocs):
locationids.append(WEATHERZONE_URL + '/' + loc)
#if we did not get enough data back there are no locations with this postcode
if len(locations)<=1:
log("No locations found with this postcode")
locations = []
locationids = []
log("Multiple result " + str(locations) + " URLs " + str(locationids))
except:
log("Error - middle: " + str(middle) + " skimmed " + str(skimmed))
#now get them to choose an actual location
dialog = xbmcgui.Dialog()
if locations != []:
selected = dialog.select(xbmc.getLocalizedString(396), locations)
if selected != -1:
ADDON.setSetting(sys.argv[1], locations[selected])
ADDON.setSetting(sys.argv[1] + 'id', locationids[selected])
else:
dialog.ok(ADDONNAME, xbmc.getLocalizedString(284))
#script is being called in general use, not from the settings page
#get the currently selected location and grab its forecast
else:
#retrieve the currently set location & radar
location = ""
location = ADDON.getSetting('Location%sid' % sys.argv[1])
radar = ""
radar = ADDON.getSetting('Radar%s' % sys.argv[1])
#make sure user has actually set a radar code
if radar == "":
log("Radar code empty for location " + location +" so using default radar code IDR00004 (national radar)")
radar = "IDR00004"
#now get a forecast
forecast(location, radar)
#refresh the locations and set the weather provider property
refresh_locations()
#and close out...
footprints(startup=False)
|
{
"content_hash": "4b17a434cd89b90a9351d154a812b5a5",
"timestamp": "",
"source": "github",
"line_count": 930,
"max_line_length": 228,
"avg_line_length": 47.66559139784946,
"alnum_prop": 0.5918698820185432,
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"id": "fe7bbf34f990c8783d3df55cb510a968913e4dc0",
"size": "45115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".kodi/addons/weather.ozweather/default.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6178"
},
{
"name": "Python",
"bytes": "8657978"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
import datetime
import StringIO
import pytest
from django.test import TestCase
from django.db import IntegrityError
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.contrib.auth.models import User
from py.path import local
from geoposition import Geoposition
from web.processors.event import list_countries
from api.models.events import Event
from api.models import UserProfile
from api.processors import get_event_by_id
from web.processors.event import create_or_update_event
from web.processors.event import change_event_status
from web.processors.event import reject_event_status
from api.processors import get_approved_events
from api.processors import get_next_or_previous
from api.processors import get_nearby_events
from web.processors.event import count_approved_events_for_country
class EventTestCase(TestCase):
def get_user(self):
return User.objects.get(pk=1)
def create_event(self, title="Event title",
start_date=datetime.datetime.now() + datetime.timedelta(days=0, hours=3),
end_date=datetime.datetime.now() + datetime.timedelta(days=1, hours=3),
country_code="SI", status="PENDING"):
event_data = {
"end_date": start_date,
"start_date": end_date,
"organizer": "Test organizer",
"creator": self.get_user(),
"title": title,
"pub_date": datetime.datetime.now(),
"country": country_code,
"geoposition": "46.05528,14.51444",
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"status": status,
}
return create_or_update_event(**event_data)
def setUp(self):
self.u1 = User.objects.create(username='user1')
self.up1 = UserProfile.objects.create(user=self.u1)
Event.objects.create(organizer="asdasd",
creator=User.objects.filter(pk=1)[0],
title="asdasd",
description="asdsad",
location="asdsad",
start_date=datetime.datetime.now(),
end_date=datetime.datetime.now(),
event_url="http://eee.com",
contact_person="ss@ss.com",
country="SI",
audience=[1],
theme=[1],
pub_date=datetime.datetime.now(),
tags=["tag1", "tag2"])
def test_get_event(self):
test_event = Event.objects.get(title="asdasd")
self.assertEqual(test_event, get_event_by_id(event_id=1))
def test_create_or_update_event(self):
test_event = create_or_update_event(event_id=1)
self.assertEqual(1, test_event.id)
def test_create_event_without_args(self):
with self.assertRaises(IntegrityError):
test_event = create_or_update_event()
def test_create_event_with_title_only(self):
with self.assertRaises(IntegrityError):
test_event = create_or_update_event(title="event title")
def test_create_event_with_organizer_only(self):
with self.assertRaises(IntegrityError):
event_data = {"organizer":"asdasd"}
test_event = create_or_update_event(**event_data)
def test_create_event_with_start_end_dates_only(self):
with self.assertRaises(IntegrityError):
test_event = create_or_update_event(start_date=datetime.datetime.now(), end_date=datetime.datetime.now())
def test_create_event_from_dictionary_with_missing_required_fields(self):
with self.assertRaises(IntegrityError):
event_data = {
"end_date": datetime.datetime.now(),
"start_date": datetime.datetime.now(),
"organizer": "some organizer"
}
test_event = create_or_update_event(**event_data)
def test_create_event_from_dictionary_with_all_required_fields(self):
event_data = {
"end_date": datetime.datetime.now(),
"start_date": datetime.datetime.now(),
"organizer": "some organizer",
"creator": User.objects.filter(pk=1)[0],
"title": "event title",
"pub_date": datetime.datetime.now(),
}
test_event = create_or_update_event(**event_data)
self.assertEqual(2, test_event.pk)
self.assertEqual("event title", test_event.title)
def test_create_event_from_dict_with_all_fields(self):
with open(local(__file__).dirname + '/../../static/img/team/alja.jpg') as fp:
io = StringIO.StringIO()
io.write(fp.read())
uploaded_picture = InMemoryUploadedFile(io, None, "alja.jpg", "jpeg", io.len, None)
uploaded_picture.seek(0)
event_data = {
"end_date": datetime.datetime.now(),
"start_date": datetime.datetime.now(),
"organizer": "some organizer",
"creator": User.objects.filter(pk=1)[0],
"title": "event title",
"pub_date": datetime.datetime.now(),
"country": "SI",
"geoposition": Geoposition(46.05528,14.51444),
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"tags": ["tag1", "tag2"],
"picture": uploaded_picture
}
test_event = create_or_update_event(**event_data)
self.assertEqual(2, test_event.pk)
self.assertEqual("Ljubljana", test_event.location)
self.assertEqual("46.05528", str(test_event.geoposition.latitude))
self.assertIn("tag1", test_event.tags.names())
self.assertIn("tag2", test_event.tags.names())
assert 'event_picture/alja' in test_event.picture.path
def test_get_approved_event_without_filter_returns_zero(self):
events = get_approved_events()
self.assertQuerysetEqual(events, [])
def test_get_approved_event_without_filter_with_approved_event(self):
self.create_event(start_date=datetime.datetime.now() + datetime.timedelta(days=0, hours=3),
end_date=datetime.datetime.now() + datetime.timedelta(days=1, hours=3),
status="APPROVED",)
events = get_approved_events()
self.assertEqual(1, len(events))
def test_get_approved_event_without_filter_with_approved_event_but_passed_date(self):
self.create_event(start_date=datetime.datetime.now() - datetime.timedelta(days=1, hours=3),
end_date=datetime.datetime.now() - datetime.timedelta(days=2, hours=3),
status="APPROVED")
events = get_approved_events()
self.assertEqual(0, len(events))
def test_get_approved_event_with_filter_country_code_with_approved_event(self):
self.create_event(country_code="IS", status="APPROVED")
events = get_approved_events(country_code="IS")
self.assertEqual(1, len(events))
self.assertEqual("IS", events[0].country.code)
def test_get_approved_event_with_filter_country_code_and_order_with_approved_event(self):
countries = ["IS", "DK", "FI", "FI", "LI"]
for index, country in enumerate(countries):
self.create_event(title="Testing event" + str(index + 1), country_code=country, status="APPROVED",
start_date=datetime.datetime.now() + datetime.timedelta(days=0, hours=index + 1),
end_date=datetime.datetime.now() + datetime.timedelta(days=1, hours=index + 1))
events = get_approved_events(order="start_date")
self.assertEqual(5, len(events))
self.assertEqual("IS", events[0].country.code)
self.assertEqual("DK", events[1].country.code)
self.assertEqual("FI", events[2].country.code)
self.assertEqual("FI", events[3].country.code)
self.assertEqual("LI", events[4].country.code)
def test_get_approved_event_with_filter_country_code_and_order_and_limit__with_approved_event(self):
countries = ["IS", "DK", "FI", "FI", "FI"]
for index, country in enumerate(countries):
self.create_event(title="Testing event" + str(index + 1), country_code=country, status="APPROVED",
start_date=datetime.datetime.now() + datetime.timedelta(days=0, hours=index + 1),
end_date=datetime.datetime.now() + datetime.timedelta(days=1, hours=index + 1))
events = get_approved_events(order="start_date", limit=2, country_code="FI")
self.assertEqual(2, len(events))
self.assertEqual("Testing event3", events[0].title)
self.assertEqual("Testing event4", events[1].title)
def test_get_next_or_previous_pending_event(self):
statuses = ["PENDING", "APPROVED", "PENDING"]
for status in statuses:
event = self.create_event(status=status)
test_event = Event.objects.get(pk=2)
next_event = get_next_or_previous(test_event)
self.assertEqual(4, next_event.pk)
test_event_2 = Event.objects.get(pk=4)
next_event_2 = get_next_or_previous(test_event_2)
self.assertEqual(None, next_event_2)
test_event_3 = Event.objects.get(pk=4)
previous_event = get_next_or_previous(test_event_3, direction=False)
self.assertEqual(2, previous_event.pk)
test_event_4 = Event.objects.get(pk=1)
previous_event_2 = get_next_or_previous(test_event_4, direction=False)
self.assertEqual(None, previous_event_2)
def test_get_nearby_events(self):
target_event = self.create_event(status="APPROVED")
nearby_event = self.create_event(status="APPROVED")
nearby = get_nearby_events(target_event)
self.assertEqual(1, len(nearby))
self.assertEqual(nearby_event.pk, nearby[0].pk)
self.create_event(status="APPROVED", country_code="HR")
nearby = get_nearby_events(target_event)
self.assertEqual(1, len(nearby))
self.create_event(status="PENDING")
nearby = get_nearby_events(target_event)
self.assertEqual(1, len(nearby))
def test_change_event_status(self):
pending_event = self.create_event(status="PENDING")
approved = change_event_status(pending_event.id)
self.assertEqual(approved.status, "APPROVED")
test_event = Event.objects.get(pk=pending_event.id)
self.assertEqual(test_event.status, "APPROVED")
approved_event = self.create_event(status="APPROVED")
pending = change_event_status(approved_event.id)
test_event1 = Event.objects.get(pk=approved_event.id)
self.assertEqual(test_event1.status, "PENDING")
def test_reject_event_status(self):
pending_event = self.create_event(status="PENDING")
reject = reject_event_status(pending_event.id)
test_event = Event.objects.get(pk=pending_event.id)
self.assertEqual(test_event.status, "REJECTED")
rejected_event = self.create_event(status="REJECTED")
pending = reject_event_status(rejected_event.id)
test_event1 = Event.objects.get(pk=rejected_event.id)
self.assertEqual(test_event1.status, "PENDING")
def test_edit_event_with_all_fields(self):
# First create a new event
with open(local(__file__).dirname + '/../../static/img/team/alja.jpg') as fp:
io = StringIO.StringIO()
io.write(fp.read())
uploaded_picture = InMemoryUploadedFile(io, None, "alja.jpg", "jpeg", io.len, None)
uploaded_picture.seek(0)
event_data = {
"end_date": datetime.datetime.now(),
"start_date": datetime.datetime.now(),
"organizer": "some organizer",
"creator": User.objects.filter(pk=1)[0],
"title": "event title",
"pub_date": datetime.datetime.now(),
"country": "SI",
"geoposition": Geoposition(46.05528,14.51444),
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"tags": ["tag1", "tag2"],
"picture": uploaded_picture
}
test_event = create_or_update_event(**event_data)
# Then edit it
with open(local(__file__).dirname + '/../../static/img/team/ercchy.jpg') as fp:
io = StringIO.StringIO()
io.write(fp.read())
uploaded_picture = InMemoryUploadedFile(io, None, "ercchy.jpg", "jpeg", io.len, None)
uploaded_picture.seek(0)
event_data = {
"end_date": datetime.datetime.now(),
"start_date": datetime.datetime.now(),
"organizer": "another organiser",
"creator": User.objects.filter(pk=1)[0],
"title": "event title - edited",
"pub_date": datetime.datetime.now(),
"country": "SI",
# "geoposition": Geoposition(46.05528,14.51444),
"location": "Ljubljana",
"audience": [1],
"theme": [1],
"tags": ["tag3", "tag4"],
"picture": uploaded_picture
}
test_event = create_or_update_event(event_id=test_event.id, **event_data)
assert "tag1" not in test_event.tags.names()
assert 'event_picture/alja' not in test_event.picture.path
assert 'event_picture/ercchy' in test_event.picture.path
@pytest.mark.django_db
def test_create_event_in_moldova(admin_user, db):
event_data = {
'audience': [3],
'theme': [1,2],
'contact_person': u'test@example.com',
'country': u'MD',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'event_url': u'',
'location': u'Tiraspol, Moldova',
'organizer': u'RailsGirls Moldova',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'Rails Moldova',
}
test_event = create_or_update_event(event_id=None, **event_data)
assert "MD" == test_event.country.code
@pytest.mark.django_db
def test_create_event_in_kosovo(admin_user, db):
event_data = {
'audience': [3],
'theme': [1,2],
'contact_person': u'test@example.com',
'country': u'XK',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'event_url': u'',
'location': u'Shaban Polluzha, Prishtina, Kosova (Kosovo)',
'organizer': u'RailsGirls Kosovo',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'RailsGirls Kosovo',
}
test_event = create_or_update_event(event_id=None, **event_data)
assert "XK" == test_event.country.code
@pytest.mark.django_db
def test_create_event_in_serbia(admin_user, db):
event_data = {
'audience': [3],
'theme': [1,2],
'contact_person': u'test@example.com',
'country': u'RS',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'event_url': u'',
'location': u'96 Bulevar despota Stefana, Belgrade, Serbia',
'organizer': u'RailsGirls Serbia',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'RailsGirls Serbia',
}
test_event = create_or_update_event(event_id=None, **event_data)
assert "RS" == test_event.country.code
@pytest.mark.django_db
def test_create_event_in_martinique_for_france(admin_user, db):
event_data = {
'audience': [3],
'theme': [1,2],
'contact_person': u'test@example.com',
'country': u'FR',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'event_url': u'',
'location': u'1011 Chemin rural No 8 Bis de Clemencin, Le Lamentin, Martinique',
'organizer': u'RailsGirls Martinique',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'RailsGirls Martinique',
}
test_event = create_or_update_event(event_id=None, **event_data)
assert "FR" == test_event.country.code
@pytest.mark.django_db
def test_create_event_in_each_listed_country(admin_user, db):
all_countries = list_countries()
for country in all_countries[2:]:
country_code = country[1]
country_name = country[0]
event_data = {
'audience': [3],
'theme': [1,2],
'contact_person': u'test@example.com',
'country': country_code,
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'event_url': u'',
'location': country_name,
'organizer': u'RailsGirls ' + country_name,
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'tags': [u'css', u'html', u'web'],
'title': u'RailsGirls ' + country_name,
}
test_event = create_or_update_event(event_id=None, **event_data)
assert country_code == test_event.country.code
test_event.delete()
def test_list_countries():
# a function we use a lot to get all countries, so let's check it's returning expected results
all_countries = list_countries()
# Austria should be the first country after two custom entries (All countries)
assert "Austria" == all_countries[2][0]
# checking two random countries - our own and Kosovo, which is a special case
assert ('Slovenia', 'SI') in all_countries
assert ('Kosovo', 'XK') in all_countries
# United Kingdom should be last
assert "United Kingdom" == all_countries[-1][0]
# if listing works, results are tuples ('country_name', 'country_code')
# country_code should be a string with 2 characters
for country in all_countries[2:]:
assert len(country[1]) == 2
@pytest.mark.django_db
def test_scoreboard_counter(admin_user, db):
initial_counter = count_approved_events_for_country()
# extra check to make sure the number of results matches
# the number of listed countries minus two custom entries
all_countries = list_countries()
assert len(initial_counter) == len(all_countries[2:])
counted_events_before = 0
for country in initial_counter:
if country['country_code'] == 'SI':
counted_events_before = country['events']
# Adding one approved and one pending event in same country
# the count for events for the country should increase by 1
event_data = {
'audience': [3],
'theme': [1,2],
'country': u'SI',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'location': u'Ljubljana, Slovenia',
'organizer': u'testko',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'title': u'Test Approved Event',
'status':"APPROVED",
}
test_approved_event = create_or_update_event(event_id=None, **event_data)
event_data = {
'audience': [3],
'theme': [1,2],
'country': u'SI',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'location': u'Ljubljana, Slovenia',
'organizer': u'testko',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'title': u'Test Pending Event',
'status':"PENDING",
}
test_pending_event = create_or_update_event(event_id=None, **event_data)
# and one event from another country, which shouldn't increase the counter
event_data = {
'audience': [3],
'theme': [1,2],
'country': u'IT',
'description': u'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod\r\ntempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\r\nquis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo\r\nconsequat. Duis aute irure dolor in reprehenderit in voluptate velit esse\r\ncillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non\r\nproident, sunt in culpa qui officia deserunt mollit anim id est laborum.',
'location': u'Rome, Italy',
'organizer': u'testko',
"creator": admin_user,
'start_date': datetime.datetime.now(),
'end_date': datetime.datetime.now() + datetime.timedelta(days=3, hours=3),
'title': u'Test Approved Event in other country',
'status':"APPROVED",
}
test_other_country_event = create_or_update_event(event_id=None, **event_data)
new_counter = count_approved_events_for_country()
counted_events_after = 0
country_score_after = 0
for country in new_counter:
if country['country_code'] == 'SI':
counted_events_after = country['events']
country_score_after = country['score']
# An extra check with a direct DB query
counted_events_query = Event.objects.filter(status='APPROVED').filter(country='SI').count()
assert counted_events_after == counted_events_before + 1
assert counted_events_after == counted_events_query
assert country_score_after > 0
test_approved_event.delete()
test_pending_event.delete()
test_other_country_event.delete()
|
{
"content_hash": "e097cae47d3eb4ab238b2f9d70625fbb",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 483,
"avg_line_length": 39.655855855855854,
"alnum_prop": 0.714571311736108,
"repo_name": "joseihf/coding-events",
"id": "07a95362a4438ec0e12e9664f5b17aac4d12149d",
"size": "22009",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/tests/test_events_processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "207918"
},
{
"name": "HTML",
"bytes": "80282"
},
{
"name": "JavaScript",
"bytes": "150254"
},
{
"name": "Python",
"bytes": "197463"
},
{
"name": "Ruby",
"bytes": "41"
},
{
"name": "Shell",
"bytes": "3554"
}
],
"symlink_target": ""
}
|
"""
Master configuration file for Evennia.
NOTE: NO MODIFICATIONS SHOULD BE MADE TO THIS FILE!
All settings changes should be done by copy-pasting the variable and
its value to <gamedir>/server/conf/settings.py.
Hint: Don't copy&paste over more from this file than you actually want
to change. Anything you don't copy&paste will thus retain its default
value - which may change as Evennia is developed. This way you can
always be sure of what you have changed and what is default behaviour.
"""
from django.contrib.messages import constants as messages
from django.urls import reverse_lazy
import os
import sys
######################################################################
# Evennia base server config
######################################################################
# This is the name of your game. Make it catchy!
SERVERNAME = "Evennia"
# Short one-sentence blurb describing your game. Shown under the title
# on the website and could be used in online listings of your game etc.
GAME_SLOGAN = "Python MU* creation system"
# Lockdown mode will cut off the game from any external connections
# and only allow connections from localhost. Requires a cold reboot.
LOCKDOWN_MODE = False
# Activate telnet service
TELNET_ENABLED = True
# A list of ports the Evennia telnet server listens on. Can be one or many.
TELNET_PORTS = [4000]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
TELNET_INTERFACES = ["0.0.0.0"]
# Activate Telnet+SSL protocol (Secure Sockets Layer) for supporting clients
SSL_ENABLED = False
# Ports to use for Telnet+SSL
SSL_PORTS = [4003]
# Telnet+SSL Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSL_INTERFACES = ["0.0.0.0"]
# OOB (out-of-band) telnet communication allows Evennia to communicate
# special commands and data with enabled Telnet clients. This is used
# to create custom client interfaces over a telnet connection. To make
# full use of OOB, you need to prepare functions to handle the data
# server-side (see INPUT_FUNC_MODULES). TELNET_ENABLED is required for this
# to work.
TELNET_OOB_ENABLED = False
# Activate SSH protocol communication (SecureShell)
SSH_ENABLED = False
# Ports to use for SSH
SSH_PORTS = [4004]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
SSH_INTERFACES = ["0.0.0.0"]
# Start the evennia django+twisted webserver so you can
# browse the evennia website and the admin interface
# (Obs - further web configuration can be found below
# in the section 'Config for Django web features')
WEBSERVER_ENABLED = True
# This is a security setting protecting against host poisoning
# attacks. It defaults to allowing all. In production, make
# sure to change this to your actual host addresses/IPs.
ALLOWED_HOSTS = ["*"]
# The webserver sits behind a Portal proxy. This is a list
# of tuples (proxyport,serverport) used. The proxyports are what
# the Portal proxy presents to the world. The serverports are
# the internal ports the proxy uses to forward data to the Server-side
# webserver (these should not be publicly open)
WEBSERVER_PORTS = [(4001, 4005)]
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSERVER_INTERFACES = ["0.0.0.0"]
# IP addresses that may talk to the server in a reverse proxy configuration,
# like NginX.
UPSTREAM_IPS = ["127.0.0.1"]
# The webserver uses threadpool for handling requests. This will scale
# with server load. Set the minimum and maximum number of threads it
# may use as (min, max) (must be > 0)
WEBSERVER_THREADPOOL_LIMITS = (1, 20)
# Start the evennia webclient. This requires the webserver to be running and
# offers the fallback ajax-based webclient backbone for browsers not supporting
# the websocket one.
WEBCLIENT_ENABLED = True
# Activate Websocket support for modern browsers. If this is on, the
# default webclient will use this and only use the ajax version if the browser
# is too old to support websockets. Requires WEBCLIENT_ENABLED.
WEBSOCKET_CLIENT_ENABLED = True
# Server-side websocket port to open for the webclient. Note that this value will
# be dynamically encoded in the webclient html page to allow the webclient to call
# home. If the external encoded value needs to be different than this, due to
# working through a proxy or docker port-remapping, the environment variable
# WEBCLIENT_CLIENT_PROXY_PORT can be used to override this port only for the
# front-facing client's sake.
WEBSOCKET_CLIENT_PORT = 4002
# Interface addresses to listen to. If 0.0.0.0, listen to all. Use :: for IPv6.
WEBSOCKET_CLIENT_INTERFACE = "0.0.0.0"
# Actual URL for webclient component to reach the websocket. You only need
# to set this if you know you need it, like using some sort of proxy setup.
# If given it must be on the form "ws[s]://hostname[:port]". If left at None,
# the client will itself figure out this url based on the server's hostname.
# e.g. ws://external.example.com or wss://external.example.com:443
WEBSOCKET_CLIENT_URL = None
# This determines whether Evennia's custom admin page is used, or if the
# standard Django admin is used.
EVENNIA_ADMIN = True
# The Server opens an AMP port so that the portal can
# communicate with it. This is an internal functionality of Evennia, usually
# operating between two processes on the same machine. You usually don't need to
# change this unless you cannot use the default AMP port/host for
# whatever reason.
AMP_HOST = "localhost"
AMP_PORT = 4006
AMP_INTERFACE = "127.0.0.1"
# Path to the lib directory containing the bulk of the codebase's code.
EVENNIA_DIR = os.path.dirname(os.path.abspath(__file__))
# Path to the game directory (containing the server/conf/settings.py file)
# This is dynamically created - there is generally no need to change this!
if EVENNIA_DIR.lower() == os.getcwd().lower() or (
sys.argv[1] == "test" if len(sys.argv) > 1 else False
):
# unittesting mode
GAME_DIR = os.getcwd()
else:
# Fallback location (will be replaced by the actual game dir at runtime)
GAME_DIR = os.path.join(EVENNIA_DIR, "game_template")
for i in range(10):
gpath = os.getcwd()
if "server" in os.listdir(gpath):
if os.path.isfile(os.path.join("server", "conf", "settings.py")):
GAME_DIR = gpath
break
os.chdir(os.pardir)
# Place to put log files, how often to rotate the log and how big each log file
# may become before rotating.
LOG_DIR = os.path.join(GAME_DIR, "server", "logs")
SERVER_LOG_FILE = os.path.join(LOG_DIR, "server.log")
SERVER_LOG_DAY_ROTATION = 7
SERVER_LOG_MAX_SIZE = 1000000
PORTAL_LOG_FILE = os.path.join(LOG_DIR, "portal.log")
PORTAL_LOG_DAY_ROTATION = 7
PORTAL_LOG_MAX_SIZE = 1000000
# The http log is usually only for debugging since it's very spammy
HTTP_LOG_FILE = os.path.join(LOG_DIR, "http_requests.log")
# if this is set to the empty string, lockwarnings will be turned off.
LOCKWARNING_LOG_FILE = os.path.join(LOG_DIR, "lockwarnings.log")
# Number of lines to append to rotating channel logs when they rotate
CHANNEL_LOG_NUM_TAIL_LINES = 20
# Max size (in bytes) of channel log files before they rotate
CHANNEL_LOG_ROTATE_SIZE = 1000000
# Local time zone for this installation. All choices can be found here:
# http://www.postgresql.org/docs/8.0/interactive/datetime-keywords.html#DATETIME-TIMEZONE-SET-TABLE
TIME_ZONE = "UTC"
# Activate time zone in datetimes
USE_TZ = True
# Authentication backends. This is the code used to authenticate a user.
AUTHENTICATION_BACKENDS = ["evennia.web.utils.backends.CaseInsensitiveModelBackend"]
# Language code for this installation. All choices can be found here:
# http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes
LANGUAGE_CODE = "en-us"
# How long time (in seconds) a user may idle before being logged
# out. This can be set as big as desired. A user may avoid being
# thrown off by sending the empty system command 'idle' to the server
# at regular intervals. Set <=0 to deactivate idle timeout completely.
IDLE_TIMEOUT = -1
# The idle command can be sent to keep your session active without actually
# having to spam normal commands regularly. It gives no feedback, only updates
# the idle timer. Note that "idle" will *always* work, even if a different
# command-name is given here; this is because the webclient needs a default
# to send to avoid proxy timeouts.
IDLE_COMMAND = "idle"
# The set of encodings tried. An Account object may set an attribute "encoding" on
# itself to match the client used. If not set, or wrong encoding is
# given, this list is tried, in order, aborting on the first match.
# Add sets for languages/regions your accounts are likely to use.
# (see http://en.wikipedia.org/wiki/Character_encoding)
# Telnet default encoding, unless specified by the client, will be ENCODINGS[0].
ENCODINGS = ["utf-8", "latin-1", "ISO-8859-1"]
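# Roughly, incoming data is decoded by trying each entry in turn -
# data.decode("utf-8"), then data.decode("latin-1"), and so on - using the
# first attempt that does not raise a UnicodeDecodeError.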
# Regular expression applied to all output to a given session in order
# to strip away characters (usually various forms of decorations) for the benefit
# of users with screen readers. Note that ANSI/MXP doesn't need to
# be stripped this way, that is handled automatically.
SCREENREADER_REGEX_STRIP = r"\+-+|\+$|\+~|--+|~~+|==+"
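# For example, with the default pattern above:
#   >>> import re
#   >>> re.sub(SCREENREADER_REGEX_STRIP, "", "~~~~ Inventory ~~~~")
#   ' Inventory '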
# Database objects are cached in what is known as the idmapper. The idmapper
# caching results in a massive speedup of the server (since it dramatically
# limits the number of database accesses needed) and also allows for
# storing temporary data on objects. It is however also the main memory
# consumer of Evennia. With this setting the cache can be capped and
# flushed when it reaches a certain size. Minimum is 50 MB but it is
# not recommended to set this to less than 100 MB for a distribution
# system.
# Empirically, N_objects_in_cache ~ ((RMEM - 35) / 0.0157):
# mem(MB) | objs in cache || mem(MB) | objs in cache
# 50 | ~1000 || 800 | ~49 000
# 100 | ~4000 || 1200 | ~75 000
# 200 | ~10 000 || 1600 | ~100 000
# 500 | ~30 000 || 2000 | ~125 000
# Note that the estimated memory usage is not exact (and the cap is only
# checked every 5 minutes), so err on the side of caution if
# running on a server with limited memory. Also note that Python
# will not necessarily return the memory to the OS when the idmapper
# flashes (the memory will be freed and made available to the Python
# process only). How many objects need to be in memory at any given
# time depends very much on your game so some experimentation may
# be necessary (use @server to see how many objects are in the idmapper
# cache at any time). Setting this to None disables the cache cap.
IDMAPPER_CACHE_MAXSIZE = 200 # (MB)
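# A rough worked example of the empirical formula above: with the default
# 200 MB cap, N_objects_in_cache ~ (200 - 35) / 0.0157 ~ 10500 objects,
# in line with the ~10 000 row of the table.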
# This determines how many connections per second the Portal should
# accept, as a DoS countermeasure. If the rate exceeds this number, incoming
# connections will be queued to this rate, so none will be lost.
# Must be set to a value > 0.
MAX_CONNECTION_RATE = 2
# Determine how many commands per second a given Session is allowed
# to send to the Portal via a connected protocol. Too high rate will
# drop the command and echo a warning. Note that this will also cap
# OOB messages so don't set it too low if you expect a lot of events
# from the client! To turn the limiter off, set to <= 0.
MAX_COMMAND_RATE = 80
# The warning to echo back to users if they send commands too fast
COMMAND_RATE_WARNING = "You entered commands too fast. Wait a moment and try again."
# Determine how large of a string can be sent to the server in number
# of characters. If they attempt to enter a string over this character
# limit, we stop them and send a message. To make unlimited, set to
# 0 or less.
MAX_CHAR_LIMIT = 6000
# The warning to echo back to users if they enter a very large string
MAX_CHAR_LIMIT_WARNING = (
"You entered a string that was too long. " "Please break it up into multiple parts."
)
# If this is true, errors and tracebacks from the engine will be
# echoed as text in-game as well as to the log. This can speed up
# debugging. OBS: Showing full tracebacks to regular users could be a
# security problem - turn this off in a production game!
IN_GAME_ERRORS = True
######################################################################
# Evennia Database config
######################################################################
# Database config syntax:
# ENGINE - path to the database backend. Possible choices are:
# 'django.db.backends.sqlite3', (default)
# 'django.db.backends.mysql',
# 'django.db.backends.postgresql',
# 'django.db.backends.oracle' (untested).
# NAME - database name, or path to the db file for sqlite3
# USER - db admin (unused in sqlite3)
# PASSWORD - db admin password (unused in sqlite3)
# HOST - empty string is localhost (unused in sqlite3)
# PORT - empty string defaults to localhost (unused in sqlite3)
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.getenv("TEST_DB_PATH", os.path.join(GAME_DIR, "server", "evennia.db3")),
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
}
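# An illustrative (non-default) MySQL configuration following the key
# descriptions above - adjust name, credentials and host to your setup:
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.mysql",
#         "NAME": "evennia",
#         "USER": "evennia",
#         "PASSWORD": "secret",
#         "HOST": "127.0.0.1",
#         "PORT": "3306",
#     }
# }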
# How long the django-database connection should be kept open, in seconds.
# If you get errors about the database having gone away after long idle
# periods, shorten this value (e.g. MySQL defaults to a timeout of 8 hrs)
CONN_MAX_AGE = 3600 * 7
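# (3600 * 7 = 25200 seconds, i.e. 7 hours - comfortably below the 8 hr
# MySQL timeout mentioned above.)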
# When removing or renaming models, such models stored in Attributes may
# become orphaned and will return as None. If the change is a rename (that
# is, there is a 1:1 pk mapping between the old and the new), the unserializer
# can convert old to new when retrieving them. This is a list of tuples
# (old_natural_key, new_natural_key). Note that Django ContentTypes'
# natural_keys are themselves tuples (appname, modelname). Creation-dates will
# not be checked for models specified here. If new_natural_key does not exist,
# `None` will be returned and stored back as if no replacement was set.
ATTRIBUTE_STORED_MODEL_RENAME = [
(("players", "playerdb"), ("accounts", "accountdb")),
(("typeclasses", "defaultplayer"), ("typeclasses", "defaultaccount")),
]
# Default type of autofield (required by Django)
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
######################################################################
# Evennia pluggable modules
######################################################################
# Plugin modules extend Evennia in various ways. In the cases with no
# existing default, there are examples of many of these modules
# in contrib/examples.
# The command parser module to use. See the default module for which
# functions it must implement
COMMAND_PARSER = "evennia.commands.cmdparser.cmdparser"
# On a multi-match when search objects or commands, the user has the
# ability to search again with an index marker that differentiates
# the results. If multiple "box" objects
# are found, they can by default be separated as 1-box, 2-box. Below you
# can change the regular expression used. The regex must have
# two capturing groups (?P<number>...) and (?P<name>...) - the default
# parser expects this. It should also involve a number starting from 1.
# When changing this you must also update SEARCH_MULTIMATCH_TEMPLATE
# to properly describe the syntax.
SEARCH_MULTIMATCH_REGEX = r"(?P<number>[0-9]+)-(?P<name>.*)"
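# For example, with the default regex:
#   >>> import re
#   >>> re.match(SEARCH_MULTIMATCH_REGEX, "2-box").groupdict()
#   {'number': '2', 'name': 'box'}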
# To display multimatch errors in various listings we must display
# the syntax in a way that matches what SEARCH_MULTIMATCH_REGEX understand.
# The template will be populated with data and expects the following markup:
# {number} - the order of the multimatch, starting from 1; {name} - the
# name (key) of the multimatched entity; {aliases} - eventual
# aliases for the entity; {info} - extra info like #dbrefs for staff. Don't
# forget a line break if you want one match per line.
SEARCH_MULTIMATCH_TEMPLATE = " {number}-{name}{aliases}{info}\n"
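# For example, with the default template:
#   >>> SEARCH_MULTIMATCH_TEMPLATE.format(number=1, name="box", aliases="", info="")
#   ' 1-box\n'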
# The handler that outputs errors when using any API-level search
# (not manager methods). This function should correctly report errors
# both for command- and object-searches. This allows full control
# over the error output (it uses SEARCH_MULTIMATCH_TEMPLATE by default).
SEARCH_AT_RESULT = "evennia.utils.utils.at_search_result"
# Single characters to ignore at the beginning of a command. When set, e.g.
# cmd, @cmd and +cmd will all find a command "cmd" or one named "@cmd" etc. If
# you have defined two different commands cmd and @cmd you can still enter
# @cmd to exactly target the second one. Single-character commands consisting
# of only a prefix character will not be stripped. Set to the empty
# string ("") to turn off prefix ignore.
CMD_IGNORE_PREFIXES = "@&/+"
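# With the default value, entering "look", "@look", "&look", "/look" or
# "+look" will all find a command keyed "look" (or one explicitly keyed
# "@look", if such a command exists).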
# The module holding text strings for the connection screen.
# This module should contain one or more variables
# with strings defining the look of the screen.
CONNECTION_SCREEN_MODULE = "server.conf.connection_screens"
# Delay to use before sending the evennia.syscmdkeys.CMD_LOGINSTART Command
# when a new session connects (this defaults the unloggedin-look for showing
# the connection screen). The delay is useful mainly for telnet, to allow
# client/server to establish client capabilities like color/mxp etc before
# sending any text. A value of 0.3 should be enough. While a good idea, it may
# cause issues with menu-logins and autoconnects since the menu will not have
# started when the autoconnects starts sending menu commands.
DELAY_CMD_LOGINSTART = 0.3
# An optional module that, if existing, must hold a function
# named at_initial_setup(). This hook method can be used to customize
# the server's initial setup sequence (the very first startup of the system).
# The check will fail quietly if module doesn't exist or fails to load.
AT_INITIAL_SETUP_HOOK_MODULE = "server.conf.at_initial_setup"
# Module containing your custom at_server_start(), at_server_reload() and
# at_server_stop() methods. These methods will be called every time
# the server starts, reloads and resets/stops respectively.
AT_SERVER_STARTSTOP_MODULE = "server.conf.at_server_startstop"
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the main
# Evennia Server application when the Server is initiated.
# It will be called last in the startup sequence.
SERVER_SERVICES_PLUGIN_MODULES = ["server.conf.server_services_plugins"]
# List of one or more module paths to modules containing a function
# start_plugin_services(application). This module will be called with the
# main Evennia Portal application when the Portal is initiated.
# It will be called last in the startup sequence.
PORTAL_SERVICES_PLUGIN_MODULES = ["server.conf.portal_services_plugins"]
# Module holding MSSP meta data. This is used by MUD-crawlers to determine
# what type of game you are running, how many accounts you have etc.
MSSP_META_MODULE = "server.conf.mssp"
# Module for web plugins.
WEB_PLUGINS_MODULE = "server.conf.web_plugins"
# Tuple of modules implementing lock functions. All callable functions
# inside these modules will be available as lock functions.
LOCK_FUNC_MODULES = ("evennia.locks.lockfuncs", "server.conf.lockfuncs")
# Module holding handlers for managing incoming data from the client. These
# will be loaded in order, meaning functions in later modules may overload
# previous ones if having the same name.
INPUT_FUNC_MODULES = ["evennia.server.inputfuncs", "server.conf.inputfuncs"]
# Modules that contain prototypes for use with the spawner mechanism.
PROTOTYPE_MODULES = ["world.prototypes"]
# Modules containing Prototype functions able to be embedded in prototype
# definitions from in-game.
PROT_FUNC_MODULES = ["evennia.prototypes.protfuncs"]
# Module holding settings/actions for the dummyrunner program (see the
# dummyrunner for more information)
DUMMYRUNNER_SETTINGS_MODULE = "evennia.server.profiling.dummyrunner_settings"
# Mapping to extend Evennia's normal ANSI color tags. The mapping is a list of
# tuples mapping the exact tag (not a regex!) to the ANSI conversion, like
# `(r"%c%r", ansi.ANSI_RED)` (the evennia.utils.ansi module contains all
# ANSI escape sequences). Default is to use `|` and `|[` -prefixes.
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_ANSI_EXTRA_MAP = []
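# An illustrative (non-default) value, assuming the constants defined in
# evennia.utils.ansi:
#   from evennia.utils import ansi
#   COLOR_ANSI_EXTRA_MAP = [(r"%cr", ansi.ANSI_RED)]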
# Extend the available regexes for adding XTERM256 colors in-game. This is given
# as a list of regexes, where each regex must contain three anonymous groups for
# holding integers 0-5 for the red, green and blue components. Default is
# r'\|([0-5])([0-5])([0-5])', which allows e.g. |500 for red.
# Note that to apply all color changes, a full `evennia reboot` is needed!
# XTERM256 foreground color replacement
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_XTERM256_EXTRA_FG = []
# XTERM256 background color replacement. Default is r'\|\[([0-5])([0-5])([0-5])'.
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_XTERM256_EXTRA_BG = []
# Extend the available regexes for adding XTERM256 grayscale values in-game. Given
# as a list of regexes, where each regex must contain one anonymous group containing
# a single letter a-z to mark the level from black to white. Default is r'\|=([a-z])',
# which allows e.g. |=k for a medium gray.
# XTERM256 grayscale foreground
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_XTERM256_EXTRA_GFG = []
# XTERM256 grayscale background. Default is r'\|\[=([a-z])'.
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_XTERM256_EXTRA_GBG = []
# ANSI does not support bright backgrounds, so Evennia fakes this by mapping it to
# XTERM256 backgrounds where supported. This is a list of tuples that maps the wanted
# ansi tag (not a regex!) to a valid XTERM256 tag, such as `(r'|o', r'|531')`
# for orange. By default this is only used for bright backgrounds, but
# both bright and dark colors can be mapped this way; it is a convenient way
# to add new shortcuts to xterm colors without writing out the RGB value.
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_ANSI_XTERM256_BRIGHT_BG_EXTRA_MAP = []
# If set to True, the above color settings *replace* the default |-style
# color markup rather than extend it.
# Note that to apply all color changes, a full `evennia reboot` is needed!
COLOR_NO_DEFAULT = False
######################################################################
# Default command sets and commands
######################################################################
# Command set used on session before account has logged in
CMDSET_UNLOGGEDIN = "commands.default_cmdsets.UnloggedinCmdSet"
# (Note that changing the following three cmdset paths will only affect NEWLY
# created characters/objects, not those already in play. So if you want to
# change this and have it apply to every object, it's recommended you do it
# before having created a lot of objects (or simply reset the database after
# the change, for simplicity)).
# Command set used on the logged-in session
CMDSET_SESSION = "commands.default_cmdsets.SessionCmdSet"
# Default set for logged in account with characters (fallback)
CMDSET_CHARACTER = "commands.default_cmdsets.CharacterCmdSet"
# Command set for accounts without a character (ooc)
CMDSET_ACCOUNT = "commands.default_cmdsets.AccountCmdSet"
# Location to search for cmdsets if full path not given
CMDSET_PATHS = ["commands", "evennia", "evennia.contrib"]
# Fallbacks for cmdset paths that fail to load. Note that if you change the
# path of a default cmdset, you must also redefine CMDSET_FALLBACKS after that
# change in your settings file for the change to be picked up.
CMDSET_FALLBACKS = {
CMDSET_CHARACTER: "evennia.commands.default.cmdset_character.CharacterCmdSet",
CMDSET_ACCOUNT: "evennia.commands.default.cmdset_account.AccountCmdSet",
CMDSET_SESSION: "evennia.commands.default.cmdset_session.SessionCmdSet",
CMDSET_UNLOGGEDIN: "evennia.commands.default.cmdset_unloggedin.UnloggedinCmdSet",
}
# Parent class for all default commands. Changing this class will
# modify all default commands, so do so carefully.
COMMAND_DEFAULT_CLASS = "evennia.commands.default.muxcommand.MuxCommand"
# Command.arg_regex is a regular expression describing how the arguments
# to the command must be structured for the command to match a given user
# input. By default there is no restriction as long as the input string
# starts with the command name.
COMMAND_DEFAULT_ARG_REGEX = None
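# A hedged illustration: a common convention (an assumption here, not the
# shipped default) is to require that the command name be followed by
# whitespace or end-of-input, e.g.
#
#     COMMAND_DEFAULT_ARG_REGEX = r"\s|$"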
# By default, Command.msg will only send data to the Session calling
# the Command in the first place. If set, Command.msg will instead return
# data to all Sessions connected to the Account/Character associated with
# calling the Command. This may be more intuitive for users in certain
# multisession modes.
COMMAND_DEFAULT_MSG_ALL_SESSIONS = False
# The help category of a command if not otherwise specified.
COMMAND_DEFAULT_HELP_CATEGORY = "general"
# The default lockstring of a command.
COMMAND_DEFAULT_LOCKS = ""
# The Channel Handler is responsible for managing all available channels. By
# default it builds the current channels into a channel-cmdset that it feeds
# to the cmdhandler. Overloading this can completely change how Channels
# are identified and called.
CHANNEL_HANDLER_CLASS = "evennia.comms.channelhandler.ChannelHandler"
# The (default) Channel Handler will create a command to represent each
# channel, creating it with the key of the channel, its aliases, locks etc. The
# default class logs channel messages to a file and allows for /history. This
# setting allows to override the command class used with your own.
# If you implement your own CHANNEL_HANDLER_CLASS, you can handle this there
# directly and will likely not need this setting.
CHANNEL_COMMAND_CLASS = "evennia.comms.channelhandler.ChannelCommand"
######################################################################
# Typeclasses and other paths
######################################################################
# Server-side session class used.
SERVER_SESSION_CLASS = "evennia.server.serversession.ServerSession"
# These are paths that will be prefixed to the paths given if the
# immediately entered path fails to find a typeclass. It allows for
# shorter input strings. They must either base off the game directory
# or start from the evennia library.
TYPECLASS_PATHS = ["typeclasses", "evennia", "evennia.contrib", "evennia.contrib.tutorial_examples"]
# Typeclass for account objects (linked to a character) (fallback)
BASE_ACCOUNT_TYPECLASS = "typeclasses.accounts.Account"
# Typeclass and base for all objects (fallback)
BASE_OBJECT_TYPECLASS = "typeclasses.objects.Object"
# Typeclass for character objects linked to an account (fallback)
BASE_CHARACTER_TYPECLASS = "typeclasses.characters.Character"
# Typeclass for rooms (fallback)
BASE_ROOM_TYPECLASS = "typeclasses.rooms.Room"
# Typeclass for Exit objects (fallback).
BASE_EXIT_TYPECLASS = "typeclasses.exits.Exit"
# Typeclass for Channel (fallback).
BASE_CHANNEL_TYPECLASS = "typeclasses.channels.Channel"
# Typeclass for Scripts (fallback). You usually don't need to change this
# but create custom variations of scripts on a per-case basis instead.
BASE_SCRIPT_TYPECLASS = "typeclasses.scripts.Script"
# The default home location used for all objects. This is used as a
# fallback if an object's normal home location is deleted. Default
# is Limbo (#2).
DEFAULT_HOME = "#2"
# The start position for new characters. Default is Limbo (#2).
# MULTISESSION_MODE = 0, 1 - used by default unloggedin create command
# MULTISESSION_MODE = 2, 3 - used by default character_create command
START_LOCATION = "#2"
# Lookups of Attributes, Tags, Nicks, Aliases can be aggressively
# cached to avoid repeated database hits. This often gives noticeable
# performance gains since they are called so often. Drawback is that
# if you are accessing the database from multiple processes (such as
# from a website -not- running Evennia's own webserver) data may go
# out of sync between the processes. Keep on unless you face such
# issues.
TYPECLASS_AGGRESSIVE_CACHE = True
# These are fallbacks for BASE typeclasses failing to load. Usually needed only
# during doc building. The system expects these to *always* load correctly, so
# only modify if you are making fundamental changes to how objects/accounts
# work and know what you are doing.
FALLBACK_ACCOUNT_TYPECLASS = "evennia.accounts.accounts.DefaultAccount"
FALLBACK_OBJECT_TYPECLASS = "evennia.objects.objects.DefaultObject"
FALLBACK_CHARACTER_TYPECLASS = "evennia.objects.objects.DefaultCharacter"
FALLBACK_ROOM_TYPECLASS = "evennia.objects.objects.DefaultRoom"
FALLBACK_EXIT_TYPECLASS = "evennia.objects.objects.DefaultExit"
FALLBACK_CHANNEL_TYPECLASS = "evennia.comms.comms.DefaultChannel"
FALLBACK_SCRIPT_TYPECLASS = "evennia.scripts.scripts.DefaultScript"
######################################################################
# Options and validators
######################################################################
# Options available on Accounts. Each such option is described by a
# class available from evennia.OPTION_CLASSES, in turn making use
# of validators from evennia.VALIDATOR_FUNCS to validate input when
# the user changes an option. The options are accessed through the
# `Account.options` handler.
# ("Description", 'Option Class name in evennia.OPTION_CLASS_MODULES', 'Default Value')
OPTIONS_ACCOUNT_DEFAULT = {
"border_color": ("Headers, footers, table borders, etc.", "Color", "n"),
"header_star_color": ("* inside Header lines.", "Color", "n"),
"header_text_color": ("Text inside Header lines.", "Color", "w"),
"header_fill": ("Fill for Header lines.", "Text", "="),
"separator_star_color": ("* inside Separator lines.", "Color", "n"),
"separator_text_color": ("Text inside Separator lines.", "Color", "w"),
"separator_fill": ("Fill for Separator Lines.", "Text", "-"),
"footer_star_color": ("* inside Footer lines.", "Color", "n"),
"footer_text_color": ("Text inside Footer Lines.", "Color", "n"),
"footer_fill": ("Fill for Footer Lines.", "Text", "="),
"column_names_color": ("Table column header text.", "Color", "w"),
"help_category_color": ("Help category names.", "Color", "n"),
"help_entry_color": ("Help entry names.", "Color", "n"),
"timezone": ("Timezone for dates. @tz for a list.", "Timezone", "UTC"),
}
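# A hedged usage sketch (game code, not settings): these options are reached
# through the `Account.options` handler mentioned above; the exact method
# names below are assumptions.
#
#     color = account.options.get("border_color")
#     account.options.set("border_color", "r")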
# Modules holding Option classes, responsible for serializing the option and
# calling validator functions on it. Same-named functions in modules added
# later in this list will override those added earlier.
OPTION_CLASS_MODULES = ["evennia.utils.optionclasses"]
# Module holding validator functions. These are used as a resource for
# validating options, but can also be used as input validators in general.
# Same-named functions in modules added later in this list will override those
# added earlier.
VALIDATOR_FUNC_MODULES = ["evennia.utils.validatorfuncs"]
######################################################################
# Batch processors
######################################################################
# Python path to a directory to be searched for batch scripts
# for the batch processors (.ev and/or .py files).
BASE_BATCHPROCESS_PATHS = ["world", "evennia.contrib", "evennia.contrib.tutorial_examples"]
######################################################################
# Game Time setup
######################################################################
# You don't actually have to use this, but it affects the routines in
# evennia.utils.gametime and allows for a convenient measure to
# determine the current in-game time. You can of course interpret
# "week", "month" etc as your own in-game time units as desired.
# The time factor dictates if the game world runs faster (timefactor>1)
# or slower (timefactor<1) than the real world.
TIME_FACTOR = 2.0
# The starting point of your game time (the epoch), in seconds.
# In Python a value of 0 means Jan 1 1970 (use negatives for earlier
# start date). This will affect the returns from the utils.gametime
# module. If None, the server's first start-time is used as the epoch.
TIME_GAME_EPOCH = None
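# For example, to anchor game time at 2020-01-01 00:00 UTC you could set
# TIME_GAME_EPOCH = 1577836800 (that instant expressed in epoch seconds).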
# Normally, game time will only increase when the server runs. If this is True,
# game time will not pause when the server reloads or goes offline. This setting
# together with a time factor of 1 should keep the game in sync with
# the real time (add a different epoch to shift time)
TIME_IGNORE_DOWNTIMES = False
######################################################################
# Inlinefunc, PrototypeFuncs
######################################################################
# Evennia supports inline function preprocessing. This allows users
# to supply inline calls of the form $func(arg, arg, ...) to do
# session-aware text formatting and manipulation on the fly. If
# disabled, such inline functions will not be parsed.
INLINEFUNC_ENABLED = False
# This defines how deeply nested inlinefuncs can be. Set to <=0 to
# disable the limit (not recommended; it is a safeguard against infinite loops).
INLINEFUNC_STACK_MAXSIZE = 20
# Only functions defined globally (and not starting with '_') in
# these modules will be considered valid inlinefuncs. The list is loaded
# from left to right; same-named functions in later modules overload earlier ones.
INLINEFUNC_MODULES = ["evennia.utils.inlinefuncs", "server.conf.inlinefuncs"]
# Module holding handlers for ProtFuncs. These allow for embedding
# functional code in prototypes and have the same syntax as inlinefuncs.
PROTOTYPEFUNC_MODULES = ["evennia.utils.prototypefuncs", "server.conf.prototypefuncs"]
######################################################################
# Global Scripts
######################################################################
# Global scripts started here will be available through
# 'evennia.GLOBAL_SCRIPTS.key'. The scripts will survive a reload and be
# recreated automatically if deleted. Each entry's dict key becomes the
# script's key; all other fields in the specification are optional. If 'typeclass' is
# not given, BASE_SCRIPT_TYPECLASS will be assumed. Note that if you change
# typeclass for the same key, a new Script will replace the old one on
# `evennia.GLOBAL_SCRIPTS`.
GLOBAL_SCRIPTS = {
# 'key': {'typeclass': 'typeclass.path.here',
# 'repeats': -1, 'interval': 50, 'desc': 'Example script'},
}
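# A hedged usage sketch: with an entry such as the commented 'key' example
# above, game code could then reach the script as
#
#     import evennia
#     script = evennia.GLOBAL_SCRIPTS.key   # 'key' is the illustrative name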
######################################################################
# Default Account setup and access
######################################################################
# Different Multisession modes allow a player (=account) to connect to the
# game simultaneously with multiple clients (=sessions). In modes 0,1 a
# single character is created with the same name as the account at first
# login. In modes 2,3 no default character is created and the
# MAX_NR_CHARACTERS value (below) defines how many characters the default
# char_create command allows per account.
# 0 - single session, one account, one character, when a new session is
# connected, the old one is disconnected
# 1 - multiple sessions, one account, one character, each session getting
# the same data
# 2 - multiple sessions, one account, many characters, one session per
# character (disconnects multiplets)
# 3 - like mode 2, except multiple sessions can puppet one character, each
# session getting the same data.
MULTISESSION_MODE = 0
# The maximum number of characters allowed by the default ooc char-creation command
MAX_NR_CHARACTERS = 1
# The access hierarchy, in climbing order. A higher permission in the
# hierarchy includes access of all levels below it. Used by the perm()/pperm()
# lock functions, which accept both plural and singular forms (Admin & Admins).
PERMISSION_HIERARCHY = [
"Guest", # note-only used if GUEST_ENABLED=True
"Player",
"Helper",
"Builder",
"Admin",
"Developer",
]
# The default permission given to all new accounts
PERMISSION_ACCOUNT_DEFAULT = "Player"
# Default sizes for client window (in number of characters), if client
# is not supplying this on its own
CLIENT_DEFAULT_WIDTH = 78
# telnet standard height is 24; does anyone use such low-res displays anymore?
CLIENT_DEFAULT_HEIGHT = 45
# Help output from CmdHelp is wrapped in an EvMore call
# (excluding the webclient, which has separate help popups). If continuous
# scroll is preferred, set HELP_MORE to False. EvMore uses CLIENT_DEFAULT_HEIGHT.
HELP_MORE = True
# Set rate limits per-IP on account creations and login attempts
CREATION_THROTTLE_LIMIT = 2
CREATION_THROTTLE_TIMEOUT = 10 * 60
LOGIN_THROTTLE_LIMIT = 5
LOGIN_THROTTLE_TIMEOUT = 5 * 60
######################################################################
# Guest accounts
######################################################################
# This enables guest logins, by default via "connect guest". Note that
# you need to edit your login screen to inform players of this possibility.
GUEST_ENABLED = False
# Typeclass for guest account objects (linked to a character)
BASE_GUEST_TYPECLASS = "typeclasses.accounts.Guest"
# The permission given to guests
PERMISSION_GUEST_DEFAULT = "Guests"
# The default home location used for guests.
GUEST_HOME = DEFAULT_HOME
# The start position used for guest characters.
GUEST_START_LOCATION = START_LOCATION
# The naming convention used for creating new guest
# accounts/characters. The size of this list also determines how many
# guests may be on the game at once. The default is a maximum of nine
# guests, named Guest1 through Guest9.
GUEST_LIST = ["Guest" + str(s + 1) for s in range(9)]
######################################################################
# In-game Channels created from server start
######################################################################
# The mudinfo channel must always exist; it is used by Evennia itself to
# relay status messages, connection info etc to staff. The superuser will be
# automatically subscribed to this channel and it will be recreated on a
# reload if deleted. This is a dict specifying the kwargs needed to create
# the channel.
CHANNEL_MUDINFO = {
"key": "MudInfo",
"aliases": "",
"desc": "Connection log",
"locks": "control:perm(Developer);listen:perm(Admin);send:false()",
}
# These are additional channels to offer. Usually, at least 'public'
# should exist. The superuser will automatically be subscribed to all channels
# in this list. New entries will be created on the next reload, but
# removing or updating a same-key channel in this list will NOT automatically
# change or remove it in the game; that must be done manually.
DEFAULT_CHANNELS = [
# public channel
{
"key": "Public",
"aliases": ("pub"),
"desc": "Public discussion",
"locks": "control:perm(Admin);listen:all();send:all()",
}
]
# Optional channel info (same form as CHANNEL_MUDINFO) for the channel to
# receive connection messages ("<account> has (dis)connected"). While the
# MudInfo channel will also receive this info, this channel is meant for
# non-staffers.
CHANNEL_CONNECTINFO = None
######################################################################
# External Connections
######################################################################
# Note: You do *not* have to make your MUD open to
# the public to use the external connections, they
# operate as long as you have an internet connection,
# just like stand-alone chat clients.
# The Evennia Game Index is a dynamic listing of Evennia games. You can add
# your game to this list even if it is in closed pre-alpha development.
GAME_INDEX_ENABLED = False
# This dict holds the information sent to the Game Index about your game.
GAME_INDEX_LISTING = {
"game_name": "Mygame", # usually SERVERNAME
"game_status": "pre-alpha", # pre-alpha, alpha, beta or launched
"short_description": "", # could be GAME_SLOGAN
"long_description": "",
"listing_contact": "", # email
"telnet_hostname": "", # mygame.com
"telnet_port": "", # 1234
"game_website": "", # http://mygame.com
"web_client_url": "", # http://mygame.com/webclient
}
# Evennia can connect to external IRC channels and echo what is said on an
# in-game channel to IRC and vice versa. Note: make sure the IRC network
# allows bots. When enabled, the @irc2chan command will be available in-game.
# IRC support requires that you have twisted.words installed.
IRC_ENABLED = False
# RSS support allows connecting RSS feeds (from forum updates, blogs etc) to
# an in-game channel. The channel will be updated when the RSS feed
# updates. Use @rss2chan in game to connect if this setting is
# active. Note: RSS support requires the python-feedparser package to
# be installed (through a package manager or from the website
# http://code.google.com/p/feedparser/).
RSS_ENABLED = False
RSS_UPDATE_INTERVAL = 60 * 10 # 10 minutes
# Grapevine (grapevine.haus) is a network for listing MUDs as well as allowing
# users of said MUDs to communicate with each other on shared channels. To use,
# your game must first be registered by logging in and creating a game entry at
# https://grapevine.haus. Evennia links grapevine channels to in-game channels
# with the @grapevine2chan command, available once this flag is set.
# Grapevine requires installing the pyopenssl library (pip install pyopenssl)
GRAPEVINE_ENABLED = False
# Grapevine channels to allow connection to. See https://grapevine.haus/chat
# for the available channels. Only channels in this list can be linked to in-game
# channels later.
GRAPEVINE_CHANNELS = ["gossip", "testing"]
# Grapevine authentication credentials. Register your game at
# https://grapevine.haus to get them. These are secret and should thus be
# overridden in your secret_settings file.
GRAPEVINE_CLIENT_ID = ""
GRAPEVINE_CLIENT_SECRET = ""
######################################################################
# Django web features
######################################################################
# While DEBUG is False, show a regular server error page for web errors and
# email the traceback to the people in the ADMINS tuple below. If True,
# show a detailed traceback in the web browser instead. Note however that
# DEBUG mode leaks memory while active, so make sure to turn it off for a
# production server!
DEBUG = False
# Emails are sent to these people if the above DEBUG value is False. If you'd
# rather nobody receive emails, leave this empty.
ADMINS = ()  # (('Your Name', 'your_email@domain.com'),)
# These addresses get broken-link notifications when SEND_BROKEN_LINK_EMAILS is True.
MANAGERS = ADMINS
# This is a public point of contact for players or the public to contact
# a staff member or administrator of the site. It is publicly posted.
STAFF_CONTACT_EMAIL = None
# Absolute path to the directory that holds file uploads from web apps.
# Example: "/home/media/media.lawrence.com"
MEDIA_ROOT = os.path.join(GAME_DIR, "web", "media")
# If using Sites/Pages from the web admin, this value must be set to the
# database-id of the Site (domain) we want to use with this game's Pages.
SITE_ID = 1
# The age for sessions.
# Default: 1209600 (2 weeks, in seconds)
SESSION_COOKIE_AGE = 1209600
# Session cookie domain
# Default: None
SESSION_COOKIE_DOMAIN = None
# The name of the cookie to use for sessions.
# Default: 'sessionid'
SESSION_COOKIE_NAME = "sessionid"
# Should the session expire when the browser closes?
# Default: False
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Where to find locales (no need to change this, most likely)
LOCALE_PATHS = [os.path.join(EVENNIA_DIR, "locale/")]
# This should be turned off unless you want to do tests with Django's
# development webserver (normally Evennia runs its own server)
SERVE_MEDIA = False
# The master urlconf file that contains all of the sub-branches to the
# applications. Change this to add your own URLs to the website.
ROOT_URLCONF = "web.urls"
# Where users are redirected after logging in via contrib.auth.login.
LOGIN_REDIRECT_URL = "/"
# Where to redirect users when using the @login_required decorator.
LOGIN_URL = reverse_lazy("login")
# Where to redirect users who wish to logout.
LOGOUT_URL = reverse_lazy("logout")
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com"
MEDIA_URL = "/media/"
# URL prefix for admin media -- CSS, JavaScript and images. Make sure
# to use a trailing slash. Django 1.4+ will look for admin files under
# STATIC_URL/admin.
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(GAME_DIR, "web", "static")
# Location of static data to overload the defaults from
# evennia/web/webclient and evennia/web/website's static/ dirs.
STATICFILES_DIRS = [os.path.join(GAME_DIR, "web", "static_overrides")]
# Patterns for files in the static directories that should be ignored when
# collecting static files. Used here so the readme file is preserved in the
# directory but not collected.
STATICFILES_IGNORE_PATTERNS = ["README.md"]
# The name of the currently selected web template. This corresponds to the
# directory names shown in the templates directory.
WEBSITE_TEMPLATE = "website"
WEBCLIENT_TEMPLATE = "webclient"
# The default options used by the webclient
WEBCLIENT_OPTIONS = {
"gagprompt": True, # Gags prompt from the output window and keep them
# together with the input bar
"helppopup": False, # Shows help files in a new popup window
"notification_popup": False, # Shows notifications of new messages as
# popup windows
"notification_sound": False # Plays a sound for notifications of new
# messages
}
# We set up the location of the website template as well as the admin site.
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(GAME_DIR, "web", "template_overrides", WEBSITE_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides", WEBCLIENT_TEMPLATE),
os.path.join(GAME_DIR, "web", "template_overrides"),
os.path.join(EVENNIA_DIR, "web", "website", "templates", WEBSITE_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "website", "templates"),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates", WEBCLIENT_TEMPLATE),
os.path.join(EVENNIA_DIR, "web", "webclient", "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.i18n",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.media",
"django.template.context_processors.debug",
"django.contrib.messages.context_processors.messages",
"sekizai.context_processors.sekizai",
"evennia.web.utils.general_context.general_context",
],
# While true, show "pretty" error messages for template syntax errors.
"debug": DEBUG,
},
}
]
# Middleware components are semi-transparent extensions to Django's
# functionality. See http://www.djangoproject.com/documentation/middleware/
# for a more detailed explanation.
MIDDLEWARE = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.messages.middleware.MessageMiddleware", # 1.4?
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.admindocs.middleware.XViewMiddleware",
"django.contrib.flatpages.middleware.FlatpageFallbackMiddleware",
"evennia.web.utils.middleware.SharedLoginMiddleware",
]
######################################################################
# Evennia components
######################################################################
# Global and Evennia-specific apps. This ties everything together so we can
# refer to app models and perform DB syncs.
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.admin",
"django.contrib.admindocs",
"django.contrib.flatpages",
"django.contrib.sites",
"django.contrib.staticfiles",
"django.contrib.messages",
"sekizai",
"evennia.utils.idmapper",
"evennia.server",
"evennia.typeclasses",
"evennia.accounts",
"evennia.objects",
"evennia.comms",
"evennia.help",
"evennia.scripts",
"evennia.web.website",
"evennia.web.webclient",
]
# The user profile extends the User object with more functionality;
# this should usually not be changed.
AUTH_USER_MODEL = "accounts.AccountDB"
# Password validation plugins
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 8},
},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
{"NAME": "evennia.server.validators.EvenniaPasswordValidator"},
]
# Username validation plugins
AUTH_USERNAME_VALIDATORS = [
{"NAME": "django.contrib.auth.validators.ASCIIUsernameValidator"},
{"NAME": "django.core.validators.MinLengthValidator", "OPTIONS": {"limit_value": 3}},
{"NAME": "django.core.validators.MaxLengthValidator", "OPTIONS": {"limit_value": 30}},
{"NAME": "evennia.server.validators.EvenniaUsernameAvailabilityValidator"},
]
# Use a custom test runner that just tests Evennia-specific apps.
TEST_RUNNER = "evennia.server.tests.testrunner.EvenniaTestSuiteRunner"
# Messages and Bootstrap don't classify events the same way; this setting maps
# messages.error() to Bootstrap 'danger' classes.
MESSAGE_TAGS = {messages.ERROR: "danger"}
######################################################################
# Django extensions
######################################################################
# Django extensions are useful third-party tools that are not
# always included in the default Django distribution.
try:
import django_extensions # noqa
INSTALLED_APPS += ["django_extensions"]
except ImportError:
# Django extensions are not installed in all distros.
pass
#######################################################################
# SECRET_KEY
#######################################################################
# This is the signing key for the cookies generated by Evennia's
# web interface.
#
# It is a fallback for the SECRET_KEY setting in settings.py, which
# is randomly seeded when settings.py is first created. If copying
# from here, make sure to change it!
SECRET_KEY = "changeme!(*#&*($&*(#*(&SDFKJJKLS*(@#KJAS"
|
{
"content_hash": "b212fdbd6d47da5964a820122569b46b",
"timestamp": "",
"source": "github",
"line_count": 1010,
"max_line_length": 100,
"avg_line_length": 50.52871287128713,
"alnum_prop": 0.7048830191636948,
"repo_name": "jamesbeebop/evennia",
"id": "15c7807977a7fd9ea6c7468fba492fc24ef52a3d",
"size": "51034",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "evennia/settings_default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "19127"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "13558"
},
{
"name": "JavaScript",
"bytes": "24398"
},
{
"name": "Python",
"bytes": "2143170"
}
],
"symlink_target": ""
}
|
from django import template
from ..models import *
from django.db.models.aggregates import Count
from django.core.cache import cache
register = template.Library()
@register.simple_tag
def get_category():
    # Categories annotated with their post counts, most-used first.
    category_list = Category.objects.annotate(post_num=Count('post')).order_by('-post_num')
    return category_list
@register.simple_tag
def get_tag():
tag_list = Tag.objects.annotate(post_num=Count('post')).order_by('-post_num')
return tag_list
@register.simple_tag
def get_reading_rank(num=5):
post_list = Post.objects.all().order_by('-click_count')[:num]
return post_list
@register.simple_tag
def get_recent_post(num=5):
post_list = Post.objects.all()[:num]
return post_list
@register.simple_tag
def get_blog_owner():
    # Cache the owner summary for a day; recomputing the total view count
    # requires touching every post.
    owner = cache.get('owner', None)
    if not owner:
        from blog.models import User
        user = User.objects.get(id=1)
        post_num = user.post_set.count()
        view_num = sum(post.click_count for post in user.post_set.all())
        owner = {'name': user.username, 'post_num': post_num, 'view_num': view_num, 'avatar': user.avatar.url}
        cache.set('owner', owner, timeout=24 * 60 * 60)
    return owner
return owner
@register.simple_tag
def get_friend_links():
links = FriendLink.objects.all()
return links
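# A hedged template-side sketch of the tags above (the template markup and
# the `name` field are assumptions about the models):
#
#     {% load blog_tags %}
#     {% get_category as category_list %}
#     {% for category in category_list %}
#         {{ category.name }} ({{ category.post_num }})
#     {% endfor %}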
|
{
"content_hash": "ed480a088a84d0d46a7a87ecf8b4d570",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 110,
"avg_line_length": 25.942307692307693,
"alnum_prop": 0.6641957005189029,
"repo_name": "r26zhao/django_blog",
"id": "256b5bce98a0ccefadb6f31058c7419fe7808cd3",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/templatetags/blog_tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "240260"
},
{
"name": "Python",
"bytes": "770615"
}
],
"symlink_target": ""
}
|
import post
from api.api_helpers.common_helper import *
from api.api_helpers.user_helper import *
from util.StringBuilder import *
def create(ds, **args):
required(['username', 'name', 'email', 'about'], args)
optional('isAnonymous', args, False)
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
try:
c.execute(u"""INSERT INTO user (username, name, email, about, isAnonymous)
VALUES (%s, %s, %s, %s, %s)""",
(args['username'], args['name'], args['email'],
args['about'], int(args['isAnonymous'])))
_id = db.insert_id()
db.commit()
except Exception as e:
db.rollback()
raise e
finally:
c.close()
        db.close()
user_data = {
'about': args['about'],
'email': args['email'],
'id': _id,
        'isAnonymous': bool(args['isAnonymous']),  # reflect the stored value
'name': args['name'],
'username': args['username']
}
return user_data
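# A hedged usage sketch for create() (the `ds` datasource comes from the
# surrounding API layer; all values are illustrative):
#
#     user = create(ds, username='jdoe', name='Jane Doe',
#                   email='jdoe@example.com', about='hello',
#                   isAnonymous=False)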
def details(ds, **args):
required(['user'], args)
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
c.execute(u"""SELECT * FROM user
WHERE email = %s""", (args['user'],))
user_data = c.fetchone()
c.close()
check_empty(user_data, u"No user found with that email")
make_boolean(['isAnonymous'], user_data)
user_data['followers'] = listFollowers(ds, handler=echo_email, user=args['user'])
user_data['following'] = listFollowing(ds, handler=echo_email, user=args['user'])
# getting subscriptions
c = db.cursor()
c.execute(u"""SELECT thread_id FROM subscriptions
WHERE user_id = %s""", (user_data['id'],))
user_data['subscriptions'] = [s['thread_id'] for s in c]
c.close()
ds.close(conn['id'])
return user_data
def list_followers_following(ds, who, handler, **args):
required(['user'], args)
optional('limit', args)
optional('order', args, 'desc', ['desc', 'asc'])
optional('since_id', args)
possibles = ['follower', 'followee']
val = 0 if who == 'follower' else 1
def next_val(v):
return (v + 1) % len(possibles)
user_id = get_id_by_email(ds, args['user'])
query = StringBuilder()
query.append(u"""SELECT email FROM followers
INNER JOIN user ON followers.%s = user.id
WHERE %s """ % (possibles[val], possibles[next_val(val)])
+ u"""= %s""")
params = (user_id, )
if args['since_id']:
query.append(u"""AND %s """ % (possibles[val],) + u""">= %s""")
params += (args['since_id'],)
if args['order']:
query.append(u"""ORDER BY user.name %s""" % args['order'])
if args['limit']:
query.append(u"""LIMIT %d""" % int(args['limit']))
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
c.execute(str(query), params)
res = [handler(ds, row['email']) for row in c]
c.close()
ds.close(conn['id'])
return res
def listFollowers(ds, handler=get_info_by_email, **args):
return list_followers_following(ds, 'follower', handler, **args)
def listFollowing(ds, handler=get_info_by_email, **args):
return list_followers_following(ds, 'followee', handler, **args)
def listPosts(ds, **args):
return post.list(ds, **args)
def follow(ds, **args):
required(['follower', 'followee'], args)
follower_id = get_id_by_email(ds, args['follower'])
followee_id = get_id_by_email(ds, args['followee'])
params = (follower_id, followee_id)
query = u"""INSERT INTO followers (follower, followee)
VALUES (%s, %s)"""
ds.close_all()
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
try:
c.execute(query, params)
db.commit()
except Exception as e:
db.rollback()
raise e
finally:
c.close()
ds.close(conn['id'])
return details(ds, user=args['follower'])
def unfollow(ds, **args):
required(['follower', 'followee'], args)
follower_id = get_id_by_email(ds, args['follower'])
followee_id = get_id_by_email(ds, args['followee'])
params = (follower_id, followee_id)
ds.close_all()
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
try:
c.execute(u"""DELETE FROM followers
WHERE follower = %s AND followee = %s""",
params)
db.commit()
except Exception as e:
db.rollback()
raise e
finally:
c.close()
ds.close(conn['id'])
return details(ds, user=args['follower'])
def updateProfile(ds, **args):
required(['about', 'user', 'name'], args)
ds.close_all()
conn = ds.get_db()
db = conn['conn']
c = db.cursor()
try:
c.execute(u"""UPDATE user
SET about = %s,
name = %s
WHERE email = %s""",
(args['about'], args['name'], args['user']))
db.commit()
except Exception as e:
db.rollback()
raise e
finally:
c.close()
ds.close(conn['id'])
return details(ds, user=args['user'])
|
{
"content_hash": "38f569d16a0b5a19006b53726ee396b4",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 85,
"avg_line_length": 25.55121951219512,
"alnum_prop": 0.5399007254677358,
"repo_name": "igorcoding/forum-api",
"id": "b3520aa4330ed1d317b50e18516abb0c31e5f592",
"size": "5238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "608"
},
{
"name": "Python",
"bytes": "48289"
},
{
"name": "SQL",
"bytes": "6263"
},
{
"name": "Shell",
"bytes": "4615"
}
],
"symlink_target": ""
}
|
import unittest
import paddle
import paddle.distributed.fleet as fleet
import paddle.distributed.fleet.base.role_maker as role_maker
import os
import paddle.fluid as fluid
import numpy as np
class TestFleetBase(unittest.TestCase):
def setUp(self):
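        # These environment variables mimic what a distributed launcher would
        # normally export; PaddleCloudRoleMaker reads them to determine this
        # process's role, endpoints and trainer count.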
os.environ["POD_IP"] = "127.0.0.1"
os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36000"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ[
"PADDLE_PSERVERS_IP_PORT_LIST"
] = "127.0.0.1:36001,127.0.0.2:36002"
def test_init(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
def test_is_first_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_first_worker():
print("test fleet first worker done.")
def test_worker_index(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_index())
def test_worker_num(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
print(fleet.worker_num())
def test_is_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_worker():
print("test fleet is worker")
def test_worker_endpoints(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
self.assertEqual(
"127.0.0.1:36000", fleet.worker_endpoints(to_string=True)
)
self.assertEqual(["127.0.0.1:36000"], fleet.worker_endpoints())
def test_server_num(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
os.environ["PADDLE_TRAINERS_NUM"] = "2"
self.assertEqual(2, fleet.server_num())
def test_server_index(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
self.assertEqual(0, fleet.server_index())
def test_server_endpoints(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
if fleet.is_server():
self.assertEqual(
"127.0.0.1:36001,127.0.0.2:36002",
fleet.server_endpoints(to_string=True),
)
self.assertEqual(
["127.0.0.1:36001", "127.0.0.2:36002"], fleet.server_endpoints()
)
def test_is_server(self):
os.environ["TRAINING_ROLE"] = "PSERVER"
os.environ["PADDLE_PORT"] = "36001"
os.environ["POD_IP"] = "127.0.0.1"
role = role_maker.PaddleCloudRoleMaker()
fleet.init(role)
self.assertTrue(fleet.is_server())
def test_util(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
self.assertIsNotNone(fleet.util)
def test_barrier_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
if fleet.is_worker():
fleet.barrier_worker()
def test_init_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with self.assertRaises(ValueError):
if fleet.is_worker():
fleet.init_worker()
def test_stop_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
with self.assertRaises(ValueError):
if fleet.is_worker():
fleet.stop_worker()
def test_distributed_optimizer(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
optimizer = paddle.optimizer.SGD(learning_rate=0.001)
optimizer = fleet.distributed_optimizer(optimizer)
def test_exception(self):
import paddle.distributed.fleet as fleet
self.assertRaises(Exception, fleet.init_worker)
class TestFleetDygraph(unittest.TestCase):
def setUp(self):
os.environ[
"PADDLE_TRAINER_ENDPOINTS"
] = "127.0.0.1:36213,127.0.0.1:36214"
os.environ["PADDLE_CURRENT_ENDPOINTS"] = "127.0.0.1:36213"
os.environ["PADDLE_TRAINERS_NUM"] = "2"
os.environ["PADDLE_TRAINER_ID"] = "0"
def test_dygraph_method(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = fluid.dygraph.to_variable(value)
layer = paddle.nn.Linear(13, 5)
adam = paddle.optimizer.Adam(
learning_rate=0.01, parameters=layer.parameters()
)
        # Don't call fleet.init here; this unit test cannot launch a distributed task.
adam = fleet.distributed_optimizer(adam)
try:
dp_layer = fleet.distributed_model(layer)
except Exception as e:
            # This is just for testing the interface; distributed_model
            # cannot actually run here, so the try-except guards against
            # the expected error while the rest of the API is exercised.
lr = 0.001
adam.set_lr(lr)
cur_lr = adam.get_lr()
assert lr == cur_lr
state_dict = adam.state_dict()
adam.set_state_dict(state_dict)
final_strategy = fleet._final_strategy()
class TestFleetBaseSingleError(unittest.TestCase):
def setUp(self):
os.environ.pop("PADDLE_TRAINER_ENDPOINTS")
def gen_data(self):
return {
"x": np.random.random(size=(128, 32)).astype('float32'),
"y": np.random.randint(2, size=(128, 1)).astype('int64'),
}
def test_single_run_collective_minimize(self):
def test_single_error():
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh')
prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
avg_cost = paddle.mean(x=cost)
fleet.init(is_collective=True)
        # In non-distributed mode (launched with plain `python`), an error is
        # raised if multiple cards are present.
if (
fluid.core.is_compiled_with_cuda()
and fluid.core.get_cuda_device_count() > 1
):
self.assertRaises(ValueError, test_single_error)
else:
test_single_error()
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "b26f22e89742db1981043d2d8130c6e4",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 89,
"avg_line_length": 33.88292682926829,
"alnum_prop": 0.5957385545637777,
"repo_name": "luotao1/Paddle",
"id": "686c7fa1ef75a260f78bde89cee8aa637825d911",
"size": "7557",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_fleet_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# This program is public domain
"""
Parameter expression evaluator.
For systems in which constraints are expressed as string expressions rather
than python code, :func:`compile_constraints` can construct an expression
evaluator that substitutes the computed values of the expressions into the
parameters.
The compiler requires a symbol table, an expression set and a context.
The symbol table maps strings containing fully qualified names such as
'M1.c[3].full_width' to parameter objects with a 'value' property that
can be queried and set. The expression set maps symbol names from the
symbol table to string expressions. The context provides additional symbols
for the expressions in addition to the usual mathematical functions and
constants.
The expressions are compiled and interpreted by python, with only minimal
effort to make sure that they don't contain bad code. The resulting
constraints function returns 0 so it can be used directly in a fit problem
definition.
Extracting the symbol table from the model depends on the structure of the
model. If fitness.parameters() is set correctly, then this should simply
be a matter of walking the parameter data, remembering the path to each
parameter in the symbol table. For compactness, dictionary elements should
be referenced by .name rather than ["name"]. Model name can be used as the
top level.
Getting the parameter expressions applied correctly is challenging.
The following monkey patch works by overriding model_update in FitProblem
so that after setp(p) is called and, the constraints expression can be
applied before telling the underlying fitness function that the model
is out of date::
# Override model update so that parameter constraints are applied
problem._model_update = problem.model_update
def model_update():
constraints()
problem._model_update()
problem.model_update = model_update
Ideally, this interface will change
"""
import math
import re
# simple pattern which matches symbols. Note that it will also match
# invalid substrings such as a3...9, but given syntactically correct
# input it will only match symbols.
_symbol_pattern = re.compile('([a-zA-Z_][a-zA-Z_0-9.]*)')
def _symbols(expr,symtab):
"""
Given an expression string and a symbol table, return the set of symbols
used in the expression. Symbols are only returned once even if they
occur multiple times. The return value is a set with the elements in
no particular order.
This is the first step in computing a dependency graph.
"""
matches = [m.group(0) for m in _symbol_pattern.finditer(expr)]
return set([symtab[m] for m in matches if m in symtab])
def _substitute(expr,mapping):
"""
Replace all occurrences of symbol s with mapping[s] for s in mapping.
"""
# Find the symbols and the mapping
matches = [(m.start(),m.end(),mapping[m.group(1)])
for m in _symbol_pattern.finditer(expr)
if m.group(1) in mapping]
# Split the expression in to pieces, with new symbols replacing old
pieces = []
offset = 0
for start,end,text in matches:
pieces += [expr[offset:start],text]
offset = end
pieces.append(expr[offset:])
# Join the pieces and return them
return "".join(pieces)
def _find_dependencies(symtab, exprs):
"""
Returns a list of pair-wise dependencies from the parameter expressions.
For example, if p3 = p1+p2, then find_dependencies([p1,p2,p3]) will
return [(p3,p1),(p3,p2)]. For base expressions without dependencies,
such as p4 = 2*pi, this should return [(p4, None)]
"""
deps = [(target,source)
for target,expr in exprs.items()
for source in _symbols_or_none(expr,symtab)]
return deps
# Hack to deal with expressions without dependencies --- return a fake
# dependency of None.
# The better solution is fix order_dependencies so that it takes a
# dictionary of {symbol: dependency_list}, for which no dependencies
# is simply []; fix in parameter_mapping as well
def _symbols_or_none(expr,symtab):
syms = _symbols(expr,symtab)
return syms if len(syms) else [None]
def _parameter_mapping(pairs):
"""
Find the parameter substitution we need so that expressions can
be evaluated without having to traverse a chain of
model.layer.parameter.value
"""
left,right = zip(*pairs)
pars = list(sorted(p for p in set(left+right) if p is not None))
definition = dict( ('P%d'%i,p) for i,p in enumerate(pars) )
# p is None when there is an expression with no dependencies
substitution = dict( (p,'P%d.value'%i)
for i,p in enumerate(sorted(pars))
if p is not None)
return definition, substitution
def no_constraints():
"""
This parameter set has no constraints between the parameters.
"""
pass
def compile_constraints(symtab, exprs, context={}):
"""
Build and return a function to evaluate all parameter expressions in
the proper order.
Input:
*symtab* is the symbol table for the model: { 'name': parameter }
*exprs* is the set of computed symbols: { 'name': 'expression' }
*context* is any additional context needed to evaluate the expression
Return:
updater function which sets parameter.value for each expression
Raises:
AssertionError - model, parameter or function is missing
SyntaxError - improper expression syntax
ValueError - expressions have circular dependencies
This function is not terribly sophisticated, and it would be easy to
trick. However it handles the common cases cleanly and generates
reasonable messages for the common errors.
This code has not been fully audited for security. While we have
removed the builtins and the ability to import modules, there may
be other vectors for users to perform more than simple function
evaluations. Unauthenticated users should not be running this code.
Parameter names are assumed to contain only _.a-zA-Z0-9#[]
Both names are provided for inverse functions, e.g., acos and arccos.
Should try running the function to identify syntax errors before
running it in a fit.
Use help(fn) to see the code generated for the returned function fn.
dis.dis(fn) will show the corresponding python vm instructions.
"""
# Sort the parameters in the order they need to be evaluated
deps = _find_dependencies(symtab, exprs)
if deps == []: return no_constraints
order = order_dependencies(deps)
# Rather than using the full path to the parameters in the parameter
# expressions, instead use Pn, and substitute Pn.value for each occurrence
# of the parameter in the expression.
names = list(sorted(symtab.keys()))
parameters = dict(('P%d'%i, symtab[k]) for i,k in enumerate(names))
mapping = dict((k, 'P%d.value'%i) for i,k in enumerate(names))
# Initialize dictionary with available functions
globals = {}
globals.update(math.__dict__)
globals.update(dict(arcsin=math.asin,arccos=math.acos,
arctan=math.atan,arctan2=math.atan2))
globals.update(context)
globals.update(parameters)
globals['id'] = id
locals = {}
# Define the constraints function
assignments = ["=".join((p,exprs[p])) for p in order]
code = [_substitute(s, mapping) for s in assignments]
functiondef = """
def eval_expressions():
'''
%s
'''
%s
return 0
"""%("\n ".join(assignments),"\n ".join(code))
#print("Function: "+functiondef)
    exec(functiondef, globals, locals)
retfn = locals['eval_expressions']
# Remove garbage added to globals by exec
globals.pop('__doc__',None)
globals.pop('__name__',None)
globals.pop('__file__',None)
globals.pop('__builtins__')
#print globals.keys()
return retfn
def order_dependencies(pairs):
"""
Order elements from pairs so that b comes before a in the
ordered list for all pairs (a,b).
"""
#print "order_dependencies",pairs
emptyset = set()
order = []
# Break pairs into left set and right set
left,right = [set(s) for s in zip(*pairs)] if pairs != [] else ([],[])
while pairs != []:
#print "within",pairs
# Find which items only occur on the right
independent = right - left
if independent == emptyset:
cycleset = ", ".join(str(s) for s in left)
            raise ValueError("Cyclic dependencies amongst %s" % cycleset)
# The possibly resolvable items are those that depend on the independents
dependent = set([a for a,b in pairs if b in independent])
pairs = [(a,b) for a,b in pairs if b not in independent]
if pairs == []:
resolved = dependent
else:
left,right = [set(s) for s in zip(*pairs)]
resolved = dependent - left
#print "independent",independent,"dependent",dependent,"resolvable",resolved
order += resolved
#print "new order",order
order.reverse()
return order
# ========= Test code ========
def _check(msg,pairs):
"""
Verify that the list n contains the given items, and that the list
satisfies the partial ordering given by the pairs in partial order.
"""
left,right = zip(*pairs) if pairs != [] else ([],[])
items = set(left)
n = order_dependencies(pairs)
if set(n) != items or len(n) != len(items):
n.sort()
items = list(items); items.sort()
        raise Exception("%s expect %s to contain %s for %s" % (msg, n, items, pairs))
for lo,hi in pairs:
if lo in n and hi in n and n.index(lo) >= n.index(hi):
            raise Exception("%s expect %s before %s in %s for %s" % (msg, lo, hi, n, pairs))
def test_deps():
import numpy as np
# Null case
_check("test empty",[])
# Some dependencies
_check("test1",[(2,7),(1,5),(1,4),(2,1),(3,1),(5,6)])
_check("test1 renumbered",[(6,1),(7,3),(7,4),(6,7),(5,7),(3,2)])
_check("test1 numpy",np.array([(2,7),(1,5),(1,4),(2,1),(3,1),(5,6)]))
# No dependencies
_check("test2",[(4,1),(3,2),(8,4)])
# Cycle test
pairs = [(1,4),(4,3),(4,5),(5,1)]
try: n = order_dependencies(pairs)
except ValueError: pass
    else: raise Exception("test3 expect ValueError exception for %s" % (pairs,))
# large test for gross speed check
A = np.random.randint(4000,size=(1000,2))
A[:,1] += 4000 # Avoid cycles
_check("test-large",A)
# depth tests
k = 200
A = np.array([range(0,k),range(1,k+1)]).T
_check("depth-1",A)
A = np.array([range(1,k+1),range(0,k)]).T
_check("depth-2",A)
def test_expr():
import inspect, dis
import math
symtab = {'a.b.x':1, 'a.c':2, 'a.b':3, 'b.x':4}
expr = 'a.b.x + sin(4*pi*a.c) + a.b.x/a.b'
# Check symbol lookup
assert _symbols(expr, symtab) == set([1,2,3])
# Check symbol rename
assert _substitute(expr,{'a.b.x':'Q'}) == 'Q + sin(4*pi*a.c) + Q/a.b'
assert _substitute(expr,{'a.b':'Q'}) == 'a.b.x + sin(4*pi*a.c) + a.b.x/Q'
# Check dependency builder
# Fake parameter class
class Parameter:
def __init__(self, name, value=0, expression=''):
self.path = name
self.value = value
self.expression = expression
def iscomputed(self): return (self.expression != '')
def __repr__(self): return self.path
def world(*pars):
symtab = dict((p.path,p) for p in pars)
exprs = dict((p.path,p.expression) for p in pars if p.iscomputed())
return symtab, exprs
p1 = Parameter('G0.sigma',5)
p2 = Parameter('other',expression='2*pi*sin(G0.sigma/.1875) + M1.G1')
p3 = Parameter('M1.G1',6)
p4 = Parameter('constant',expression='2*pi*35')
# Simple chain
assert set(_find_dependencies(*world(p1,p2,p3))) == set([(p2.path,p1),(p2.path,p3)])
# Constant expression
assert set(_find_dependencies(*world(p1,p4))) == set([(p4.path,None)])
# No dependencies
assert set(_find_dependencies(*world(p1,p3))) == set([])
# Check function builder
fn = compile_constraints(*world(p1,p2,p3))
# Inspect the resulting function
if 0:
print(inspect.getdoc(fn))
print(dis.dis(fn))
# Evaluate the function and see if it updates the
# target value as expected
fn()
expected = 2*math.pi*math.sin(5/.1875) + 6
assert p2.value == expected,"Value was %s, not %s"%(p2.value,expected)
# Check empty dependency set doesn't crash
fn = compile_constraints(*world(p1,p3))
fn()
# Check that constants are evaluated properly
fn = compile_constraints(*world(p4))
fn()
assert p4.value == 2*math.pi*35
# Check additional context example; this also tests multiple
# expressions
class Table:
Si = 2.09
values = {'Si': 2.07}
tbl = Table()
p5 = Parameter('lookup',expression="tbl.Si")
fn = compile_constraints(*world(p1,p2,p3,p5),context=dict(tbl=tbl))
fn()
assert p5.value == 2.09,"Value for %s was %s"%(p5.expression,p5.value)
p5.expression = "tbl.values['Si']"
fn = compile_constraints(*world(p1,p2,p3,p5),context=dict(tbl=tbl))
fn()
assert p5.value == 2.07,"Value for %s was %s"%(p5.expression,p5.value)
# Verify that we capture invalid expressions
for expr in ['G4.cage', 'M0.cage', 'M1.G1 + *2',
'piddle',
'5; import sys; print "p0wned"',
'__import__("sys").argv']:
try:
p6 = Parameter('broken',expression=expr)
fn = compile_constraints(*world(p6))
fn()
except Exception as msg:
#print(msg)
pass
else:
raise "Failed to raise error for %s"%expr
if __name__ == "__main__":
test_expr()
test_deps()
|
{
"content_hash": "245cad576989b8571afdda3279bd7dbf",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 88,
"avg_line_length": 34.84577114427861,
"alnum_prop": 0.6424186179326099,
"repo_name": "lewisodriscoll/sasview",
"id": "c99740efbb2026da7ba3048ee725eedf8cec42e7",
"size": "14008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sas/sascalc/pr/fit/expression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AutoIt",
"bytes": "5122"
},
{
"name": "Batchfile",
"bytes": "9544"
},
{
"name": "C",
"bytes": "79248"
},
{
"name": "C++",
"bytes": "228413"
},
{
"name": "HTML",
"bytes": "9252"
},
{
"name": "Makefile",
"bytes": "28052"
},
{
"name": "Python",
"bytes": "3696992"
},
{
"name": "Shell",
"bytes": "12936"
}
],
"symlink_target": ""
}
|
from ctypes import c_int, c_char
from warnings import warn
import openmc.exceptions as exc
from . import _dll
def _error_handler(err, func, args):
"""Raise exception according to error code."""
# Get error code corresponding to global constant.
def errcode(s):
return c_int.in_dll(_dll, s).value
# Get error message set by OpenMC library
errmsg = (c_char*256).in_dll(_dll, 'openmc_err_msg')
msg = errmsg.value.decode()
# Raise exception type corresponding to error code
if err == errcode('OPENMC_E_ALLOCATE'):
raise exc.AllocationError(msg)
elif err == errcode('OPENMC_E_OUT_OF_BOUNDS'):
raise exc.OutOfBoundsError(msg)
elif err == errcode('OPENMC_E_INVALID_ARGUMENT'):
raise exc.InvalidArgumentError(msg)
elif err == errcode('OPENMC_E_INVALID_TYPE'):
raise exc.InvalidTypeError(msg)
    elif err == errcode('OPENMC_E_INVALID_ID'):
raise exc.InvalidIDError(msg)
elif err == errcode('OPENMC_E_GEOMETRY'):
raise exc.GeometryError(msg)
elif err == errcode('OPENMC_E_DATA'):
raise exc.DataError(msg)
elif err == errcode('OPENMC_E_PHYSICS'):
raise exc.PhysicsError(msg)
elif err == errcode('OPENMC_E_WARNING'):
warn(msg)
elif err < 0:
raise exc.OpenMCError("Unknown error encountered (code {}).".format(err))
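# Typical wiring (a sketch, not necessarily how this package attaches it):
# ctypes calls an errcheck hook with (result, func, args), so a foreign
# function can be covered via e.g.
#   _dll.openmc_init.errcheck = _error_handler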
|
{
"content_hash": "8fb63fb12167897f8c582917cc6499a7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 81,
"avg_line_length": 34.87179487179487,
"alnum_prop": 0.6544117647058824,
"repo_name": "wbinventor/openmc",
"id": "b35de4e60c9aadc9da2b845b2e9b98089357aebd",
"size": "1360",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "openmc/capi/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9675"
},
{
"name": "C++",
"bytes": "1289928"
},
{
"name": "CMake",
"bytes": "11264"
},
{
"name": "Dockerfile",
"bytes": "1427"
},
{
"name": "Python",
"bytes": "2653785"
},
{
"name": "Shell",
"bytes": "2519"
}
],
"symlink_target": ""
}
|
import logging
import os
import subprocess
import sys
import virtualenv
from .activate_this import write_activate_this
LOGGER = logging.getLogger(__name__)
PREFIX = getattr(sys, "prefix", None)
REAL_PREFIX = getattr(sys, "real_prefix", None)
def install_virtualenv(install_dir):
if is_virtualenv(install_dir):
return
subprocess.call(
[
sys.executable,
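            # strip a trailing "c" so a cached "virtualenv.pyc" path falls
            # back to the "virtualenv.py" source the interpreter can run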
virtualenv.__file__.rstrip("c"),
"--no-site-packages",
"--always-copy",
install_dir,
]
)
write_activate_this(install_dir)
VIRTUALENV_FILES = {"activate file": os.path.join("bin", "activate")}
def is_virtualenv(path):
""" validate if the path is already a virtualenv """
for name, venv_path in VIRTUALENV_FILES.items():
target_path = os.path.join(path, venv_path)
if not os.path.exists(target_path):
return False
return True
|
{
"content_hash": "2737e0dfee341311a9657efba6244547",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 69,
"avg_line_length": 23.71794871794872,
"alnum_prop": 0.6237837837837837,
"repo_name": "toumorokoshi/uranium",
"id": "db9c83b5ca8ee96841ac064411c6b452e15cdb82",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uranium/lib/sandbox/venv/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "346"
},
{
"name": "Python",
"bytes": "106526"
}
],
"symlink_target": ""
}
|
"""Mock:
CNRead.describe(session, pid) → DescribeResponse
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/CN_APIs.html#CNRead.describe
MNRead.describe(session, pid) → DescribeResponse
https://releases.dataone.org/online/api-documentation-v2.0.1/apis/MN_APIs.html#MNRead.describe
A DataONEException can be triggered by adding a custom header. See d1_exception.py
"""
import logging
import re
import responses
import d1_common.checksum
import d1_common.const
import d1_common.date_time
import d1_common.url
import d1_test.instance_generator.sciobj
import d1_test.mock_api.d1_exception
import d1_test.mock_api.util
# Config
DESCRIBE_ENDPOINT_RX = r"v([123])/object/(.*)"
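# e.g. "v2/object/my-pid" matches with API version "2" and PID "my-pid"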
def add_callback(base_url):
responses.add_callback(
responses.HEAD,
re.compile(
r"^" + d1_common.url.joinPathElements(base_url, DESCRIBE_ENDPOINT_RX)
),
callback=_request_callback,
content_type="",
)
def _request_callback(request):
logging.debug('Received callback. url="{}"'.format(request.url))
# Return DataONEException if triggered
exc_response_tup = d1_test.mock_api.d1_exception.trigger_by_header(request)
if exc_response_tup:
return exc_response_tup
# Return NotFound
pid, client = _parse_url(request.url)
if pid.startswith("<NotFound>"):
return d1_test.mock_api.d1_exception.trigger_by_status_code(request, 404)
# Return regular response
pid, sid, sciobj_bytes, sysmeta_pyxb = d1_test.instance_generator.sciobj.generate_reproducible_sciobj_with_sysmeta(
client, pid
)
header_dict = _create_headers(sciobj_bytes, sysmeta_pyxb)
return 200, header_dict, ""
def _parse_url(url):
version_tag, endpoint_str, param_list, query_dict, client = d1_test.mock_api.util.parse_rest_url(
url
)
assert endpoint_str == "object"
assert len(param_list) == 1, "describe() accepts a single parameter, the PID"
return param_list[0], client
def _create_headers(sciobj_bytes, sysmeta_pyxb):
checksum_pyxb = d1_common.checksum.create_checksum_object_from_bytes(sciobj_bytes)
return {
"Content-Length": str(sysmeta_pyxb.size),
"Content-Type": d1_common.const.CONTENT_TYPE_OCTET_STREAM,
"Last-Modified": str(d1_common.date_time.utc_now()),
"DataONE-FormatId": sysmeta_pyxb.formatId,
"DataONE-Checksum": "{},{}".format(
checksum_pyxb.algorithm, checksum_pyxb.value()
),
"DataONE-SerialVersion": "3",
}
|
{
"content_hash": "261d82be90d79adc817e78c9cfdcdfd8",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 119,
"avg_line_length": 31.5625,
"alnum_prop": 0.6887128712871287,
"repo_name": "DataONEorg/d1_python",
"id": "f2f8e0a9700ffce7c4f3e7ca425624d2a7bc6eab",
"size": "3340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_utilities/src/d1_test/mock_api/describe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4798"
},
{
"name": "HTML",
"bytes": "13358"
},
{
"name": "Inno Setup",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "3547939"
},
{
"name": "Shell",
"bytes": "5670"
},
{
"name": "XSLT",
"bytes": "89205"
}
],
"symlink_target": ""
}
|
import os
import pytest
from wrimg.devices import Device
@pytest.fixture
def blkpath():
return '/dev/sdg'
@pytest.fixture
def regpath():
return 'setup.py'
@pytest.fixture
def regdev(regpath):
return Device(regpath)
@pytest.fixture
def blkdev(blkpath):
return Device(blkpath)
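# NOTE: test_fixtures_exist asserts that /dev/sdg actually exists, so the
# tests below only pass on a machine prepared with such a removable device.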
def test_fixtures_exist(blkpath, regpath):
assert os.path.exists(blkpath)
assert os.path.exists(regpath)
def test_is_device_reg(regdev):
assert regdev.is_device is False
def test_is_device_blk(blkdev):
assert blkdev.is_device is True
def test_removable(blkdev):
assert blkdev.is_removable is True
|
{
"content_hash": "41b1a8517623db03f4f483ae0f156484",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 42,
"avg_line_length": 14.904761904761905,
"alnum_prop": 0.7204472843450479,
"repo_name": "mbr/wrimg",
"id": "2a98d70bdb48fdc49f4a3484be036c2a3c82a9de",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14282"
}
],
"symlink_target": ""
}
|
from log import init_logger
class PathMapper(object):
def __init__(self, mapping):
super(PathMapper, self).__init__()
self.mapping = mapping
self.logger = init_logger('PathMapper')
def map(self, path):
potential_keys = [key for key in self.mapping.keys() if path.startswith(key)]
potential_keys.sort(key=len, reverse=True)
if len(potential_keys) > 0:
resulting_path = self.build_resulting_path(potential_keys[0], path)
self.logger.info("Path %s mapped to %s" % (path, resulting_path))
return resulting_path
else:
return path
def build_resulting_path(self, match, path):
mapped_match = self.mapping[match]
return mapped_match.join(path.rsplit(match))
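# Usage sketch (hypothetical mapping): the longest matching prefix wins, and
# every occurrence of it is rewritten, since rsplit splits on all occurrences:
#   PathMapper({'/downloads': '/media', '/downloads/tv': '/tv'}).map('/downloads/tv/x')
#   -> '/tv/x'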
|
{
"content_hash": "590b57619f75dd8d00f5a87ffa1e6b31",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 85,
"avg_line_length": 35.72727272727273,
"alnum_prop": 0.6183206106870229,
"repo_name": "miguelaferreira/transmission-postprocess",
"id": "19c8d910ca0d318963db31562b5990f80cbc45ce",
"size": "786",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trdone/path_mapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "889"
},
{
"name": "Python",
"bytes": "21805"
},
{
"name": "Shell",
"bytes": "1298"
}
],
"symlink_target": ""
}
|
from factory import Sequence, PostGenerationMethodCall
from factory.alchemy import SQLAlchemyModelFactory
from vBurgundy.user.models import User
from vBurgundy.database import db
class BaseFactory(SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
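    # Sequence generates user0, user1, ... so every created user is unique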
username = Sequence(lambda n: "user{0}".format(n))
email = Sequence(lambda n: "user{0}@example.com".format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
model = User
|
{
"content_hash": "a0437a724eaa9d7f9e67d09b13f818ec",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 66,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.729776247848537,
"repo_name": "michaelrice/vBurgundy",
"id": "b0c20b1d47e4fe933b039dcc0229b342e4764f90",
"size": "605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "198994"
},
{
"name": "JavaScript",
"bytes": "240900"
},
{
"name": "Python",
"bytes": "30193"
}
],
"symlink_target": ""
}
|
"""Tests for discussions API"""
# pylint: disable=redefined-outer-name
from django.core.exceptions import ImproperlyConfigured
from django.db.models.signals import post_save
from opensearch_dsl import Search
from factory.django import mute_signals
from open_discussions_api.constants import ROLE_STAFF
import pytest
from requests.exceptions import HTTPError
from requests import Response
from rest_framework import status as statuses
from courses.factories import ProgramFactory
from dashboard.factories import ProgramEnrollmentFactory
from discussions import api
from discussions.exceptions import (
ChannelAlreadyExistsException,
ChannelCreationException,
ContributorSyncException,
DiscussionUserSyncException,
ModeratorSyncException,
SubscriberSyncException,
)
from discussions.factories import (
ChannelFactory,
ChannelProgramFactory,
DiscussionUserFactory,
)
from discussions.models import (
Channel,
ChannelProgram,
DiscussionUser,
)
from profiles.factories import (
ProfileFactory,
UserFactory,
)
from roles.factories import RoleFactory
from roles.roles import Staff
from search.models import (
PercolateQuery,
PercolateQueryMembership,
)
pytestmark = [
pytest.mark.usefixtures('mocked_opensearch'),
pytest.mark.usefixtures('mocked_on_commit'),
pytest.mark.django_db,
]
# pylint: disable=too-many-locals, unused-argument
@pytest.fixture
def mock_staff_client(mocker):
"""Mocks the staff client"""
return mocker.patch('discussions.api.get_staff_client').return_value
@pytest.mark.parametrize("secret, base_url, username", [
(None, 'base_url', 'username'),
('secret', None, 'username'),
('secret', 'base_url', None),
])
def test_get_staff_client_config_errors(settings, secret, base_url, username):
"""Assert that get_staff_client raises config errors"""
settings.OPEN_DISCUSSIONS_JWT_SECRET = secret
settings.OPEN_DISCUSSIONS_BASE_URL = base_url
settings.OPEN_DISCUSSIONS_API_USERNAME = username
with pytest.raises(ImproperlyConfigured):
api.get_staff_client()
def test_get_staff_client_config_valid(settings):
"""Test that get_staff_client returns a configured client"""
settings.OPEN_DISCUSSIONS_JWT_SECRET = 'secret'
settings.OPEN_DISCUSSIONS_BASE_URL = 'base_url'
settings.OPEN_DISCUSSIONS_API_USERNAME = 'username'
assert api.get_staff_client().roles == [ROLE_STAFF]
def test_create_or_update_discussion_user_no_username(mocker):
"""Test that create_or_update_discussion_user creates if we don't have a username"""
create_mock = mocker.patch('discussions.api.create_discussion_user')
update_mock = mocker.patch('discussions.api.update_discussion_user')
with mute_signals(post_save):
profile = ProfileFactory.create()
assert DiscussionUser.objects.count() == 0
api.create_or_update_discussion_user(profile.user_id)
assert create_mock.call_count == 1
assert update_mock.call_count == 0
assert DiscussionUser.objects.count() == 1
@pytest.mark.parametrize('enable_update', [True, False])
def test_create_or_update_discussion_user_has_username(mocker, enable_update, settings):
"""Test that create_or_update_discussion_user updates if we have a username"""
settings.FEATURES['OPEN_DISCUSSIONS_USER_UPDATE'] = enable_update
create_mock = mocker.patch('discussions.api.create_discussion_user')
update_mock = mocker.patch('discussions.api.update_discussion_user')
with mute_signals(post_save):
profile = ProfileFactory.create()
DiscussionUser.objects.create(user=profile.user, username='username')
api.create_or_update_discussion_user(profile.user_id)
assert create_mock.call_count == 0
assert update_mock.call_count == (1 if enable_update else 0)
assert DiscussionUser.objects.count() == 1
def test_create_discussion_user(mock_staff_client):
"""Verify create_discussion_user makes the correct API calls"""
mock_response = mock_staff_client.users.create.return_value
mock_response.status_code = 201
mock_response.json.return_value = {
'username': 'username'
}
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user)
api.create_discussion_user(discussion_user)
assert discussion_user.username == 'username'
mock_staff_client.users.create.assert_called_once_with(
profile.user.username,
email=profile.user.email,
profile=dict(
name=profile.full_name,
image=profile.image.url if profile.image else None,
image_small=profile.image_small.url if profile.image_small else None,
image_medium=profile.image_medium.url if profile.image_medium else None,
email_optin=profile.email_optin
)
)
def test_create_discussion_user_error(mock_staff_client):
"""Verify create_discussion_user handles non 2xx status codes"""
mock_staff_client.users.create.return_value.raise_for_status.side_effect = HTTPError
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user)
with pytest.raises(DiscussionUserSyncException) as exc:
api.create_discussion_user(discussion_user)
assert str(exc.value) == "Error creating discussion user for {}".format(profile.user.username)
def test_update_discussion_user(mock_staff_client):
"""Verify update_discussion_user makes the correct API calls"""
mock_response = mock_staff_client.users.update.return_value
mock_response.status_code = 200
mock_response.json.return_value = {
'username': 'username'
}
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
api.update_discussion_user(discussion_user)
mock_staff_client.users.update.assert_called_once_with(
discussion_user.username,
uid=discussion_user.user.username,
email=profile.user.email,
profile=dict(
name=profile.full_name,
image=profile.image.url if profile.image else None,
image_small=profile.image_small.url if profile.image_small else None,
image_medium=profile.image_medium.url if profile.image_medium else None,
)
)
def test_update_discussion_user_with_email_optin(mock_staff_client):
"""Verify update_discussion_user makes the correct API calls"""
mock_response = mock_staff_client.users.update.return_value
mock_response.status_code = 200
mock_response.json.return_value = {
'username': 'username'
}
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
api.update_discussion_user(discussion_user, allow_email_optin=True)
mock_staff_client.users.update.assert_called_once_with(
discussion_user.username,
uid=discussion_user.user.username,
email=profile.user.email,
profile=dict(
name=profile.full_name,
image=profile.image.url if profile.image else None,
image_small=profile.image_small.url if profile.image_small else None,
image_medium=profile.image_medium.url if profile.image_medium else None,
email_optin=profile.email_optin
)
)
def test_update_discussion_user_no_update(mock_staff_client):
"""Verify update_discussion_user makes the correct API calls"""
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user, username='user1', last_sync=profile.updated_on)
api.update_discussion_user(discussion_user)
assert mock_staff_client.users.update.call_count == 0
def test_update_discussion_user_error(mock_staff_client):
"""Verify update_discussion_user handles non-2xx status codes"""
mock_staff_client.users.update.return_value.raise_for_status.side_effect = HTTPError
with mute_signals(post_save):
profile = ProfileFactory.create()
discussion_user = DiscussionUser.objects.create(user=profile.user, username='username')
with pytest.raises(DiscussionUserSyncException) as exc:
api.update_discussion_user(discussion_user)
assert str(exc.value) == "Error updating discussion user for {}".format(profile.user.username)
def test_add_to_channel(mock_staff_client):
"""add_to_channel should add user as contributor and subscriber"""
channel_name = 'channel'
discussion_username = 'username'
api.add_to_channel(channel_name, discussion_username)
mock_staff_client.channels.add_contributor.assert_called_once_with(channel_name, discussion_username)
mock_staff_client.channels.add_subscriber.assert_called_once_with(channel_name, discussion_username)
def test_add_to_channel_failed_contributor(mock_staff_client):
"""add_to_channel should raise an exception if it fails to add a contributor"""
mock_staff_client.channels.add_contributor.return_value.raise_for_status.side_effect = HTTPError
with pytest.raises(ContributorSyncException) as ex:
api.add_to_channel('channel', 'user')
assert ex.value.args[0] == 'Error adding contributor user to channel channel'
assert mock_staff_client.channels.add_subscriber.called is False
def test_add_to_channel_failed_subscriber(mock_staff_client):
"""add_to_channel should raise an exception if it fails to add a subscriber"""
channel_name = 'channel'
discussion_username = 'username'
mock_staff_client.channels.add_subscriber.return_value.raise_for_status.side_effect = HTTPError
with pytest.raises(SubscriberSyncException) as ex:
api.add_to_channel(channel_name, discussion_username)
assert ex.value.args[0] == 'Error adding subscriber {user} to channel {channel}'.format(
user=discussion_username,
channel=channel_name,
)
mock_staff_client.channels.add_contributor.assert_called_once_with(channel_name, discussion_username)
mock_staff_client.channels.add_subscriber.assert_called_once_with(channel_name, discussion_username)
@pytest.mark.parametrize("contributor_status_code,subscriber_status_code", [
(statuses.HTTP_200_OK, statuses.HTTP_200_OK),
(statuses.HTTP_404_NOT_FOUND, statuses.HTTP_404_NOT_FOUND),
(statuses.HTTP_409_CONFLICT, statuses.HTTP_404_NOT_FOUND),
])
def test_remove_from_channel(mock_staff_client, contributor_status_code, subscriber_status_code):
"""remove_from_channel should remove a user's contributor and subscriber status"""
channel_name = 'channel'
discussion_username = 'username'
api.remove_from_channel(channel_name, discussion_username)
mock_staff_client.channels.remove_contributor.assert_called_once_with(channel_name, discussion_username)
mock_staff_client.channels.remove_subscriber.assert_called_once_with(channel_name, discussion_username)
@pytest.mark.parametrize("status_code", [
statuses.HTTP_400_BAD_REQUEST,
statuses.HTTP_401_UNAUTHORIZED,
statuses.HTTP_403_FORBIDDEN,
statuses.HTTP_500_INTERNAL_SERVER_ERROR,
statuses.HTTP_505_HTTP_VERSION_NOT_SUPPORTED
])
def test_remove_from_channel_failed_contributor(mock_staff_client, status_code):
"""
remove_from_channel should raise an exception if it fails to remove a user's contributor status,
depending on the status code
"""
channel_name = 'channel'
discussion_username = 'user'
response = mock_staff_client.channels.remove_contributor.return_value
response.ok = False
response.status_code = status_code
response.raise_for_status.side_effect = HTTPError
with pytest.raises(ContributorSyncException) as ex:
api.remove_from_channel(channel_name, discussion_username)
assert ex.value.args[0] == 'Unable to remove a contributor user from channel channel'
mock_staff_client.channels.remove_contributor.assert_called_once_with(channel_name, discussion_username)
mock_staff_client.channels.remove_subscriber.assert_called_once_with(channel_name, discussion_username)
@pytest.mark.parametrize("status_code", [
statuses.HTTP_400_BAD_REQUEST,
statuses.HTTP_401_UNAUTHORIZED,
statuses.HTTP_403_FORBIDDEN,
statuses.HTTP_409_CONFLICT,
statuses.HTTP_500_INTERNAL_SERVER_ERROR,
statuses.HTTP_505_HTTP_VERSION_NOT_SUPPORTED
])
def test_remove_from_channel_failed_subscriber(mock_staff_client, status_code):
"""
remove_from_channel should raise an exception if it fails to remove a user's subscriber status,
depending on the status code
"""
mock_staff_client.channels.remove_contributor.return_value.ok = True
response = mock_staff_client.channels.remove_subscriber.return_value
response.ok = False
response.status_code = status_code
response.raise_for_status.side_effect = HTTPError
channel_name = 'channel'
discussion_username = 'username'
with pytest.raises(SubscriberSyncException) as ex:
api.remove_from_channel(channel_name, discussion_username)
assert ex.value.args[0] == 'Unable to remove a subscriber username from channel channel'
mock_staff_client.channels.remove_subscriber.assert_called_once_with(channel_name, discussion_username)
assert mock_staff_client.channels.remove_contributor.called is False
def test_get_membership_ids_needing_sync(patched_users_api):
"""
Tests that get_membership_ids_needing_sync only returns ids for the correct records
"""
user1 = UserFactory.create()
user2 = UserFactory.create()
user3 = UserFactory.create()
with mute_signals(post_save):
user3.profile.delete()
member_channels = [ChannelFactory.create() for _ in range(4)]
nonmember_channels = [ChannelFactory.create() for _ in range(3)]
# these should show up in results
memberships_to_add = [
PercolateQueryMembership.objects.create(user=user1, query=channel.query, needs_update=True, is_member=True)
for channel in member_channels
]
memberships_to_remove = [
PercolateQueryMembership.objects.create(user=user1, query=channel.query, needs_update=True, is_member=False)
for channel in nonmember_channels
]
# these shouldn't show up in results
memberships_add_no_update = [
PercolateQueryMembership.objects.create(user=user2, query=channel.query, needs_update=False, is_member=True)
for channel in member_channels
]
memberships_remove_no_update = [
PercolateQueryMembership.objects.create(user=user2, query=channel.query, needs_update=False, is_member=False)
for channel in nonmember_channels
]
memberships_add_no_profile = [
PercolateQueryMembership.objects.create(user=user3, query=channel.query, needs_update=True, is_member=True)
for channel in member_channels
]
memberships_remove_no_profile = [
PercolateQueryMembership.objects.create(user=user3, query=channel.query, needs_update=True, is_member=False)
for channel in nonmember_channels
]
results = api.get_membership_ids_needing_sync()
for membership in memberships_to_add + memberships_to_remove:
assert membership.id in results
for membership in (
memberships_add_no_update + memberships_remove_no_update +
memberships_add_no_profile + memberships_remove_no_profile
):
assert membership.id not in results
def test_ordering_get_membership_ids_needing_sync(patched_users_api):
"""Test that get_membership_ids_needing_sync returns ordered list based on is_member (True before False)
and updated_on (most recent first)."""
users = [UserFactory.create() for _ in range(4)]
channel = ChannelFactory.create()
memberships_is_member_true = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=True)
for user in users[:2]
]
memberships_is_member_false = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=False)
for user in users[2:]
]
memberships_is_member_true.reverse()
memberships_is_member_false.reverse()
expected_order = []
for membership in memberships_is_member_true + memberships_is_member_false:
expected_order.append(membership.id)
results = api.get_membership_ids_needing_sync()
assert expected_order == list(results)
def test_sync_channel_memberships(mocker, patched_users_api):
"""
    sync_channel_memberships should add or remove the user's membership from channels, not touching channels where
the user is a moderator of at least one program
"""
user = UserFactory.create()
# member here means the user matches the percolate query of the channel
member_channels = [ChannelFactory.create() for _ in range(4)]
nonmember_channels = [ChannelFactory.create() for _ in range(3)]
# first channel of members and first channel of nonmembers are skipped since user is staff
channels_to_add = member_channels[1:]
channels_to_remove = nonmember_channels[1:]
# User is a staff of some channels and not of others.
    # Note that a staff user may or may not match the percolate query of a channel
staff_programs = [
ChannelProgramFactory.create(channel=member_channels[0]).program,
ChannelProgramFactory.create(channel=nonmember_channels[0]).program,
]
non_staff_programs = [
ChannelProgramFactory.create(channel=channel).program
for channel in (channels_to_add + channels_to_remove)
]
memberships_to_add = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=True)
for channel in member_channels
]
memberships_to_remove = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=False)
for channel in nonmember_channels
]
for program in staff_programs:
with mute_signals(post_save):
RoleFactory.create(program=program, user=user, role=Staff.ROLE_ID)
# Enroll the user in all programs. This isn't technically required but it's unrealistic to have a query
# matching a user if they are not enrolled in the program.
for program in staff_programs + non_staff_programs:
ProgramEnrollmentFactory.create(program=program, user=user)
# One percolate query per channel
assert PercolateQuery.objects.count() == len(member_channels) + len(nonmember_channels)
add_subscriber_stub = mocker.patch(
'discussions.api.add_subscriber_to_channel',
autospec=True,
)
add_contributor_stub = mocker.patch(
'discussions.api.add_contributor_to_channel',
autospec=True,
)
remove_subscriber_stub = mocker.patch(
'discussions.api.remove_subscriber_from_channel',
autospec=True,
)
remove_contributor_stub = mocker.patch(
'discussions.api.remove_contributor_from_channel',
autospec=True,
)
api.sync_channel_memberships(api.get_membership_ids_needing_sync())
created_stub, _ = patched_users_api
created_stub.assert_any_call(user.discussion_user)
assert add_subscriber_stub.call_count == len(channels_to_add)
assert add_contributor_stub.call_count == len(channels_to_add)
assert remove_subscriber_stub.call_count == len(channels_to_remove)
assert remove_contributor_stub.call_count == len(channels_to_remove)
for membership in memberships_to_add + memberships_to_remove:
membership.refresh_from_db()
assert membership.needs_update is False
for channel in channels_to_add:
add_subscriber_stub.assert_any_call(channel.name, user.discussion_user.username)
add_contributor_stub.assert_any_call(channel.name, user.discussion_user.username)
for channel in channels_to_remove:
remove_contributor_stub.assert_any_call(channel.name, user.discussion_user.username)
remove_subscriber_stub.assert_any_call(channel.name, user.discussion_user.username)
def test_sync_channel_memberships_api_error(mocker, patched_users_api):
"""
    sync_channel_memberships should not fail hard on a sync exception
"""
user = UserFactory.create()
# member here means the user matches the percolate query of the channel
channels_to_add = [ChannelFactory.create() for _ in range(4)]
channels_to_remove = [ChannelFactory.create() for _ in range(3)]
programs = [
ChannelProgramFactory.create(channel=channel).program
for channel in (channels_to_add + channels_to_remove)
]
memberships_to_add = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=True)
for channel in channels_to_add
]
memberships_to_remove = [
PercolateQueryMembership.objects.create(user=user, query=channel.query, needs_update=True, is_member=False)
for channel in channels_to_remove
]
# Enroll the user in all programs. This isn't technically required but it's unrealistic to have a query
# matching a user if they are not enrolled in the program.
for program in programs:
ProgramEnrollmentFactory.create(program=program, user=user)
# One percolate query per channel
assert PercolateQuery.objects.count() == len(channels_to_add) + len(channels_to_remove)
# these are the first calls to be made for either change
add_contributor_stub = mocker.patch(
'discussions.api.add_contributor_to_channel',
autospec=True,
side_effect=DiscussionUserSyncException
)
remove_subscriber_stub = mocker.patch(
'discussions.api.remove_subscriber_from_channel',
autospec=True,
side_effect=DiscussionUserSyncException
)
api.sync_channel_memberships(api.get_membership_ids_needing_sync())
created_stub, _ = patched_users_api
created_stub.assert_any_call(user.discussion_user)
assert add_contributor_stub.call_count == len(channels_to_add)
assert remove_subscriber_stub.call_count == len(channels_to_remove)
# should still need updates since everything failed
for membership in memberships_to_add + memberships_to_remove:
membership.refresh_from_db()
assert membership.needs_update is True
for channel in channels_to_add:
add_contributor_stub.assert_any_call(channel.name, user.discussion_user.username)
for channel in channels_to_remove:
remove_subscriber_stub.assert_any_call(channel.name, user.discussion_user.username)
def test_add_channel(settings, mock_staff_client, mocker, patched_users_api):
"""add_channel should tell open-discussions to create a channel"""
mock_staff_client.channels.create.return_value.ok = True
settings.FEATURES['OPEN_DISCUSSIONS_USER_UPDATE'] = True
title = "title"
name = "name"
description = "description"
channel_type = "private"
input_search = Search.from_dict({"unmodified": "search"})
modified_search = Search.from_dict({"result": "modified"})
adjust_search_for_percolator_stub = mocker.patch(
'discussions.api.adjust_search_for_percolator',
autospec=True,
return_value=modified_search,
)
program = ProgramFactory.create()
contributors = [UserFactory.create() for _ in range(5)]
for user in contributors:
ProgramEnrollmentFactory.create(user=user, program=program)
populate_memberships_task_stub = mocker.patch('search.api.populate_query_memberships', autospec=True)
add_moderators_task_stub = mocker.patch('discussions.api.add_moderators_to_channel', autospec=True)
add_subscriber_stub = mocker.patch('discussions.api.add_subscriber_to_channel', autospec=True)
add_moderator_stub = mocker.patch('discussions.api.add_moderator_to_channel', autospec=True)
mod = UserFactory.create()
channel = api.add_channel(
original_search=input_search,
title=title,
name=name,
description=description,
channel_type=channel_type,
program_id=program.id,
creator_id=mod.id,
)
mock_staff_client.channels.create.assert_called_once_with(
title=title,
name=name,
description=description,
channel_type=channel_type,
)
adjust_search_for_percolator_stub.assert_called_once_with(input_search)
assert channel.name == name
query = channel.query
assert query.source_type == PercolateQuery.DISCUSSION_CHANNEL_TYPE
assert query.original_query == input_search.to_dict()
assert query.query == modified_search.to_dict()
assert ChannelProgram.objects.count() == 1
channel_program = ChannelProgram.objects.first()
assert channel_program.program == program
assert channel_program.channel == channel
populate_memberships_task_stub.assert_called_once_with(query.id)
add_moderators_task_stub.assert_called_once_with(channel.name)
add_subscriber_stub.assert_called_once_with(channel.name, mod.discussion_user.username)
add_moderator_stub.assert_called_once_with(channel.name, mod.discussion_user.username)
_, updated_stub = patched_users_api
updated_stub.assert_any_call(mod.discussion_user, allow_email_optin=False)
def test_add_channel_failed_create_channel(mock_staff_client, mocker):
"""If client.channels.create fails an exception should be raised"""
response_500 = Response()
response_500.status_code = statuses.HTTP_500_INTERNAL_SERVER_ERROR
mock_staff_client.channels.create.return_value.raise_for_status.side_effect = HTTPError(response=response_500)
with pytest.raises(ChannelCreationException) as ex:
api.add_channel(
Search.from_dict({}),
"title",
"name",
"description",
"channel_type",
123,
456,
)
assert ex.value.args[0] == "Error creating channel name"
mock_staff_client.channels.create.return_value.raise_for_status.assert_called_with()
assert mock_staff_client.channels.create.call_count == 1
assert PercolateQuery.objects.count() == 0
assert Channel.objects.count() == 0
def test_add_channel_channel_already_exists(mock_staff_client, patched_users_api):
"""Channel already exists with that channel name"""
response_409 = Response()
response_409.status_code = statuses.HTTP_409_CONFLICT
mock_staff_client.channels.create.return_value = response_409
title = "title"
name = "name"
description = "public description"
channel_type = "private"
input_search = Search.from_dict({"unmodified": "search"})
role = RoleFactory.create()
mod = UserFactory.create()
with pytest.raises(ChannelAlreadyExistsException):
api.add_channel(
original_search=input_search,
title=title,
name=name,
description=description,
channel_type=channel_type,
program_id=role.program.id,
creator_id=mod.id,
)
mock_staff_client.channels.create.assert_called_once_with(
title=title,
name=name,
description=description,
channel_type=channel_type,
)
def test_add_moderators_to_channel(mocker, patched_users_api):
"""add_moderators_to_channel should add staff or instructors as moderators and subscribers"""
channel = ChannelFactory.create()
mods = []
for _ in range(3):
program = ChannelProgramFactory.create(channel=channel).program
with mute_signals(post_save):
mods += [
RoleFactory.create(
program=program,
user=ProfileFactory.create().user
).user for _ in range(5)
]
for __ in range(5):
# Add some users to the channel to show that being part of the channel is not enough to be added as a mod
ProgramEnrollmentFactory.create(program=program)
create_stub, _ = patched_users_api
create_stub.reset_mock()
add_subscriber_stub = mocker.patch('discussions.api.add_subscriber_to_channel', autospec=True)
add_moderator_stub = mocker.patch('discussions.api.add_moderator_to_channel', autospec=True)
api.add_moderators_to_channel(channel.name)
for mod in mods:
add_subscriber_stub.assert_any_call(channel.name, mod.discussion_user.username)
add_moderator_stub.assert_any_call(channel.name, mod.discussion_user.username)
create_stub.assert_any_call(mod.discussion_user)
assert add_subscriber_stub.call_count == len(mods)
assert add_moderator_stub.call_count == len(mods)
assert create_stub.call_count == len(mods)
def test_add_moderator_to_channel(mock_staff_client):
"""add_moderator_to_channel should add a moderator to a channel"""
api.add_moderator_to_channel('channel', 'user')
mock_staff_client.channels.add_moderator.assert_called_once_with('channel', 'user')
def test_add_moderator_to_channel_failed(mock_staff_client):
"""If there's a non-2xx status code, add_moderator_to_channel raise an exception"""
mock_staff_client.channels.add_moderator.return_value.raise_for_status.side_effect = HTTPError
with pytest.raises(ModeratorSyncException):
api.add_moderator_to_channel('channel', 'user')
mock_staff_client.channels.add_moderator.assert_called_once_with('channel', 'user')
def test_remove_moderator_from_channel(mock_staff_client):
"""remove_moderator_from_channel should remove a moderator from a channel"""
api.remove_moderator_from_channel('channel', 'user')
mock_staff_client.channels.remove_moderator.assert_called_once_with('channel', 'user')
def test_add_and_sub_moderator_to_channel(mocker):
"""add_moderators_to_channel should add user as moderators and subscribers"""
channel = ChannelFactory.create()
add_subscriber_stub = mocker.patch('discussions.api.add_subscriber_to_channel', autospec=True)
add_moderator_stub = mocker.patch('discussions.api.add_moderator_to_channel', autospec=True)
discussion_user = DiscussionUserFactory.create()
# call api
api.add_and_subscribe_moderator(discussion_user.username, channel.name)
add_subscriber_stub.assert_any_call(channel.name, discussion_user.username)
add_moderator_stub.assert_any_call(channel.name, discussion_user.username)
|
{
"content_hash": "0d77ea1a50618835719ddd7b1b85e928",
"timestamp": "",
"source": "github",
"line_count": 739,
"max_line_length": 118,
"avg_line_length": 41.37347767253045,
"alnum_prop": 0.7136222403924776,
"repo_name": "mitodl/micromasters",
"id": "44c008e055a815ac0d3960513ffe522e2f21cc44",
"size": "30575",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "discussions/api_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9764"
},
{
"name": "Dockerfile",
"bytes": "958"
},
{
"name": "HTML",
"bytes": "84519"
},
{
"name": "JavaScript",
"bytes": "1462849"
},
{
"name": "Procfile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "2098424"
},
{
"name": "SCSS",
"bytes": "135082"
},
{
"name": "Shell",
"bytes": "10764"
}
],
"symlink_target": ""
}
|
"""The tests for the emulated Hue component."""
from http import HTTPStatus
import json
import unittest.mock
from aiohttp import web
import defusedxml.ElementTree as ET
import pytest
from homeassistant import setup
from homeassistant.components import emulated_hue
from homeassistant.components.emulated_hue import upnp
from homeassistant.const import CONTENT_TYPE_JSON
from tests.common import get_test_instance_port
BRIDGE_SERVER_PORT = get_test_instance_port()
class MockTransport:
"""Mock asyncio transport."""
def __init__(self):
"""Create a place to store the sends."""
self.sends = []
def sendto(self, response, addr):
"""Mock sendto."""
self.sends.append((response, addr))
@pytest.fixture
def aiohttp_client(loop, aiohttp_client, socket_enabled):
"""Return aiohttp_client and allow opening sockets."""
return aiohttp_client
@pytest.fixture
def hue_client(aiohttp_client):
"""Return a hue API client."""
app = web.Application()
with unittest.mock.patch(
"homeassistant.components.emulated_hue.web.Application", return_value=app
):
async def client():
"""Return an authenticated client."""
return await aiohttp_client(app)
yield client
async def setup_hue(hass):
"""Set up the emulated_hue integration."""
assert await setup.async_setup_component(
hass,
emulated_hue.DOMAIN,
{emulated_hue.DOMAIN: {emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT}},
)
def test_upnp_discovery_basic():
"""Tests the UPnP basic discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(None, None, "192.0.2.42", 8080)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """M-SEARCH * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_discovery_rootdevice():
"""Tests the UPnP rootdevice discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(None, None, "192.0.2.42", 8080)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by Busch-Jaeger free@home SysAP."""
request = """M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 40
ST: upnp:rootdevice
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: upnp:rootdevice
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc::upnp:rootdevice
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_no_response():
"""Tests the UPnP does not response on an invalid request."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(None, None, "192.0.2.42", 8080)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """INVALID * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
assert mock_transport.sends == []
async def test_description_xml(hass, hue_client):
"""Test the description."""
await setup_hue(hass)
client = await hue_client()
result = await client.get("/description.xml", timeout=5)
assert result.status == HTTPStatus.OK
assert "text/xml" in result.headers["content-type"]
try:
root = ET.fromstring(await result.text())
ns = {"s": "urn:schemas-upnp-org:device-1-0"}
assert root.find("./s:device/s:serialNumber", ns).text == "001788FFFE23BFC2"
except: # noqa: E722 pylint: disable=bare-except
pytest.fail("description.xml is not valid XML!")
async def test_create_username(hass, hue_client):
"""Test the creation of an username."""
await setup_hue(hass)
client = await hue_client()
request_json = {"devicetype": "my_device"}
result = await client.post("/api", data=json.dumps(request_json), timeout=5)
assert result.status == HTTPStatus.OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = await result.json()
success_json = resp_json[0]
assert "success" in success_json
assert "username" in success_json["success"]
async def test_unauthorized_view(hass, hue_client):
"""Test unauthorized view."""
await setup_hue(hass)
client = await hue_client()
request_json = {"devicetype": "my_device"}
result = await client.get(
"/api/unauthorized", data=json.dumps(request_json), timeout=5
)
assert result.status == HTTPStatus.OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = await result.json()
assert len(resp_json) == 1
success_json = resp_json[0]
assert len(success_json) == 1
assert "error" in success_json
error_json = success_json["error"]
assert len(error_json) == 3
assert "/" in error_json["address"]
assert "unauthorized user" in error_json["description"]
assert "1" in error_json["type"]
async def test_valid_username_request(hass, hue_client):
"""Test request with a valid username."""
await setup_hue(hass)
client = await hue_client()
request_json = {"invalid_key": "my_device"}
result = await client.post("/api", data=json.dumps(request_json), timeout=5)
assert result.status == HTTPStatus.BAD_REQUEST
|
{
"content_hash": "5f32391692499b8edf8479248d76f24d",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 88,
"avg_line_length": 29.925925925925927,
"alnum_prop": 0.6836324257425742,
"repo_name": "rohitranjan1991/home-assistant",
"id": "ec04ee7e19ca178a53e080a0619f3392a113e329",
"size": "6464",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/emulated_hue/test_upnp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class SubscriptionClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for SubscriptionClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
"""
def __init__(self, credential: "AsyncTokenCredential", **kwargs: Any) -> None:
super(SubscriptionClientConfiguration, self).__init__(**kwargs)
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-subscription/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
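# Minimal usage sketch (assumes the azure-identity package is available):
#   from azure.identity.aio import DefaultAzureCredential
#   config = SubscriptionClientConfiguration(DefaultAzureCredential())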
|
{
"content_hash": "c1f5cd64dc0b441d3f645e30cd978a77",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 107,
"avg_line_length": 52.808510638297875,
"alnum_prop": 0.7264302981466559,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c3e5601d1d620ecca829505194e46337c50f871d",
"size": "2950",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/subscription/azure-mgmt-subscription/azure/mgmt/subscription/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import math
import time
import krpc
turn_start_altitude = 250
turn_end_altitude = 45000
target_altitude = 150000
conn = krpc.connect(name='Launch into orbit')
vessel = conn.space_center.active_vessel
# Set up streams for telemetry
ut = conn.add_stream(getattr, conn.space_center, 'ut')
altitude = conn.add_stream(getattr, vessel.flight(), 'mean_altitude')
apoapsis = conn.add_stream(getattr, vessel.orbit, 'apoapsis_altitude')
stage_3_resources = vessel.resources_in_decouple_stage(stage=3, cumulative=False)
srb_fuel = conn.add_stream(stage_3_resources.amount, 'SolidFuel')
# Pre-launch setup
vessel.control.sas = False
vessel.control.rcs = False
vessel.control.throttle = 1.0
# Countdown...
print('3...')
time.sleep(1)
print('2...')
time.sleep(1)
print('1...')
time.sleep(1)
print('Launch!')
# Activate the first stage
vessel.control.activate_next_stage()
vessel.auto_pilot.engage()
vessel.auto_pilot.target_pitch_and_heading(90, 90)
# Main ascent loop
srbs_separated = False
turn_angle = 0
while True:
# Gravity turn
if altitude() > turn_start_altitude and altitude() < turn_end_altitude:
frac = ((altitude() - turn_start_altitude) /
(turn_end_altitude - turn_start_altitude))
new_turn_angle = frac * 90
if abs(new_turn_angle - turn_angle) > 0.5:
turn_angle = new_turn_angle
vessel.auto_pilot.target_pitch_and_heading(90-turn_angle, 90)
# Separate SRBs when finished
if not srbs_separated:
if srb_fuel() < 0.1:
vessel.control.activate_next_stage()
srbs_separated = True
print('SRBs separated')
    # Exit the ascent loop when approaching target apoapsis
if apoapsis() > target_altitude*0.9:
print('Approaching target apoapsis')
break
# Approach target apoapsis on reduced throttle, then disable the engines
vessel.control.throttle = 0.25
while apoapsis() < target_altitude:
pass
print('Target apoapsis reached')
vessel.control.throttle = 0.0
# Wait until out of atmosphere
print('Coasting out of atmosphere')
while altitude() < 70500:
pass
# Plan circularization burn (using vis-viva equation)
print('Planning circularization burn')
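# Vis-viva: v^2 = mu * (2/r - 1/a). Evaluated at apoapsis (r fixed) for the
# current ellipse (a1) and for the target circular orbit (a2 = r), the
# difference of the two speeds is the delta-v the burn must supply.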
mu = vessel.orbit.body.gravitational_parameter
r = vessel.orbit.apoapsis
a1 = vessel.orbit.semi_major_axis
a2 = r
v1 = math.sqrt(mu*((2./r)-(1./a1)))
v2 = math.sqrt(mu*((2./r)-(1./a2)))
delta_v = v2 - v1
node = vessel.control.add_node(
ut() + vessel.orbit.time_to_apoapsis, prograde=delta_v)
# Calculate burn time (using rocket equation)
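# Tsiolkovsky: delta_v = Isp * ln(m0 / m1), with Isp here in m/s (specific
# impulse times g0), so m1 = m0 / exp(delta_v / Isp); the propellant burned
# is m0 - m1 and the burn time follows from the constant flow rate F / Isp.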
F = vessel.available_thrust
Isp = vessel.specific_impulse * 9.82
m0 = vessel.mass
m1 = m0 / math.exp(delta_v/Isp)
flow_rate = F / Isp
burn_time = (m0 - m1) / flow_rate
# Orientate ship
print('Orientating ship for circularization burn')
vessel.auto_pilot.reference_frame = node.reference_frame
vessel.auto_pilot.target_direction = (0, 1, 0)
vessel.auto_pilot.wait()
# Wait until burn
print('Waiting until circularization burn')
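# Start the burn half its duration before apoapsis so it is centred on the node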
burn_ut = ut() + vessel.orbit.time_to_apoapsis - (burn_time/2.)
lead_time = 5
conn.space_center.warp_to(burn_ut - lead_time)
# Execute burn
print('Ready to execute burn')
time_to_apoapsis = conn.add_stream(getattr, vessel.orbit, 'time_to_apoapsis')
while time_to_apoapsis() - (burn_time/2.) > 0:
pass
print('Executing burn')
vessel.control.throttle = 1.0
time.sleep(burn_time - 0.1)
print('Fine tuning')
vessel.control.throttle = 0.05
remaining_burn = conn.add_stream(node.remaining_burn_vector, node.reference_frame)
while remaining_burn()[1] > 0:
pass
vessel.control.throttle = 0.0
node.remove()
print('Launch complete')
|
{
"content_hash": "6726125cede10945bdbdd4f473672710",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 82,
"avg_line_length": 28.870967741935484,
"alnum_prop": 0.702513966480447,
"repo_name": "mkluge/ksp-console",
"id": "043ea57fb2c658a148053454ae17574a6056a0a8",
"size": "3580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-code/LaunchIntoOrbit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1894"
},
{
"name": "Python",
"bytes": "73743"
},
{
"name": "Shell",
"bytes": "480"
}
],
"symlink_target": ""
}
|
import glob
import os
import re
import sys
# It's fragile to rely on the location of this script to find the top-level
# source directory.
TOP_LEVEL_DIRECTORY = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
WEBKIT_LIBRARIES = os.environ['WEBKIT_LIBRARIES']
def main():
react_to_vsprops_changes()
react_to_webkit1_interface_changes()
def react_to_vsprops_changes():
vsprops_directory = os.path.join(WEBKIT_LIBRARIES, 'tools', 'vsprops')
newest_vsprops_time = mtime_of_newest_file_matching_glob(os.path.join(vsprops_directory, '*.props'))
obj_directory = os.path.join(os.environ['CONFIGURATIONBUILDDIR'], 'obj32')
# Visual Studio isn't smart enough to figure out it needs to rebuild these file types when
# .vsprops files change (even if we touch wtf/Platform.h below), so we delete them to force them
# to be rebuilt.
for extension in ('dep', 'manifest', 'pch', 'res'):
for filepath in glob.iglob(os.path.join(obj_directory, '*', '*.%s' % extension)):
delete_if_older_than(filepath, newest_vsprops_time)
# Touch wtf/Platform.h so all files will be recompiled. This is necessary
# to pick up changes to preprocessor macros (e.g., ENABLE_*).
wtf_platform_h = os.path.join(TOP_LEVEL_DIRECTORY, 'Source', 'WTF', 'wtf', 'Platform.h')
touch_if_older_than(wtf_platform_h, newest_vsprops_time)
def react_to_webkit1_interface_changes():
interfaces_directory = os.path.join(TOP_LEVEL_DIRECTORY, 'Source', 'WebKit', 'win', 'Interfaces')
newest_idl_time = mtime_of_newest_file_matching_glob(os.path.join(interfaces_directory, '*.idl'))
# WebKit.idl includes all the other IDL files, so needs to be rebuilt if any IDL file changes.
# But Visual Studio isn't smart enough to figure this out, so we touch WebKit.idl to ensure that
# it gets rebuilt.
touch_if_older_than(os.path.join(interfaces_directory, 'WebKit.idl'), newest_idl_time)
def mtime_of_newest_file_matching_glob(glob_pattern):
files = glob.glob(glob_pattern)
assert len(files), "Couldn't find any files matching glob %s" % glob_pattern
return max(map(os.path.getmtime, files))
def delete_if_older_than(path, reference_time):
if os.path.getmtime(path) < reference_time:
        print('Deleting %s' % path)
os.remove(path)
def touch_if_older_than(path, reference_time):
if os.path.getmtime(path) < reference_time:
        print('Touching %s' % path)
os.utime(path, None)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "a7ba9641138d49f90530494980676a2f",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 115,
"avg_line_length": 40.03125,
"alnum_prop": 0.6928181108508977,
"repo_name": "gubaojian/trylearn",
"id": "51d650f7fbc5ac4914ebe3d299e74e798878731c",
"size": "2585",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "WebCore/Source/WTF/WTF.vcxproj/work-around-vs-dependency-tracking-bugs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AspectJ",
"bytes": "623"
},
{
"name": "Assembly",
"bytes": "1942"
},
{
"name": "Batchfile",
"bytes": "6632"
},
{
"name": "C",
"bytes": "6629351"
},
{
"name": "C++",
"bytes": "57418677"
},
{
"name": "CMake",
"bytes": "1269316"
},
{
"name": "CSS",
"bytes": "99559"
},
{
"name": "HTML",
"bytes": "283332"
},
{
"name": "Java",
"bytes": "267448"
},
{
"name": "JavaScript",
"bytes": "282026"
},
{
"name": "Makefile",
"bytes": "164797"
},
{
"name": "Objective-C",
"bytes": "956074"
},
{
"name": "Objective-C++",
"bytes": "3645713"
},
{
"name": "Perl",
"bytes": "192119"
},
{
"name": "Python",
"bytes": "39191"
},
{
"name": "Ragel",
"bytes": "128173"
},
{
"name": "Roff",
"bytes": "26536"
},
{
"name": "Ruby",
"bytes": "32784"
},
{
"name": "Shell",
"bytes": "7177"
},
{
"name": "Vue",
"bytes": "1776"
},
{
"name": "Yacc",
"bytes": "11866"
}
],
"symlink_target": ""
}
|
"""Module containing the Chrome stages."""
from __future__ import print_function
import glob
import multiprocessing
import os
import shutil
from chromite.cbuildbot import commands
from chromite.cbuildbot import goma_util
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot.stages import artifact_stages
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import failures_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import path_util
from chromite.lib import portage_util
from chromite.lib import results_lib
MASK_CHANGES_ERROR_SNIPPET = 'The following mask changes are necessary'
CHROMEPIN_MASK_PATH = os.path.join(constants.SOURCE_ROOT,
constants.CHROMIUMOS_OVERLAY_DIR,
'profiles', 'default', 'linux',
'package.mask', 'chromepin')
class SyncChromeStage(generic_stages.BuilderStage,
generic_stages.ArchivingStageMixin):
"""Stage that syncs Chrome sources if needed."""
option_name = 'managed_chrome'
category = constants.PRODUCT_CHROME_STAGE
def __init__(self, builder_run, buildstore, **kwargs):
super(SyncChromeStage, self).__init__(builder_run, buildstore, **kwargs)
# PerformStage() will fill this out for us.
# TODO(mtennant): Replace with a run param.
self.chrome_version = None
def HandleSkip(self):
"""Set run.attrs.chrome_version to chrome version in buildroot now."""
self._run.attrs.chrome_version = self._run.DetermineChromeVersion()
logging.debug('Existing chrome version is %s.',
self._run.attrs.chrome_version)
self._WriteChromeVersionToMetadata()
super(SyncChromeStage, self).HandleSkip()
def _GetChromeVersionFromMetadata(self):
"""Return the Chrome version from metadata; None if is does not exist."""
version_dict = self._run.attrs.metadata.GetDict().get('version')
return None if not version_dict else version_dict.get('chrome')
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
chrome_atom_to_build = None
if self._chrome_rev:
if (self._chrome_rev == constants.CHROME_REV_SPEC and
self._run.options.chrome_version):
self.chrome_version = self._run.options.chrome_version
logging.info('Using chrome version from options.chrome_version: %s',
self.chrome_version)
else:
self.chrome_version = self._GetChromeVersionFromMetadata()
if self.chrome_version:
logging.info('Using chrome version from the metadata dictionary: %s',
self.chrome_version)
# Perform chrome uprev.
try:
chrome_atom_to_build = commands.MarkChromeAsStable(
self._build_root, self._run.manifest_branch,
self._chrome_rev, self._boards,
chrome_version=self.chrome_version)
except commands.ChromeIsPinnedUprevError as e:
        # If uprev failed due to a chrome pin, record that failure (so that the
        # build ultimately fails) but try again without the pin, to allow the
        # slave to test the newer chrome anyway.
chrome_atom_to_build = e.new_chrome_atom
if chrome_atom_to_build:
results_lib.Results.Record(self.name, e)
logging.PrintBuildbotStepFailure()
logging.error('Chrome is pinned. Unpinning chrome and continuing '
'build for chrome atom %s. This stage will be marked '
'as failed to prevent an uprev.',
chrome_atom_to_build)
logging.info('Deleting pin file at %s and proceeding.',
CHROMEPIN_MASK_PATH)
osutils.SafeUnlink(CHROMEPIN_MASK_PATH)
else:
raise
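    # CHROME_REV_SPEC builds sync Chrome to an exact revision; every other
    # build type syncs to the release tag for the chosen Chrome version.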
kwargs = {}
if self._chrome_rev == constants.CHROME_REV_SPEC:
kwargs['revision'] = self.chrome_version
logging.PrintBuildbotStepText('revision %s' % kwargs['revision'])
else:
if not self.chrome_version:
self.chrome_version = self._run.DetermineChromeVersion()
kwargs['tag'] = self.chrome_version
logging.PrintBuildbotStepText('tag %s' % kwargs['tag'])
useflags = self._run.config.useflags
git_cache_dir = (
self._run.options.chrome_preload_dir or self._run.options.git_cache_dir)
commands.SyncChrome(self._build_root,
self._run.options.chrome_root,
useflags,
git_cache_dir=git_cache_dir,
**kwargs)
def _WriteChromeVersionToMetadata(self):
"""Write chrome version to metadata and upload partial json file."""
self._run.attrs.metadata.UpdateKeyDictWithDict(
'version',
{'chrome': self._run.attrs.chrome_version})
self.UploadMetadata(filename=constants.PARTIAL_METADATA_JSON)
def Finish(self):
"""Provide chrome_version to the rest of the run."""
# Even if the stage failed, a None value for chrome_version still
# means something. In other words, this stage tried to run.
self._run.attrs.chrome_version = self.chrome_version
self._WriteChromeVersionToMetadata()
super(SyncChromeStage, self).Finish()
class SimpleChromeArtifactsStage(generic_stages.BoardSpecificBuilderStage,
generic_stages.ArchivingStageMixin):
"""Archive Simple Chrome artifacts."""
option_name = 'chrome_sdk'
config_name = 'chrome_sdk'
category = constants.PRODUCT_CHROME_STAGE
def __init__(self, *args, **kwargs):
super(SimpleChromeArtifactsStage, self).__init__(*args, **kwargs)
self._upload_queue = multiprocessing.Queue()
self._pkg_dir = os.path.join(
self._build_root, constants.DEFAULT_CHROOT_DIR,
'build', self._current_board, portage_util.VDB_PATH)
def _BuildAndArchiveChromeSysroot(self):
"""Generate and upload sysroot for building Chrome."""
assert self.archive_path.startswith(self._build_root)
extra_env = {}
if self._run.config.useflags:
extra_env['USE'] = ' '.join(self._run.config.useflags)
in_chroot_path = path_util.ToChrootPath(self.archive_path)
cmd = ['cros_generate_sysroot', '--out-dir', in_chroot_path, '--board',
self._current_board, '--deps-only', '--package', constants.CHROME_CP]
cros_build_lib.run(cmd, cwd=self._build_root, enter_chroot=True,
extra_env=extra_env)
self._upload_queue.put([constants.CHROME_SYSROOT_TAR])
def _ArchiveChromeEbuildEnv(self):
"""Generate and upload Chrome ebuild environment."""
files = glob.glob(os.path.join(self._pkg_dir, constants.CHROME_CP) + '-*')
if not files:
raise artifact_stages.NothingToArchiveException(
'Failed to find package %s' % constants.CHROME_CP)
if len(files) > 1:
logging.PrintBuildbotStepWarnings()
logging.warning('Expected one package for %s, found %d',
constants.CHROME_CP, len(files))
chrome_dir = sorted(files)[-1]
env_bzip = os.path.join(chrome_dir, 'environment.bz2')
with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
# Convert from bzip2 to tar format.
bzip2 = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
cros_build_lib.run(
[bzip2, '-d', env_bzip, '-c'],
stdout=os.path.join(tempdir, constants.CHROME_ENV_FILE))
env_tar = os.path.join(self.archive_path, constants.CHROME_ENV_TAR)
cros_build_lib.CreateTarball(env_tar, tempdir)
self._upload_queue.put([os.path.basename(env_tar)])
def _GenerateAndUploadMetadata(self):
self.UploadMetadata(upload_queue=self._upload_queue,
filename=constants.PARTIAL_METADATA_JSON)
def PerformStage(self):
steps = [self._BuildAndArchiveChromeSysroot, self._ArchiveChromeEbuildEnv,
self._GenerateAndUploadMetadata]
with self.ArtifactUploader(self._upload_queue, archive=False):
parallel.RunParallelSteps(steps)
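    # When configured, canary masters also exercise the Simple Chrome workflow
    # against the artifacts that were just archived.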
if (self._run.config.chrome_sdk_build_chrome and
config_lib.IsCanaryMaster(self._run)):
test_stage = TestSimpleChromeWorkflowStage(self._run,
self.buildstore,
self._current_board)
test_stage.Run()
class TestSimpleChromeWorkflowStage(generic_stages.BoardSpecificBuilderStage,
generic_stages.ArchivingStageMixin):
"""Run through the simple chrome workflow."""
category = constants.PRODUCT_CHROME_STAGE
def __init__(self, *args, **kwargs):
super(TestSimpleChromeWorkflowStage, self).__init__(*args, **kwargs)
if self._run.options.chrome_root:
self.chrome_src = os.path.join(self._run.options.chrome_root, 'src')
board_dir = 'out_%s' % self._current_board
self.out_board_dir = os.path.join(
self.chrome_src, board_dir, 'Release')
def _VerifyChromeDeployed(self, tempdir):
"""Check to make sure deploy_chrome ran correctly."""
if not os.path.exists(os.path.join(tempdir, 'chrome')):
raise AssertionError('deploy_chrome did not run successfully!')
def _VerifySDKEnvironment(self):
"""Make sure the SDK environment is set up properly."""
# If the environment wasn't set up, then the output directory wouldn't be
# created after 'gn gen'.
# TODO: Make this check actually look at the environment.
if not os.path.exists(self.out_board_dir):
raise AssertionError('%s not created!' % self.out_board_dir)
# Log args.gn for debugging.
logging.info('ARGS.GN=\n%s',
osutils.ReadFile(os.path.join(self.out_board_dir, 'args.gn')))
def _ShouldEnableGoma(self):
    # Enable goma if 1) Chrome actually needs to be built, 2) not
    # latest_toolchain (because the toolchain prebuilt package may not be
    # available for goma, crbug.com/728971), 3) goma is available, and 4) the
    # config says goma should be used to build Chrome.
return (self._run.options.managed_chrome and
not self._latest_toolchain and
self._run.options.goma_dir and
self._run.config.chrome_sdk_goma)
def _BuildChrome(self, sdk_cmd, goma):
"""Use the generated SDK to build Chrome."""
# Validate fetching of the SDK and setting everything up.
sdk_cmd.Run(['true'])
sdk_cmd.Run(['gclient', 'runhooks'])
# Generate args.gn and ninja files.
gn_cmd = os.path.join(self.chrome_src, 'buildtools', 'linux64', 'gn')
gn_gen_cmd = '%s gen "%s" --args="$GN_ARGS"' % (gn_cmd, self.out_board_dir)
sdk_cmd.Run(['bash', '-c', gn_gen_cmd])
self._VerifySDKEnvironment()
if goma:
      # If goma is enabled, start the goma compiler_proxy here and record
      # some information just before the Chrome build starts.
goma.Start()
extra_env = goma.GetExtraEnv()
ninja_env_path = os.path.join(goma.goma_log_dir, 'ninja_env')
sdk_cmd.Run(['env', '--null'],
run_args={'extra_env': extra_env,
'stdout': ninja_env_path})
osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_cwd'),
sdk_cmd.cwd)
osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_command'),
cros_build_lib.CmdToStr(sdk_cmd.GetNinjaCommand()))
else:
extra_env = None
result = None
try:
# Build chromium.
result = sdk_cmd.Ninja(run_args={'extra_env': extra_env})
finally:
      # In teardown, if goma is enabled, stop the goma compiler proxy and
      # record/copy some information to the log directory; it will be
      # uploaded to goma's server in a later stage.
if goma:
goma.Stop()
ninja_log_path = os.path.join(self.chrome_src,
sdk_cmd.GetNinjaLogPath())
if os.path.exists(ninja_log_path):
shutil.copy2(ninja_log_path,
os.path.join(goma.goma_log_dir, 'ninja_log'))
if result:
osutils.WriteFile(os.path.join(goma.goma_log_dir, 'ninja_exit'),
str(result.returncode))
def _TestDeploy(self, sdk_cmd):
"""Test SDK deployment."""
with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
# Use the TOT deploy_chrome.
script_path = os.path.join(
self._build_root, constants.CHROMITE_BIN_SUBDIR, 'deploy_chrome')
sdk_cmd.Run([script_path, '--build-dir', self.out_board_dir,
'--staging-only', '--staging-dir', tempdir])
self._VerifyChromeDeployed(tempdir)
def _VMTest(self, sdk_cmd):
"""Run cros_run_test."""
image_path = os.path.join(self.GetImageDirSymlink(),
constants.VM_IMAGE_BIN)
# Run VM test for boards where we've built a VM.
if image_path and os.path.exists(image_path):
sdk_cmd.VMTest(image_path)
def PerformStage(self):
with osutils.TempDir(prefix='chrome-sdk-cache') as tempdir:
cache_dir = os.path.join(tempdir, 'cache')
extra_args = ['--cwd', self.chrome_src, '--sdk-path',
self.archive_path]
# Do not automatically run 'gn gen', that will be done in _BuildChrome.
extra_args.extend(['--nogn-gen'])
if self._ShouldEnableGoma():
# TODO(crbug.com/751010): Revisit to enable DepsCache for
# non-chrome-pfq bots, too.
use_goma_deps_cache = self._run.config.name.endswith('chrome-pfq')
goma = goma_util.Goma(
self._run.options.goma_dir,
self._run.options.goma_client_json,
stage_name=self.StageNamePrefix() if use_goma_deps_cache else None,
chromeos_goma_dir=self._run.options.chromeos_goma_dir)
extra_args.extend(['--nostart-goma', '--gomadir', goma.linux_goma_dir])
self._run.attrs.metadata.UpdateWithDict(
{'goma_tmp_dir_for_simple_chrome': goma.goma_tmp_dir})
else:
goma = None
if constants.USE_CHROME_INTERNAL in self._run.config.useflags:
extra_args.extend(['--internal'])
sdk_cmd = commands.ChromeSDK(
self._build_root, self._current_board, chrome_src=self.chrome_src,
goma=bool(goma), extra_args=extra_args, cache_dir=cache_dir)
self._BuildChrome(sdk_cmd, goma)
self._TestDeploy(sdk_cmd)
self._VMTest(sdk_cmd)
class ChromeLKGMSyncStage(sync_stages.SyncStage):
"""Stage that syncs to the last known good manifest for Chrome."""
output_manifest_sha1 = False
category = constants.PRODUCT_CHROME_STAGE
def GetNextManifest(self):
"""Override: Gets the LKGM from the Chrome tree."""
chrome_lkgm = commands.GetChromeLKGM(self._run.options.chrome_version)
# We need a full buildspecs manager here as we need an initialized manifest
# manager with paths to the spec.
# TODO(mtennant): Consider registering as manifest_manager run param, for
# consistency, but be careful that consumers do not get confused.
# Currently only the "manifest_manager" from ManifestVersionedSync (and
# subclasses) is used later in the flow.
manifest_manager = manifest_version.BuildSpecsManager(
source_repo=self.repo,
manifest_repo=self._GetManifestVersionsRepoUrl(),
build_names=self._run.GetBuilderIds(),
incr_type='build',
force=False,
branch=self._run.manifest_branch,
buildstore=self.buildstore)
manifest_manager.BootstrapFromVersion(chrome_lkgm)
return manifest_manager.GetLocalManifest(chrome_lkgm)
|
{
"content_hash": "c3732bf4927eabfe57689e9ae4827b33",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 80,
"avg_line_length": 42.77297297297297,
"alnum_prop": 0.6501326930367749,
"repo_name": "endlessm/chromium-browser",
"id": "9c5a372e7d33f701aac91ecd8ccc2c43fcc3ddd0",
"size": "16020",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/cbuildbot/stages/chrome_stages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from otp.ai.AIBaseGlobal import *
from pandac.PandaModules import *
from DistributedNPCToonBaseAI import *
import ToonDNA
from direct.task.Task import Task
from toontown.ai import DatabaseObject
from toontown.estate import ClosetGlobals
class DistributedNPCTailorAI(DistributedNPCToonBaseAI):
freeClothes = simbase.config.GetBool('free-clothes', 0)
housingEnabled = simbase.config.GetBool('want-housing', 1)
def __init__(self, air, npcId):
DistributedNPCToonBaseAI.__init__(self, air, npcId)
self.timedOut = 0
self.givesQuests = 0
self.customerDNA = None
self.customerId = None
return
def getTailor(self):
return 1
def delete(self):
taskMgr.remove(self.uniqueName('clearMovie'))
self.ignoreAll()
self.customerDNA = None
self.customerId = None
DistributedNPCToonBaseAI.delete(self)
return
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
if not self.air.doId2do.has_key(avId):
self.notify.warning('Avatar: %s not found' % avId)
return
if self.isBusy():
self.freeAvatar(avId)
return
av = self.air.doId2do[avId]
self.customerDNA = ToonDNA.ToonDNA()
self.customerDNA.makeFromNetString(av.getDNAString())
self.customerId = avId
av.b_setDNAString(self.customerDNA.makeNetString())
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.__handleUnexpectedExit, extraArgs=[avId])
flag = NPCToons.PURCHASE_MOVIE_START_BROWSE
if self.freeClothes:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
elif self.air.questManager.hasTailorClothingTicket(av, self) == 1:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
elif self.air.questManager.hasTailorClothingTicket(av, self) == 2:
flag = NPCToons.PURCHASE_MOVIE_START
if self.housingEnabled and self.isClosetAlmostFull(av):
flag = NPCToons.PURCHASE_MOVIE_START_NOROOM
self.sendShoppingMovie(avId, flag)
DistributedNPCToonBaseAI.avatarEnter(self)
def isClosetAlmostFull(self, av):
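        # The tops list stores 4 values per top (top texture/color plus sleeve
        # texture/color) and the bottoms list 2 per bottom, so divide by the
        # stride to count items.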
numClothes = len(av.clothesTopsList) / 4 + len(av.clothesBottomsList) / 2
if numClothes >= av.maxClothes - 1:
return 1
return 0
def sendShoppingMovie(self, avId, flag):
self.busy = avId
self.sendUpdate('setMovie', [flag,
self.npcId,
avId,
ClockDelta.globalClockDelta.getRealNetworkTime()])
taskMgr.doMethodLater(NPCToons.TAILOR_COUNTDOWN_TIME, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def rejectAvatar(self, avId):
self.notify.warning('rejectAvatar: should not be called by a Tailor!')
def sendTimeoutMovie(self, task):
toon = self.air.doId2do.get(self.customerId)
if toon != None and self.customerDNA:
toon.b_setDNAString(self.customerDNA.makeNetString())
self.timedOut = 1
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_TIMEOUT,
self.npcId,
self.busy,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
return Task.done
def sendClearMovie(self, task):
self.ignore(self.air.getAvatarExitEvent(self.busy))
self.customerDNA = None
self.customerId = None
self.busy = 0
self.timedOut = 0
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_CLEAR,
self.npcId,
0,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendUpdate('setCustomerDNA', [0, ''])
return Task.done
def completePurchase(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.PURCHASE_MOVIE_COMPLETE,
self.npcId,
avId,
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
return
def setDNA(self, blob, finished, which):
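        # finished appears to be 0 while browsing (live preview), 1 on cancel
        # (restore the original DNA) and 2 on a confirmed purchase; which is a
        # bitmask of ClosetGlobals.SHIRT / ClosetGlobals.SHORTS.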
avId = self.air.getAvatarIdFromSender()
if avId != self.customerId:
if self.customerId:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA customer is %s' % self.customerId)
self.notify.warning('customerId: %s, but got setDNA for: %s' % (self.customerId, avId))
return
testDNA = ToonDNA.ToonDNA()
if not testDNA.isValidNetString(blob):
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA: invalid dna: %s' % blob)
return
if self.air.doId2do.has_key(avId):
av = self.air.doId2do[avId]
if finished == 2 and which > 0:
if self.air.questManager.removeClothingTicket(av, self) == 1 or self.freeClothes:
av.b_setDNAString(blob)
if which & ClosetGlobals.SHIRT:
if av.addToClothesTopsList(self.customerDNA.topTex, self.customerDNA.topTexColor, self.customerDNA.sleeveTex, self.customerDNA.sleeveTexColor) == 1:
av.b_setClothesTopsList(av.getClothesTopsList())
else:
self.notify.warning('NPCTailor: setDNA() - unable to save old tops - we exceeded the tops list length')
if which & ClosetGlobals.SHORTS:
if av.addToClothesBottomsList(self.customerDNA.botTex, self.customerDNA.botTexColor) == 1:
av.b_setClothesBottomsList(av.getClothesBottomsList())
else:
self.notify.warning('NPCTailor: setDNA() - unable to save old bottoms - we exceeded the bottoms list length')
self.air.writeServerEvent('boughtTailorClothes', avId, '%s|%s|%s' % (self.doId, which, self.customerDNA.asTuple()))
else:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA bogus clothing ticket')
self.notify.warning('NPCTailor: setDNA() - client tried to purchase with bogus clothing ticket!')
if self.customerDNA:
av.b_setDNAString(self.customerDNA.makeNetString())
elif finished == 1:
if self.customerDNA:
av.b_setDNAString(self.customerDNA.makeNetString())
else:
self.sendUpdate('setCustomerDNA', [avId, blob])
else:
self.notify.warning('no av for avId: %d' % avId)
if self.timedOut == 1 or finished == 0:
return
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.completePurchase(avId)
elif self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCTailorAI.setDNA busy with %s' % self.busy)
self.notify.warning('setDNA from unknown avId: %s busy: %s' % (avId, self.busy))
def __handleUnexpectedExit(self, avId):
self.notify.warning('avatar:' + str(avId) + ' has exited unexpectedly')
if self.customerId == avId:
toon = self.air.doId2do.get(avId)
if toon == None:
toon = DistributedToonAI.DistributedToonAI(self.air)
toon.doId = avId
if self.customerDNA:
toon.b_setDNAString(self.customerDNA.makeNetString())
db = DatabaseObject.DatabaseObject(self.air, avId)
db.storeObject(toon, ['setDNAString'])
else:
self.notify.warning('invalid customer avId: %s, customerId: %s ' % (avId, self.customerId))
if self.busy == avId:
self.sendClearMovie(None)
else:
self.notify.warning('not busy with avId: %s, busy: %s ' % (avId, self.busy))
return
|
{
"content_hash": "a4eb8c506b7727d0abb1d8f77535aa8f",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 172,
"avg_line_length": 45.57865168539326,
"alnum_prop": 0.6180204609885369,
"repo_name": "ksmit799/Toontown-Source",
"id": "3a16b6dfc8363e5ca44b458d615671610e12673e",
"size": "8113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/toon/DistributedNPCTailorAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
from . import CATALOGS_USE_SOCIAL_GRAPH
if CATALOGS_USE_SOCIAL_GRAPH:
from django.conf import settings
def inverse(edge_type):
from social_graph.models import EdgeTypeAssociation
return EdgeTypeAssociation.objects.get(direct=edge_type).inverse
# TODO: re-implement!!!
class GraphGrouping(object):
def __init__(self):
super(GraphGrouping, self).__init__()
from social_graph import Graph
self.graph = Graph()
def group(self, objects):
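            # Bucket the objects by membership; the extra 'group_list' entry
            # holds (group key, count) pairs for iterating over the buckets.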
result = dict()
for obj in objects:
for group in self.memberships(obj):
if not str(group) in result:
result[str(group)] = {
'group_obj': group,
'object_list': [],
'count': 0
}
result[str(group)]['object_list'].append(obj)
result[str(group)]['count'] += 1
result['group_list'] = [(key, value['count']) for key, value in result.items()]
return result
def memberships(self, obj):
return []
class RelationTargetGrouping(GraphGrouping):
def __init__(self, edge_type):
super(RelationTargetGrouping, self).__init__()
self.edge_type = edge_type
def memberships(self, obj):
count = self.graph.edge_count(obj, self.edge_type)
return [node for node, attributes, time in self.graph.edge_range(obj, self.edge_type, 0, count)]
class ClassifierGrouping(GraphGrouping):
def __init__(self, classifier):
super(ClassifierGrouping, self).__init__()
from social_graph.models import EdgeType
have_value_edge_name = getattr(settings, 'HAVE_VALUE_EDGE_NAME', None)
described_by_edge_name = getattr(settings, 'DESCRIBED_BY_EDGE_NAME', None)
self.classifier = classifier
have_value = EdgeType.objects.get(name=have_value_edge_name)
self.described_by = EdgeType.objects.get(name=described_by_edge_name)
count = self.graph.edge_count(classifier, have_value)
self.groups = set(
[node for node, attributes, time in self.graph.edge_range(classifier, have_value, 0, count)]
)
def memberships(self, obj):
count = self.graph.edge_count(obj, self.described_by)
return set([
node for node, attributes, time in self.graph.edge_range(obj, self.described_by, 0, count)
]) & self.groups
|
{
"content_hash": "586c8da793b5a12f47e4aa94bac702e3",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 108,
"avg_line_length": 37.12676056338028,
"alnum_prop": 0.5618361153262519,
"repo_name": "suselrd/django-catalog-wizard",
"id": "5fdbd002c7211bbed83fc24ebd13f5f324a615cc",
"size": "2651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalog/graph_goupings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1076"
},
{
"name": "Python",
"bytes": "86643"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', '{{ project_name }}.views.home', name='home'),
# url(r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('django.views',
(r'^media/(?P<path>.*)$', 'static.serve', {'document_root': settings.MEDIA_ROOT}),
)
|
{
"content_hash": "0a4fdb02fd2528707218ddb49834d663",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 90,
"avg_line_length": 34.30434782608695,
"alnum_prop": 0.6628643852978454,
"repo_name": "zbyte64/django-dockit",
"id": "01714a9f6c9184d8fd40f923d96c4b552cc3c809",
"size": "789",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2494"
},
{
"name": "Python",
"bytes": "428384"
}
],
"symlink_target": ""
}
|
from graphics.widgets.verified_input_f import VerifiedInputF
import file_experts.file_expert as file_expert
import constants.input_constants as const
import tkinter as tk
class FileAndDirectoryNameInputF(tk.Frame):
""" Use to get user input for the new directory / file name."""
def __init__(self,
parent,
file_extension: str,
initial_directory_path: str,
name_change_eh,
file_input=False,
disabled=False):
"""
:param parent: Parent.
:param file_extension: New file's extension.
:param initial_directory_path: Initial save directory.
:param name_change_eh: Method that will be called when the file
name is changed.
:param file_input: - Default: False;
- True if used to get input for a new file
name.
        :param disabled:               - Default: False;
- If True all the widgets will be disabled.
"""
tk.Frame.__init__(self,
parent,
padx=const.FNI_FRAME_PADX,
pady=const.FNI_FRAME_PADY)
self._name_change_eh = name_change_eh
self._allowed_characters = const.FNI_ALLOWED_CHARACTERS
self._directory_path = initial_directory_path
self._file_extension = file_extension
self._file_input = file_input
if file_input:
self._validation_method = self._is_file_name_valid
else:
self._validation_method = self._is_directory_name_valid
self._create_widgets()
self._place_widgets()
self._file_name = ''
if disabled:
self.disable()
#########################################################################
# Widget creation and placement
def _create_widgets(self):
""" Creates the widgets."""
self._f_user_information = tk.Frame(
self,
padx=const.FNI_WIDGETS_PADX,
pady=const.FNI_WIDGETS_PADY
)
self._lbl_user_instruction = tk.Label(
self._f_user_information,
text=const.FNI_USER_INSTRUCTION,
font=const.FNI_L_FONT
)
self._lbl_allowed_characters = tk.Label(
self._f_user_information,
text=const.FNI_ALLOWED_MSG,
font=const.FNI_S_FONT
)
self._vi_name_input = VerifiedInputF(
parent=self,
validation_method=self._validation_method,
valid_input_eh=self._name_changed_eh,
invalid_input_eh=self._name_changed_eh,
disabled=True
)
if self._file_input:
self._lbl_file_extension = tk.Label(
self,
padx=const.FNI_WIDGETS_PADX,
pady=const.FNI_WIDGETS_PADY,
text=self._file_extension,
font=const.FNI_S_FONT
)
def _place_widgets(self):
""" Places the widgets."""
self._lbl_user_instruction.pack(side='top',
fill='both',
expand=True)
self._lbl_allowed_characters.pack(side='top',
fill='both',
expand=True)
self._f_user_information.pack(side='left',
fill='both',
expand=True)
self._vi_name_input.pack(side='left',
fill='both',
expand=True)
if self._file_input:
self._lbl_file_extension.pack(side='left',
fill='both',
expand=True)
#########################################################################
# Event handling
def _name_changed_eh(self):
"""
- Automatically called when the user changes the desired name.
"""
self._file_name = self._vi_name_input.get_input()
new_file_name = self.get_name()
self._name_change_eh(new_file_name)
#########################################################################
# Validation methods
def _is_file_name_valid(
self,
file_name: str):
"""
- Checks if the file name is valid.
- A file name is valid if it contains only the allowed
characters, the save location exists and does not contain a
file with the same name and extension.
:return: - True if the file name is valid;
- False otherwise.
"""
rez = file_expert.is_file_name_valid(
allowed_characters=const.FNI_ALLOWED_CHARACTERS,
directory_path=self._directory_path,
file_extension=self._file_extension,
file_name=file_name
)
return rez
def _is_directory_name_valid(
self,
directory_name: str):
"""
- Checks if the directory name is valid.
- A directory name is valid if it contains only the allowed
characters, the save location exists and does not contain a
directory with the same name.
:return: - True if the directory name is valid;
- False otherwise.
"""
rez = file_expert.is_directory_name_valid(
allowed_characters=const.FNI_ALLOWED_CHARACTERS,
directory_path=self._directory_path,
directory_name=directory_name
)
return rez
#########################################################################
# Public methods
def change_directory_path(
self,
new_directory_path: str):
""" Changes the save location.
:param new_directory_path: New save directory.
"""
self._directory_path = new_directory_path
self._vi_name_input.check_input()
def change_file_extension(
self,
new_file_extension: str):
""" Changes the file extension.
:param new_file_extension: New file extension.
"""
if self._file_input:
self._file_extension = new_file_extension
self._lbl_file_extension.config(text=self._file_extension)
self._vi_name_input.check_input()
def get_name(self):
""" Returns the file/directory name if it's valid, '' otherwise.
:return: - File/Directory name if it is valid;
- '' otherwise.
"""
name = self._vi_name_input.get_input()
if self._file_input and self._is_file_name_valid(name):
return name
if not self._file_input and self._is_directory_name_valid(name):
return name
return ''
def enable(self):
""" Enables all the widgets."""
if self._file_input:
self._lbl_file_extension.config(state='normal')
self._lbl_allowed_characters.config(state='normal')
self._lbl_user_instruction.config(state='normal')
self._vi_name_input.enable()
def disable(self):
""" Disables all the widgets."""
self._vi_name_input.disable()
self._lbl_user_instruction.config(state='disabled')
self._lbl_allowed_characters.config(state='disabled')
if self._file_input:
self._lbl_file_extension.config(state='disabled')
#########################################################################
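# Example usage (a sketch, not part of the original module; assumes a plain Tk
# root window and uses print as a stand-in name_change_eh callback):
#
#   root = tk.Tk()
#   name_input = FileAndDirectoryNameInputF(root,
#                                           file_extension='.txt',
#                                           initial_directory_path='/tmp',
#                                           name_change_eh=print,
#                                           file_input=True)
#   name_input.pack(fill='both', expand=True)
#   root.mainloop()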
|
{
"content_hash": "448e339eda4787b27b7affc1c6369118",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 77,
"avg_line_length": 30.31764705882353,
"alnum_prop": 0.49967662656836115,
"repo_name": "dani-i/bachelor-project",
"id": "d7b23feb70e43f54af5f89add400066dff392d97",
"size": "7731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/input/file_and_directory_name_input_f.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "566079"
}
],
"symlink_target": ""
}
|
import json
from typing import Any, Dict, Mapping, Optional
from urllib.parse import quote_plus
from mock import patch
from eduid_common.api.testing import EduidAPITestCase
from eduid_webapp.phone.app import PhoneApp, phone_init_app
from eduid_webapp.phone.helpers import PhoneMsg
class PhoneTests(EduidAPITestCase):
app: PhoneApp
def setUp(self):
super(PhoneTests, self).setUp(copy_user_to_private=True)
def load_app(self, config: Mapping[str, Any]) -> PhoneApp:
"""
Called from the parent class, so we can provide the appropriate flask
app for this test case.
"""
return phone_init_app('testing', config)
def update_config(self, config: Dict[str, Any]) -> Dict[str, Any]:
config.update(
{
'available_languages': {'en': 'English', 'sv': 'Svenska'},
'msg_broker_url': 'amqp://dummy',
'am_broker_url': 'amqp://dummy',
'celery_config': {'result_backend': 'amqp', 'task_serializer': 'json'},
'phone_verification_timeout': 7200,
'default_country_code': '46',
'throttle_resend_seconds': 300,
}
)
return config
# parameterized test methods
def _get_all_phone(self, eppn: Optional[str] = None):
"""
GET all phone data for some user
:param eppn: eppn for the user
"""
response = self.browser.get('/all')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = eppn or self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
response2 = client.get('/all')
return json.loads(response2.data)
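    # Note: stacked @patch decorators are applied bottom-up, so the mock for
    # the bottom-most patch arrives as the first positional mock argument.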
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _post_phone(
self,
mock_phone_validator: Any,
mock_code_verification: Any,
mock_request_user_sync: Any,
mod_data: Optional[dict] = None,
send_data: bool = True,
):
"""
POST phone data to add a new phone number to the test user
:param mod_data: to control what data is POSTed
:param send_data: whether to POST any data at all
"""
mock_phone_validator.return_value = True
mock_code_verification.return_value = u'5250f9a4'
mock_request_user_sync.side_effect = self.request_user_sync
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'number': '+34670123456',
'verified': False,
'primary': False,
'csrf_token': sess.get_csrf_token(),
}
if mod_data:
data.update(mod_data)
if send_data:
return client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
return client.post('/new')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def _post_primary(self, mock_request_user_sync: Any, mod_data: Optional[dict] = None):
"""
Set phone number as the primary number for the test user
:param mod_data: to control what data is POSTed
"""
mock_request_user_sync.side_effect = self.request_user_sync
response = self.browser.post('/primary')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
if mod_data:
data.update(mod_data)
return client.post('/primary', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
def _remove(self, mock_request_user_sync: Any, mod_data: Optional[dict] = None):
"""
Remove phone number from the test user
:param mod_data: to control what data is POSTed
"""
mock_request_user_sync.side_effect = self.request_user_sync
response = self.browser.post('/remove')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
if mod_data:
data.update(mod_data)
return client.post('/remove', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _resend_code(
self,
mock_phone_validator: Any,
mock_request_user_sync: Any,
mock_code_verification: Any,
mod_data: Optional[dict] = None,
):
"""
Send a POST request to trigger re-sending a verification code for an unverified phone number in the test user.
:param mod_data: to control the data to be POSTed
"""
mock_phone_validator.return_value = True
mock_request_user_sync.side_effect = self.request_user_sync
mock_code_verification.return_value = u'5250f9a4'
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
if mod_data:
data.update(mod_data)
return client.post('/resend-code', data=json.dumps(data), content_type=self.content_type_json)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def _get_code_backdoor(
self,
mock_phone_validator: Any,
mock_code_verification: Any,
mock_request_user_sync: Any,
mod_data: Optional[dict] = None,
phone: str = '+34670123456',
code: str = '5250f9a4',
):
"""
POST phone data to generate a verification state,
and try to get the generated code through the backdoor
:param mod_data: to control what data is POSTed
:param phone: the phone to use
:param code: mock verification code
"""
mock_phone_validator.return_value = True
mock_code_verification.return_value = code
mock_request_user_sync.side_effect = self.request_user_sync
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'number': phone,
'verified': False,
'primary': False,
'csrf_token': sess.get_csrf_token(),
}
if mod_data:
data.update(mod_data)
client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
client.set_cookie('localhost', key=self.app.conf.magic_cookie_name, value=self.app.conf.magic_cookie)
phone = quote_plus(phone)
eppn = quote_plus(eppn)
return client.get(f'/get-code?phone={phone}&eppn={eppn}')
# actual tests
def test_get_all_phone(self):
phone_data = self._get_all_phone()
self.assertEqual('GET_PHONE_ALL_SUCCESS', phone_data['type'])
self.assertIsNotNone(phone_data['payload']['csrf_token'])
self.assertEqual('+34609609609', phone_data['payload']['phones'][0].get('number'))
self.assertEqual(True, phone_data['payload']['phones'][0].get('primary'))
self.assertEqual('+34 6096096096', phone_data['payload']['phones'][1].get('number'))
self.assertEqual(False, phone_data['payload']['phones'][1].get('primary'))
def test_post_phone_error_no_data(self):
response = self._post_phone(send_data=False)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data['type'])
def test_post_phone_country_code(self):
response = self.browser.post('/new')
self.assertEqual(response.status_code, 302) # Redirect to token service
response = self._post_phone()
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual(u'+34670123456', new_phone_data['payload']['phones'][2].get('number'))
self.assertEqual(False, new_phone_data['payload']['phones'][2].get('verified'))
def test_post_phone_no_country_code(self):
data = {'number': '0701234565'}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual(u'+46701234565', new_phone_data['payload']['phones'][2].get('number'))
self.assertEqual(False, new_phone_data['payload']['phones'][2].get('verified'))
def test_post_phone_wrong_csrf(self):
data = {'csrf_token': 'wrong-token'}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data['type'])
self.assertEqual(['CSRF failed to validate'], new_phone_data['payload']['error']['csrf_token'])
def test_post_phone_invalid(self):
data = {'number': '0'}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data['type'])
self.assertEqual(['phone.phone_format'], new_phone_data['payload']['error']['number'])
def test_post_phone_as_verified(self):
data = {'verified': True}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual(u'+34670123456', new_phone_data['payload']['phones'][2].get('number'))
self.assertFalse(new_phone_data['payload']['phones'][2].get('verified'))
self.assertFalse(new_phone_data['payload']['phones'][2].get('primary'))
def test_post_phone_as_primary(self):
data = {'primary': True}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual(u'+34670123456', new_phone_data['payload']['phones'][2].get('number'))
self.assertFalse(new_phone_data['payload']['phones'][2].get('verified'))
self.assertFalse(new_phone_data['payload']['phones'][2].get('primary'))
def test_post_phone_bad_swedish_mobile(self):
data = {'number': '0711234565'}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data['type'])
self.assertEqual(['phone.swedish_mobile_format'], new_phone_data['payload']['error'].get('number'))
def test_post_phone_bad_country_code(self):
data = {'number': '00711234565'}
response = self._post_phone(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data['type'])
self.assertEqual(['phone.e164_format'], new_phone_data['payload']['error'].get('_schema'))
def test_post_primary(self):
response = self._post_primary()
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_PRIMARY_SUCCESS', new_phone_data['type'])
self.assertEqual(True, new_phone_data['payload']['phones'][0]['verified'])
self.assertEqual(True, new_phone_data['payload']['phones'][0]['primary'])
self.assertEqual(u'+34609609609', new_phone_data['payload']['phones'][0]['number'])
self.assertEqual(False, new_phone_data['payload']['phones'][1]['verified'])
self.assertEqual(False, new_phone_data['payload']['phones'][1]['primary'])
self.assertEqual(u'+34 6096096096', new_phone_data['payload']['phones'][1]['number'])
def test_post_primary_no_csrf(self):
data = {'csrf_token': ''}
response = self._post_primary(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_PRIMARY_FAIL', new_phone_data['type'])
self.assertEqual(['CSRF failed to validate'], new_phone_data['payload']['error']['csrf_token'])
def test_post_primary_unknown(self):
data = {'number': '+66666666666'}
response = self._post_primary(mod_data=data)
self.assertEqual(response.status_code, 200)
new_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_PRIMARY_FAIL', new_phone_data['type'])
self.assertEqual(PhoneMsg.unknown_phone.value, new_phone_data['payload']['message'])
def test_remove(self):
response = self._remove()
self.assertEqual(response.status_code, 200)
delete_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_REMOVE_SUCCESS', delete_phone_data['type'])
self.assertEqual(u'+34 6096096096', delete_phone_data['payload']['phones'][0].get('number'))
def test_remove_primary_other_unverified(self):
data = {'number': '+34 6096096096'}
response = self._remove(mod_data=data)
self.assertEqual(response.status_code, 200)
delete_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_REMOVE_SUCCESS', delete_phone_data['type'])
self.assertEqual(u'+34609609609', delete_phone_data['payload']['phones'][0].get('number'))
def test_remove_no_csrf(self):
data = {'csrf_token': ''}
response = self._remove(mod_data=data)
self.assertEqual(response.status_code, 200)
delete_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_REMOVE_FAIL', delete_phone_data['type'])
self.assertEqual(['CSRF failed to validate'], delete_phone_data['payload']['error']['csrf_token'])
def test_remove_unknown(self):
data = {'number': '+33333333333'}
response = self._remove(mod_data=data)
self.assertEqual(response.status_code, 200)
delete_phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_REMOVE_FAIL', delete_phone_data['type'])
self.assertEqual('phones.unknown_phone', delete_phone_data['payload']['message'])
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_remove_primary_other_verified(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
mock_phone_validator.return_value = True
mock_request_user_sync.side_effect = self.request_user_sync
mock_code_verification.return_value = u'12345'
response = self.browser.post('/remove')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'number': u'+34609123321',
'verified': False,
'primary': False,
'csrf_token': sess.get_csrf_token(),
}
client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': u'+34609123321', 'code': u'12345', 'csrf_token': sess.get_csrf_token()}
response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
verify_phone_data = json.loads(response2.data)
self.assertEqual('POST_PHONE_VERIFY_SUCCESS', verify_phone_data['type'])
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': '+34609609609', 'csrf_token': sess.get_csrf_token()}
response2 = client.post('/remove', data=json.dumps(data), content_type=self.content_type_json)
self.assertEqual(response2.status_code, 200)
delete_phone_data = json.loads(response2.data)
self.assertEqual('POST_PHONE_REMOVE_SUCCESS', delete_phone_data['type'])
self.assertEqual(u'+34 6096096096', delete_phone_data['payload']['phones'][0].get('number'))
def test_resend_code(self):
response = self.browser.post('/resend-code')
self.assertEqual(response.status_code, 302) # Redirect to token service
response = self._resend_code()
self.assertEqual(response.status_code, 200)
phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_RESEND_CODE_SUCCESS', phone_data['type'])
self.assertEqual(u'+34609609609', phone_data['payload']['phones'][0].get('number'))
self.assertEqual(u'+34 6096096096', phone_data['payload']['phones'][1].get('number'))
def test_resend_code_no_csrf(self):
data = {'csrf_token': 'wrong-token'}
response = self._resend_code(mod_data=data)
self.assertEqual(response.status_code, 200)
phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', phone_data['type'])
self.assertEqual(['CSRF failed to validate'], phone_data['payload']['error']['csrf_token'])
def test_resend_code_unknown(self):
data = {'number': '+66666666666'}
response = self._resend_code(mod_data=data)
self.assertEqual(response.status_code, 200)
phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', phone_data['type'])
self.assertEqual('user-out-of-sync', phone_data['payload']['message'])
def test_resend_code_throttle(self):
response = self._resend_code()
self.assertEqual(response.status_code, 200)
phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_RESEND_CODE_SUCCESS', phone_data['type'])
self.assertEqual(u'+34609609609', phone_data['payload']['phones'][0].get('number'))
self.assertEqual(u'+34 6096096096', phone_data['payload']['phones'][1].get('number'))
response = self._resend_code()
self.assertEqual(response.status_code, 200)
phone_data = json.loads(response.data)
self.assertEqual('POST_PHONE_RESEND_CODE_FAIL', phone_data['type'])
self.assertEqual(phone_data['error'], True)
self.assertEqual(phone_data['payload']['message'], 'still-valid-code')
self.assertIsNotNone(phone_data['payload']['csrf_token'])
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_verify(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
mock_phone_validator.return_value = True
mock_request_user_sync.side_effect = self.request_user_sync
mock_code_verification.return_value = u'12345'
response = self.browser.post('/verify')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'number': u'+34609123321',
'verified': False,
'primary': False,
'csrf_token': sess.get_csrf_token(),
}
client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': u'+34609123321', 'code': u'12345', 'csrf_token': sess.get_csrf_token()}
response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
verify_phone_data = json.loads(response2.data)
self.assertEqual('POST_PHONE_VERIFY_SUCCESS', verify_phone_data['type'])
self.assertEqual(u'+34609123321', verify_phone_data['payload']['phones'][2]['number'])
self.assertEqual(True, verify_phone_data['payload']['phones'][2]['verified'])
self.assertEqual(False, verify_phone_data['payload']['phones'][2]['primary'])
self.assertEqual(self.app.proofing_log.db_count(), 1)
@patch('eduid_common.api.am.AmRelay.request_user_sync')
@patch('eduid_webapp.phone.verifications.get_short_hash')
@patch('eduid_common.api.msg.MsgRelay.sendsms')
def test_verify_fail(self, mock_phone_validator, mock_code_verification, mock_request_user_sync):
mock_phone_validator.return_value = True
mock_request_user_sync.side_effect = self.request_user_sync
mock_code_verification.return_value = u'12345'
response = self.browser.post('/verify')
self.assertEqual(response.status_code, 302) # Redirect to token service
eppn = self.test_user_data['eduPersonPrincipalName']
with self.session_cookie(self.browser, eppn) as client:
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {
'number': u'+34609123321',
'verified': False,
'primary': False,
'csrf_token': sess.get_csrf_token(),
}
client.post('/new', data=json.dumps(data), content_type=self.content_type_json)
with self.app.test_request_context():
with client.session_transaction() as sess:
data = {'number': u'+34609123321', 'code': u'wrong_code', 'csrf_token': sess.get_csrf_token()}
response2 = client.post('/verify', data=json.dumps(data), content_type=self.content_type_json)
verify_phone_data = json.loads(response2.data)
self.assertEqual(verify_phone_data['type'], 'POST_PHONE_VERIFY_FAIL')
self.assertEqual(verify_phone_data['payload']['message'], 'phones.code_invalid_or_expired')
self.assertEqual(self.app.proofing_log.db_count(), 0)
def test_post_phone_duplicated_number(self):
data = {'number': '0701234565'}
response1 = self._post_phone(mod_data=data)
self.assertEqual(response1.status_code, 200)
new_phone_data = json.loads(response1.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual(u'+46701234565', new_phone_data['payload']['phones'][2].get('number'))
self.assertEqual(False, new_phone_data['payload']['phones'][2].get('verified'))
eppn = self.test_user_data['eduPersonPrincipalName']
# Save above phone number for user in central db
user = self.app.private_userdb.get_user_by_eppn(eppn)
self.request_user_sync(user)
response2 = self._post_phone(mod_data=data)
self.assertEqual(response2.status_code, 200)
new_phone_data2 = json.loads(response2.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data2['type'])
self.assertEqual(['phone.phone_duplicated'], new_phone_data2['payload']['error'].get('number'))
def test_post_phone_duplicated_number_e_164(self):
data = {'number': '+46701234565'} # e164 format
response1 = self._post_phone(mod_data=data)
self.assertEqual(response1.status_code, 200)
new_phone_data = json.loads(response1.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual('+46701234565', new_phone_data['payload']['phones'][2].get('number'))
self.assertEqual(False, new_phone_data['payload']['phones'][2].get('verified'))
eppn = self.test_user_data['eduPersonPrincipalName']
# Save above phone number for user in central db
user = self.app.private_userdb.get_user_by_eppn(eppn)
self.request_user_sync(user)
data = {'number': '0701234565'} # National format
response2 = self._post_phone(mod_data=data)
self.assertEqual(response2.status_code, 200)
new_phone_data2 = json.loads(response2.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data2['type'])
self.assertEqual(['phone.phone_duplicated'], new_phone_data2['payload']['error'].get('number'))
def test_post_phone_duplicated_number_e_164_2(self):
        data = {'number': '0701234565'}  # National format
response1 = self._post_phone(mod_data=data)
self.assertEqual(response1.status_code, 200)
new_phone_data = json.loads(response1.data)
self.assertEqual('POST_PHONE_NEW_SUCCESS', new_phone_data['type'])
self.assertEqual('+46701234565', new_phone_data['payload']['phones'][2].get('number'))
self.assertEqual(False, new_phone_data['payload']['phones'][2].get('verified'))
eppn = self.test_user_data['eduPersonPrincipalName']
# Save above phone number for user in central db
user = self.app.private_userdb.get_user_by_eppn(eppn)
self.request_user_sync(user)
        data = {'number': '+46701234565'}  # e164 format
response2 = self._post_phone(mod_data=data)
self.assertEqual(response2.status_code, 200)
new_phone_data2 = json.loads(response2.data)
self.assertEqual('POST_PHONE_NEW_FAIL', new_phone_data2['type'])
self.assertEqual(['phone.phone_duplicated'], new_phone_data2['payload']['error'].get('number'))
def test_get_code_backdoor(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
code = '0123456'
resp = self._get_code_backdoor(code=code)
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.data, code.encode('ascii'))
def test_get_code_no_backdoor_in_pro(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'pro'
code = '0123456'
resp = self._get_code_backdoor(code=code)
self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured1(self):
self.app.conf.magic_cookie = 'magic-cookie'
self.app.conf.magic_cookie_name = ''
self.app.conf.environment = 'dev'
code = '0123456'
resp = self._get_code_backdoor(code=code)
self.assertEqual(resp.status_code, 400)
def test_get_code_no_backdoor_misconfigured2(self):
self.app.conf.magic_cookie = ''
self.app.conf.magic_cookie_name = 'magic'
self.app.conf.environment = 'dev'
code = '0123456'
resp = self._get_code_backdoor(code=code)
self.assertEqual(resp.status_code, 400)
|
{
"content_hash": "6b25b55f765208977cdfe8f63e64320a",
"timestamp": "",
"source": "github",
"line_count": 697,
"max_line_length": 119,
"avg_line_length": 41.80057388809182,
"alnum_prop": 0.6197357130598936,
"repo_name": "SUNET/eduid-webapp",
"id": "14feeb412612f302ccd876cc5f15b9cb471f3d18",
"size": "30776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/eduid_webapp/phone/tests/test_app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "433"
},
{
"name": "HTML",
"bytes": "46956"
},
{
"name": "Python",
"bytes": "1041956"
},
{
"name": "Shell",
"bytes": "577"
}
],
"symlink_target": ""
}
|
import os
import sqlite3
import asyncio
import contextlib
import ipaddress
from collections import namedtuple
from itertools import chain
from pprint import pprint
from datetime import datetime
import aiodns
import requests
DB_NAME = 'dnsdb.sqlite3'
DnsQuery = namedtuple('DnsQuery',
                      'host created_at a aaaa cname mx ns soa')
DnsResponse = namedtuple('DnsResponse', 'response status')
Host = namedtuple('Host', 'name type_id')
class Resolver:
def __init__(self, *args, **kwargs):
self._resolver = aiodns.DNSResolver(*args, **kwargs)
async def single_query(self, host, qtype):
try:
return DnsResponse(
await self._resolver.query(host, qtype),
'OK'
)
        except (UnicodeError, aiodns.error.DNSError):
            # TODO handle errors
            return DnsResponse(None, 'FAIL')
        except Exception:
            # Catch-all so a single bad host cannot abort the whole scan.
            return DnsResponse(None, 'FAIL')
async def query(self, domain):
print(domain)
now = datetime.utcnow().isoformat(' ')
return DnsQuery(
domain,
now,
await self.single_query(domain, 'A'),
await self.single_query(domain, 'AAAA'),
await self.single_query(domain, 'CNAME'),
await self.single_query(domain, 'MX'),
await self.single_query(domain, 'NS'),
await self.single_query(domain, 'SOA')
)
def get_new_domains():
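    """Yield candidate domain names from the SANS ISC suspicious-domains feed."""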
# yield from ['google.com']
# return
response = requests.get(
'https://isc.sans.edu/feeds/suspiciousdomains_High.txt'
)
response.raise_for_status()
yield from (
line
for line in response.text.split('\n')
if not line.startswith('#') and '.' in line
)
def get_db():
"""Opens a new database connection"""
rv = sqlite3.connect(DB_NAME)
rv.row_factory = sqlite3.Row
return rv
def create_db():
"""Initializes the database.
>>> create_db()
"""
with contextlib.closing(get_db()) as db:
cursor = db.cursor()
with open('schema.sql', mode='r') as file:
cursor.executescript(file.read())
db.commit()
def remove_db():
os.remove(DB_NAME)
def add_domains():
with contextlib.closing(get_db()) as db:
cursor = db.cursor()
cursor.executemany(
'INSERT INTO host(name, type_id) VALUES (?, ?)',
[(domain, 2) for domain in get_new_domains()]
)
db.commit()
def check_domains(domains):
resolver = Resolver()
loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(resolver.query(domain)) for domain in domains]
    # TODO: make this a generator
return loop.run_until_complete(asyncio.gather(*tasks))
def get_host_type_id(host):
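    # Hosts that parse as IP addresses get type_id 1; everything else is
    # treated as a domain name (type_id 2), matching add_domains() above.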
try:
ipaddress.ip_address(host)
except ValueError:
return 2
else:
return 1
def get_response(obj):
result = getattr(obj, 'response')
if not result:
return []
else:
return result
def save_changes(dns_query: DnsQuery):
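    """Persist one DnsQuery: insert a scan row, upsert every host seen in the
    answers, then store the per-record-type rows for that scan."""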
with contextlib.closing(get_db()) as db:
cursor = db.cursor()
r = cursor.execute('SELECT id FROM host WHERE name=?', (dns_query.host, ))
host_id = list(r)[0][0]
cursor.execute('''INSERT INTO scan(
host_id, query_a_status, query_cname_status, query_mx_status,
query_ns_status, query_soa_status, created_at
) VALUES (?, ?, ?, ?, ?, ?, ?)''', (
host_id,
dns_query.a.status,
dns_query.cname.status,
dns_query.mx.status,
dns_query.ns.status,
dns_query.soa.status,
dns_query.created_at
)
)
hosts = [
Host(query.host, get_host_type_id(query.host))
for query in chain(
get_response(dns_query.a),
get_response(dns_query.aaaa),
get_response(dns_query.mx),
get_response(dns_query.ns)
)
]
if dns_query.cname.response:
cname = dns_query.cname.response.cname
hosts += [
Host(cname, get_host_type_id(cname))
]
if dns_query.soa.response:
soa_nsname = dns_query.soa.response.nsname
hosts += [
Host(soa_nsname, get_host_type_id(soa_nsname))
]
hosts = set(hosts)
cursor.executemany(
'INSERT OR IGNORE INTO host(name, type_id) VALUES (?, ?)',
hosts
)
r = cursor.execute(
'SELECT id FROM scan WHERE host_id=(SELECT id FROM host WHERE name=?) ORDER BY julianday(created_at) DESC LIMIT 1',
(dns_query.host, )
)
scan_id = list(r)[0][0]
cursor.executemany(
'INSERT INTO record_a(host_id, scan_id, ip_address_id, ttl) VALUES(?, ?, (SELECT id FROM host WHERE name=?), ?)',
[(host_id, scan_id, a.host, a.ttl) for a in chain(get_response(dns_query.a), get_response(dns_query.aaaa))]
)
cursor.executemany(
'INSERT INTO record_mx(host_id, scan_id, mail_id, priority, ttl) VALUES(?, ?, (SELECT id FROM host WHERE name=?), ?, ?)',
[(host_id, scan_id, mx.host, mx.priority, mx.ttl) for mx in get_response(dns_query.mx)]
)
cursor.executemany(
'INSERT INTO record_ns(host_id, scan_id, nameserver_id, ttl) VALUES(?, ?, (SELECT id FROM host WHERE name=?), ?)',
[(host_id, scan_id, ns.host, ns.ttl) for ns in get_response(dns_query.ns)]
)
if dns_query.cname.response:
cursor.execute(
'INSERT INTO record_cname(host_id, scan_id, cname, ttl) VALUES(?, ?, ?, ?)',
(
host_id,
scan_id,
dns_query.cname.response.cname,
dns_query.cname.response.ttl
)
)
if dns_query.soa.response:
cursor.execute(
'INSERT INTO record_soa(host_id, scan_id, nameserver_id, hostmaster, serial, refresh, retry, expires, minttl, ttl) VALUES(?, ?, (SELECT id FROM host WHERE name=?), ?, ?, ?, ?, ?, ?, ?)',
(
host_id,
scan_id,
dns_query.soa.response.nsname,
dns_query.soa.response.hostmaster,
dns_query.soa.response.serial,
dns_query.soa.response.refresh,
dns_query.soa.response.retry,
dns_query.soa.response.expires,
dns_query.soa.response.minttl,
dns_query.soa.response.ttl
)
)
db.commit()
def get_domains():
with contextlib.closing(get_db()) as db:
cursor = db.cursor()
hosts = [row[0] for row in cursor.execute('SELECT name FROM host WHERE type_id=2')]
return hosts
def update_domains():
for query in check_domains(get_domains()):
save_changes(query)
def main():
create_db()
add_domains()
# save_changes(check_domains(['google.com'])[0])
# pprint(check_domains(['google.com'])[0])
# update_domains()
if __name__ == '__main__':
main()
# TODO add lower and last dot
|
{
"content_hash": "0cb681438eef784daa41698ac776054d",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 202,
"avg_line_length": 30.714285714285715,
"alnum_prop": 0.5459644322845417,
"repo_name": "kotyara1005/hello-rebbit",
"id": "1220ebac45aa0dee0f21987bf083e0364f3717f4",
"size": "7326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domain_tracker/dnsdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2886"
},
{
"name": "Python",
"bytes": "32184"
}
],
"symlink_target": ""
}
|
"""Unit-test of module 'backend' in 'msbackup' package."""
import os
import filecmp
import shutil
import subprocess
import tempfile
import unittest
from msbackup.backend import File
from test.mock import TextFile
try:
import configparser
except ImportError:
from six.moves import configparser
try:
xrange
except NameError:
xrange = range
TEST_ROOT = os.path.abspath(os.path.dirname(__file__))
class BackendFile(unittest.TestCase):
"""Test case of module 'backend' of 'msbackup' package."""
@classmethod
def setUpClass(cls):
"Setting up class fixture before running tests in the class."
config = configparser.RawConfigParser()
config.read(os.path.join(TEST_ROOT, 'test.config'))
cls.out = TextFile()
cls.err = TextFile()
cls.backend = File(config, out=cls.out, err=cls.err)
def setUp(self):
"""Setting up the test case."""
self.test_dir = tempfile.mkdtemp('_msbackup-test_backend')
fout, self.test_file = tempfile.mkstemp(dir=self.test_dir)
os.write(fout, os.urandom(16*1024))
os.close(fout)
def tearDown(self):
"""Tear down the test case."""
self.out.data = u''
self.err.data = u''
shutil.rmtree(self.test_dir, True)
def check(self):
"""Check results of executing back-end method."""
archive_path = u'{}.tar.bz2'.format(self.test_file)
self.assertTrue(os.path.exists(archive_path))
origin = u'{}.origin'.format(self.test_file)
os.rename(self.test_file, origin)
params = [u'/bin/tar', u'-xjf', archive_path,
u'-C', os.path.dirname(archive_path)]
with open(os.devnull, 'w') as out:
self.assertEqual(0, subprocess.call(params, stdout=out))
self.assertTrue(os.path.exists(self.test_file))
self.assertTrue(filecmp.cmp(origin, self.test_file, shallow=False))
def test_archive(self):
"""Test of method backend.File.archive()."""
output = u'{}.tar.bz2'.format(self.test_file)
src = os.path.basename(self.test_file)
base_dir = os.path.dirname(output)
self.assertEqual(0, self.backend.archive(src, output, base_dir))
self.check()
self.assertEqual(u'', self.out.data)
self.assertEqual(u'', self.err.data)
def test_backup(self):
"""Test of method backend.File.backup()."""
self.assertEqual(0, self.backend.backup(self.test_file,
self.test_dir))
self.check()
self.assertEqual(u'', self.out.data)
self.assertEqual(u'', self.err.data)
def test_backup_verbose(self):
"""Test of method backend.File.backup() with verbose output."""
self.assertEqual(0, self.backend.backup(self.test_file,
self.test_dir,
verbose=True))
self.check()
self.assertEqual(u'Backup of {}\n'.format(self.test_file),
self.out.data)
self.assertEqual(u'', self.err.data)
if __name__ == "__main__":
import xmlrunner
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
|
{
"content_hash": "71a91a2cce1d8ff35aa9800415c5ccd1",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 76,
"avg_line_length": 33.55670103092783,
"alnum_prop": 0.6018433179723502,
"repo_name": "Aleksei-Badyaev/msbackup",
"id": "233acb86a9d265e51b77502ec300492ba7cf0615",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61973"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
}
|
'''
Created on 2009-12-29
@author: cfournie
'''
import util
import DocumentStructure
from lxml import etree
IGNORE = 'ignore'
IGNORE_TRUE = 'true'
IGNORE_FALSE = 'false'
ABBR = 'abbrFound'
ABBR_TRUE = 'true'
ABBR_FALSE = 'false'
class _ParseXHTML_Abstract(object):
'''
Shared handlers that append parsed text values onto a document-structure node.
'''
def __init__(self):
'''
Constructor
'''
def handleReplace(self, dstree, dsnode, value):
util.appendToValue(dsnode, value)
def handleReplaceQuoted(self, dstree, dsnode, value):
util.appendToValue(dsnode, value)
class _ParseXHTML_Base(object):
'''
Base XHTML parser: dispatches on tag name and builds a DocumentStructure tree.
'''
def __init__(self):
'''
Constructor
'''
self.abstractHandler = _ParseXHTML_Abstract()
self.pseudodsnode = None
self.pseudodsnodereplace = None
self.pseudoTrigger = False
self.preInsertedParagraph = False
def handle(self, xmltree, xmlnode, dstree, dsnode):
# Determine whether to ignore
if xmlnode.get(IGNORE) == IGNORE_TRUE:
return dsnode
if xmlnode.getparent() != None:
parentTag = util.getTag(xmlnode.getparent())
if self.pseudodsnode is not None and parentTag != 'p':
self.pseudoTrigger = True
dsnode = self.pseudodsnode
# Process tag
tag = util.getTag(xmlnode)
if tag == 'em':
return self.__handle_em__(xmlnode, dstree, dsnode)
elif tag == 'html':
return self.__handle_html__(xmlnode, dstree, dsnode)
elif tag == 'title':
return self.__handle_title__(xmlnode, dstree, dsnode)
elif tag == 'abbr':
return self.__handle_abbr__(xmlnode, dstree, dsnode)
elif tag == 'acronym':
return self.__handle_acronym__(xmlnode, dstree, dsnode)
elif tag == 'address':
return self.__handle_address__(xmlnode, dstree, dsnode)
elif tag == 'blockquote':
return self.__handle_blockquote__(xmlnode, dstree, dsnode)
elif tag == 'br':
return self.__handle_br__(xmlnode, dstree, dsnode)
elif tag == 'cite':
return self.__handle_cite__(xmlnode, dstree, dsnode)
elif tag == 'dfn':
return self.__handle_dfn__(xmlnode, dstree, dsnode)
elif tag == 'h1':
return self.__handle_h1__(xmlnode, dstree, dsnode)
elif tag == 'h2':
return self.__handle_h2__(xmlnode, dstree, dsnode)
elif tag == 'h3':
return self.__handle_h3__(xmlnode, dstree, dsnode)
elif tag == 'h4':
return self.__handle_h4__(xmlnode, dstree, dsnode)
elif tag == 'h5':
return self.__handle_h5__(xmlnode, dstree, dsnode)
elif tag == 'h6':
return self.__handle_h6__(xmlnode, dstree, dsnode)
elif tag == 'kbd':
return self.__handle_kbd__(xmlnode, dstree, dsnode)
elif tag == 'p':
return self.__handle_p__(xmlnode, dstree, dsnode)
elif tag == 'pre':
return self.__handle_pre__(xmlnode, dstree, dsnode)
elif tag == 'q':
return self.__handle_q__(xmlnode, dstree, dsnode)
elif tag == 'samp':
return self.__handle_samp__(xmlnode, dstree, dsnode)
elif tag == 'span':
return self.__handle_span__(xmlnode, dstree, dsnode)
elif tag == 'strong':
return self.__handle_strong__(xmlnode, dstree, dsnode)
elif tag == 'var':
return self.__handle_var__(xmlnode, dstree, dsnode)
elif tag == 'a':
return self.__handle_a__(xmlnode, dstree, dsnode)
elif tag == 'dl':
return self.__handle_dl__(xmlnode, dstree, dsnode)
elif tag == 'dt':
return self.__handle_dt__(xmlnode, dstree, dsnode)
elif tag == 'dd':
return self.__handle_dd__(xmlnode, dstree, dsnode)
elif tag == 'ol':
return self.__handle_ol__(xmlnode, dstree, dsnode)
elif tag == 'ul':
return self.__handle_ul__(xmlnode, dstree, dsnode)
elif tag == 'li':
return self.__handle_li__(xmlnode, dstree, dsnode)
elif tag == 'b':
return self.__handle_b__(xmlnode, dstree, dsnode)
elif tag == 'big':
return self.__handle_big__(xmlnode, dstree, dsnode)
elif tag == 'i':
return self.__handle_i__(xmlnode, dstree, dsnode)
elif tag == 'small':
return self.__handle_small__(xmlnode, dstree, dsnode)
elif tag == 'sub':
return self.__handle_sub__(xmlnode, dstree, dsnode)
elif tag == 'sup':
return self.__handle_sup__(xmlnode, dstree, dsnode)
elif tag == 'tt':
return self.__handle_tt__(xmlnode, dstree, dsnode)
elif tag == 'del':
return self.__handle_del__(xmlnode, dstree, dsnode)
elif tag == 'ins':
return self.__handle_ins__(xmlnode, dstree, dsnode)
elif tag == 'caption':
return self.__handle_caption__(xmlnode, dstree, dsnode)
elif tag == 'table':
return self.__handle_table__(xmlnode, dstree, dsnode)
elif tag == 'img':
return self.__handle_img__(xmlnode, dstree, dsnode)
elif tag == 'tr':
# Ignore subtree
xmlchildtree = etree.ElementTree(xmlnode)
for xmlchildnode in xmlchildtree.iter(tag=etree.Element):
xmlchildnode.set(IGNORE, IGNORE_TRUE)
return dsnode
else:
return dsnode
def prepareNode(self, dstree, dsnode, level, indent = None):
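"""Move dsnode up or down the structure tree until it can hold content
at 'level', creating intermediate nodes as needed; returns the node."""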
curLevel = util.levelToInt(DocumentStructure.getLevel(dsnode))
levelAbove = util.levelToInt(util.levelAbove(level))
levelDesired = util.levelToInt(level)
if curLevel >= levelAbove:
while curLevel > levelAbove:
newLevel = util.levelBelow(DocumentStructure.getLevel(dsnode))
dsnode = dstree.addNode(dsnode, level = newLevel, indent = indent)
curLevel = util.levelToInt(DocumentStructure.getLevel(dsnode))
if level == DocumentStructure.PARAGRAPH:
self.preInsertedParagraph = True
dsnode = dstree.addNode(dsnode, level = DocumentStructure.PARAGRAPH, indent = indent)
else:
self.preInsertedParagraph = False
elif curLevel < levelAbove:
while curLevel < levelDesired:
dsnode = dsnode.getparent()
curLevel = util.levelToInt(DocumentStructure.getLevel(dsnode))
if level != DocumentStructure.PARAGRAPH:
dsnode = dsnode.getparent()
DocumentStructure.setIndent(dsnode, indent)
return dsnode
def isIndent(self, xmlnode):
tag = util.getTag(xmlnode)
return tag == 'blockquote'
def handleIndent(self, dsnode, xmlnode):
indent = DocumentStructure.getIndent(xmlnode)
DocumentStructure.setIndent(dsnode, indent)
return dsnode
def __handle_body__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return dsnode
def __handle_html__(self, xmlnode, dstree, dsnode):
xmllang = None
if None in xmlnode.nsmap:
xmllang = '{' + xmlnode.nsmap[None] + '}lang'
if 'lang' in xmlnode.attrib:
dstree.setLang(xmlnode.attrib['lang'])
elif xmllang != None and xmllang in xmlnode.attrib:
dstree.setLang(xmlnode.attrib[xmllang])
return dsnode
def __handle_title__(self, xmlnode, dstree, dsnode):
# Assumes document node
if DocumentStructure.getLevel(dsnode) != DocumentStructure.DOCUMENT:
raise Exception("Unexpected level")
DocumentStructure.setNode(dsnode, value=util.parseValueFrom(xmlnode))
return dsnode
def __handle_abbr__(self, xmlnode, dstree, dsnode):
# Tag subtree as ABBR
xmlchildtree = etree.ElementTree(xmlnode)
xmlchildlastnode = None
for xmlchildnode in xmlchildtree.iter(tag=etree.Element):
xmlchildnode.set(ABBR, ABBR_TRUE)
xmlchildlastnode = xmlchildnode
xmlchildlastnode.set(ABBR, ABBR_FALSE)
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode) + ' ')
return self.handleIndent(dsnode, xmlnode)
def __handle_acronym__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_address__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_blockquote__(self, xmlnode, dstree, dsnode):
# Ignore inner value, text data is not allowed
valueTail = xmlnode.tail
# Encounter paragraph data assumed to be without a pre-inserted p entry
if valueTail != None and len(valueTail.strip()) > 0:
dsnodeParent = dsnode
if util.getLevel(dsnodeParent) != util.levelAbove(DocumentStructure.PARAGRAPH):
dsnodeParent = util.getparent(dstree, dsnode)
self.pseudodsnode = dstree.addNode(dsnodeParent, DocumentStructure.PARAGRAPH, value = valueTail)
self.pseudodsnodereplace = dsnode
return dsnode
def __handle_br__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_cite__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_dfn__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_em__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_h1__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L1
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_h2__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L2
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_h3__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L3
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_h4__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L4
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_h5__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L5
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_h6__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.SECTION_L6
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_kbd__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplaceQuoted(dstree, dsnode, util.parseQuotedValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_p__(self, xmlnode, dstree, dsnode):
# End a pseudo-p trigger
if self.pseudoTrigger:
self.pseudoTrigger = False
dsnode = self.pseudodsnodereplace
self.pseudodsnodereplace = None
self.pseudodsnode = None
valueText = xmlnode.text
valueTail = xmlnode.tail
if valueText != None:
valueText = valueText.strip()
if valueTail != None:
valueTail = valueTail.strip()
# Encounter a p tag with a pre-inserted p entry
if self.preInsertedParagraph == True:
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, valueText)
# Encounter a p tag without a pre-inserted p entry
else:
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
if self.preInsertedParagraph != True:
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
dsnode = util.getparent(dstree, dsnode)
dsnode = dstree.addNode(dsnode, level = DocumentStructure.PARAGRAPH, value = valueText)
else:
self.abstractHandler.handleReplace(dstree, dsnode, valueText)
self.preInsertedParagraph = False
# Encounter a p tag without a pre-inserted p entry
if valueTail != None and len(valueTail.strip()) > 0:
dsnodeParent = dsnode
if util.getLevel(dsnodeParent) != util.levelAbove(DocumentStructure.PARAGRAPH):
dsnodeParent = util.getparent(dstree, dsnode)
self.pseudodsnode = dstree.addNode(dsnodeParent, DocumentStructure.PARAGRAPH, value = valueTail)
self.pseudodsnodereplace = dsnode
return self.handleIndent(dsnode, xmlnode)
def __handle_pre__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_q__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplaceQuoted(dstree, dsnode, util.parseQuotedValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_samp__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplaceQuoted(dstree, dsnode, util.parseQuotedValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_span__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_strong__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_var__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplaceQuoted(dstree, dsnode, util.parseQuotedValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_a__(self, xmlnode, dstree, dsnode):
value = util.parseValueFrom(xmlnode)
if xmlnode.get(ABBR) != ABBR_TRUE:
value = value + ' '
self.abstractHandler.handleReplace(dstree, dsnode, value)
return self.handleIndent(dsnode, xmlnode)
def __handle_dl__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_SENTENCE
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level)
return self.handleIndent(dsnode, xmlnode)
def __handle_dt__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_CLAUSE
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_dd__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_CLAUSE
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_ol__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_SENTENCE
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level)
return self.handleIndent(dsnode, xmlnode)
def __handle_ul__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_SENTENCE
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level)
return self.handleIndent(dsnode, xmlnode)
def __handle_li__(self, xmlnode, dstree, dsnode):
level = DocumentStructure.TEXT_CLAUSE
value = util.parseValueFrom(xmlnode)
dsnode = self.prepareNode(dstree, dsnode, level)
dsnode = dstree.addNode(dsnode, level, value = value)
return self.handleIndent(dsnode, xmlnode)
def __handle_b__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_big__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_i__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_small__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_sub__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_sup__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_tt__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_del__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
# Ignore the inner data of xmlnode.text because it has been deleted
valueTail = xmlnode.tail
self.abstractHandler.handleReplace(dstree, dsnode, valueTail)
return self.handleIndent(dsnode, xmlnode)
def __handle_ins__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
self.abstractHandler.handleReplace(dstree, dsnode, util.parseValueFrom(xmlnode))
return self.handleIndent(dsnode, xmlnode)
def __handle_caption__(self, xmlnode, dstree, dsnode):
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
value = util.parseValueFrom(xmlnode)
if DocumentStructure.getLevel(dsnode) == DocumentStructure.PARAGRAPH:
value = util.ensureEndsInPeriod(value)
self.abstractHandler.handleReplace(dstree, dsnode, value)
return self.handleIndent(dsnode, xmlnode)
def __handle_table__(self, xmlnode, dstree, dsnode):
if 'summary' in xmlnode.attrib:
value = xmlnode.attrib['summary']
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
if DocumentStructure.getLevel(dsnode) == DocumentStructure.PARAGRAPH:
value = util.ensureEndsInPeriod(value)
util.appendToValue(dsnode, ' ' + value + ' ')
return self.handleIndent(dsnode, xmlnode)
def __handle_img__(self, xmlnode, dstree, dsnode):
value = ''
if 'alt' in xmlnode.attrib:
value = xmlnode.attrib['alt']
elif 'title' in xmlnode.attrib:
value = xmlnode.attrib['title']
dsnode = self.prepareNode(dstree, dsnode, DocumentStructure.PARAGRAPH)
if DocumentStructure.getLevel(dsnode) == DocumentStructure.PARAGRAPH:
value = util.ensureEndsInPeriod(value)
util.appendToValue(dsnode, ' ' + value + ' ')
return self.handleIndent(dsnode, xmlnode)
class ParseXHTML10(object):
'''
XHTML 1.0 parser; delegates tag handling to _ParseXHTML_Base.
'''
def __init__(self):
'''
Constructor
'''
self.base = _ParseXHTML_Base()
def handle(self, xmltree, xmlnode, dstree, dsnode):
return self.base.handle(xmltree, xmlnode, dstree, dsnode)
def isIndent(self, xmlnode):
return self.base.isIndent(xmlnode)
class ParseXHTML11(object):
'''
XHTML 1.1 parser; reuses the XHTML 1.0 handling.
'''
def __init__(self):
'''
Constructor
'''
self.base = ParseXHTML10()
def handle(self, xmltree, xmlnode, dstree, dsnode):
return self.base.handle(xmltree, xmlnode, dstree, dsnode)
def isIndent(self, xmlnode):
return self.base.isIndent(xmlnode)
|
{
"content_hash": "40876ad7446ae7253c7efcfc416370ba",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 107,
"avg_line_length": 37.21577380952381,
"alnum_prop": 0.6030229117517694,
"repo_name": "cfournie/docstruct",
"id": "18545d80620a6f56fea01978e2626fcdc03f28d9",
"size": "25009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/src/docstruct/ParseXHTML.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "44586"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import webobentrypoints
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WebOb Entry points'
copyright = u'2014, Lennart Regebro'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = webobentrypoints.__version__
# The full version, including alpha/beta/rc tags.
release = webobentrypoints.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'webobentrypointsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'webobentrypoints.tex',
u'WebOb Entry points Documentation',
u'Lennart Regebro', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'webobentrypoints',
u'WebOb Entry points Documentation',
[u'Lennart Regebro'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'webobentrypoints',
u'WebOb Entry points Documentation',
u'Lennart Regebro',
'webobentrypoints',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "049ce884bc9b1b1e76dc38022883fdb4",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 76,
"avg_line_length": 31.05,
"alnum_prop": 0.705685618729097,
"repo_name": "regebro/webobentrypoints",
"id": "3394fca72d8a181b7afc51f5a2f4b724e3229aac",
"size": "8518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12473"
},
{
"name": "Shell",
"bytes": "6467"
}
],
"symlink_target": ""
}
|
from fakeredis import FakeRedis
import redis
from redlock import Redlock
from redlock_fifo.extendable_redlock import ExtendableRedlock
from time import time
import threading
class FakeRedisCustom(FakeRedis):
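"""FakeRedis variant for tests: hosts named '*.inactive' raise
ConnectionError, and eval() emulates the Redlock unlock/extend scripts."""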
def __init__(self, db=0, charset='utf-8', errors='strict', **kwargs):
super(FakeRedisCustom, self).__init__(db, charset, errors, **kwargs)
self.fail_on_communicate = False
if 'host' in kwargs and kwargs['host'].endswith('.inactive'):
self.fail_on_communicate = True
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
if self.fail_on_communicate:
raise redis.exceptions.ConnectionError
return super(FakeRedisCustom, self).set(name, value, ex, px, nx, xx)
def eval(self, script, nb_of_args, *args):
if self.fail_on_communicate:
raise redis.exceptions.ConnectionError
if script == Redlock.unlock_script:
if self.get(args[0]) == args[1]:
return self.delete(args[0])
else:
return 0
elif script == ExtendableRedlock.extend_script:
if self.get(args[0]) == args[1]:
return self.pexpire(args[0], args[2])
# mirror the real extend script: 0 means the lock was not held
return 0
def pexpire(self, key, new_expiry_ms):
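# FakeRedis lacks pexpire; emulate it with second-resolution expire.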
return self.expire(key, ms_to_seconds(new_expiry_ms))
def get_servers_pool(active, inactive):
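"""Build Redis server configs; '*.inactive' host names mark servers that will fail on use."""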
redis_servers = []
for i in range(inactive):
server_name = "server%s.inactive" % i
redis_servers.append({"host": server_name, "port": 6379, 'db': server_name})
for i in range(active):
server_name = "server%s.active" % i
redis_servers.append({"host": server_name, "port": 6379, 'db': server_name})
return redis_servers
class ThreadCollection(object):
def __init__(self):
self.threads = []
def start(self, target, *args):
thread = threading.Thread(target=target, args=args)
thread.start()
self.threads.append(thread)
def join(self):
for t in self.threads:
t.join()
class TestTimer(object):
def __init__(self):
self.elapsed = time()
def get_elapsed(self):
return time()-self.elapsed
def ms_to_seconds(ms):
return float(ms)/1000
def seconds_to_ms(seconds):
return seconds*1000
|
{
"content_hash": "78ad014e06418ca1f2405a3bdfe7444d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 84,
"avg_line_length": 28.75,
"alnum_prop": 0.6186956521739131,
"repo_name": "internap/redlock-fifo",
"id": "ee89c01a33a868a082c8dbf4294a0dc6b14996d0",
"size": "2873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/testutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26873"
}
],
"symlink_target": ""
}
|
import unittest, time, sys, random, math
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_import as h2i
from operator import itemgetter
# want named tuples
import collections
# a truly uniform sphere
# http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
# While the author prefers the discarding method for spheres, for completeness
# he offers the exact solution: http://stackoverflow.com/questions/918736/random-number-generator-that-produces-a-power-law-distribution/918782#918782
# In spherical coordinates, taking advantage of the sampling rule:
# http://stackoverflow.com/questions/2106503/pseudorandom-number-generator-exponential-distribution/2106568#2106568
CLUSTERS = 5
SPHERE_PTS = 100000
# BAD_SEED = None
# BAD_SEED = 5010213207974401134
BAD_SEED = 815071896901582303
MAX_ITER = 1000
TRIALS = 1
INIT='Furthest'
# INIT='PlusPlus'
# random doesn't seem to get good answer?
# INIT=''
# since the init is using unnormalized values for sum of squares calcs,
# biasing the count for large numbers for some spheres will mess it up
NOT_SO_BAD = False
# NOT_SO_BAD = False
def get_xyz_sphere(R):
phi = random.uniform(0, 2 * math.pi)
costheta = random.uniform(-1,1)
u = random.random() # 0 to 1
theta = math.acos(costheta)
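# cube-root sampling of the radius keeps point density uniform in volume,
# since the volume enclosed by radius r grows as r**3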
r = R * (u ** (1.0/3))
# now you have a (r, theta, phi) group which can be transformed to (x, y, z)
# in the usual way
x = r * math.sin(theta) * math.cos(phi)
y = r * math.sin(theta) * math.sin(phi)
z = r * math.cos(theta)
### print [x,y,z]
return [x,y,z]
def write_spheres_dataset(csvPathname, CLUSTERS, n):
dsf = open(csvPathname, "w+")
# going to do a bunch of spheres, with differing # of pts and R
# R is radius of the spheres
# separate them by 3 * the previous R
# keep track of the centers so we compare to a sorted result from H2O
expectedCenters = []
currentCenter = None
totalRows = 0
print ""
for sphereCnt in range(CLUSTERS):
R = 10 * (sphereCnt+1)
newOffset = [3*R,3*R,3*R]
# figure out the next center
if currentCenter is None:
currentCenter = [0,0,0]
else:
currentCenter = [a+b for a,b in zip(currentCenter, newOffset)]
expectedCenters.append(currentCenter)
# build a sphere at that center
# pick a random # of points, from .5n to 1.5n
if NOT_SO_BAD:
numPts = random.randint(int(.5*n), int(1.5*n))
else:
numPts = n
print "currentCenter:", currentCenter, "R:", R, "numPts", numPts
for i in range(numPts):
xyz = get_xyz_sphere(R)
xyzShifted = [a+b for a,b in zip(xyz,currentCenter)]
dsf.write(",".join(map(str,xyzShifted))+"\n")
totalRows += 1
dsf.close()
print "Spheres created:", len(expectedCenters), "totalRows:", totalRows
return expectedCenters
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
# use the known bad seed if it's set. otherwise should be None
SEED = h2o.setup_random_seed(seed=BAD_SEED)
h2o.init(2)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans2_sphere5_bad_inits(self):
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilename = 'syn_spheres100.csv'
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
expectedCenters = write_spheres_dataset(csvPathname, CLUSTERS, SPHERE_PTS)
print "\nStarting", csvFilename
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=csvFilename + ".hex")
# try 5 times, to see if all inits by h2o are good
savedResults = []
Result = collections.namedtuple('Result',
'trial clusters size cluster_variances error iterations normalized max_iter clustersSorted')
# save the best for comparison. Print messages when we update best
sameAsBest = 1
# placeholder result; replaced after the first trial completes
bestResult = Result(None, None, None, None, None, None, None, None, None)
for trial in range(TRIALS):
# pass SEED so it's repeatable
kwargs = {
'normalize': 0,
'k': CLUSTERS,
'max_iter': MAX_ITER,
'initialization': INIT,
# 'initialization': 'PlusPlus',
'destination_key': 'syn_spheres100.hex',
'seed': SEED
}
timeoutSecs = 30
start = time.time()
kmeansResult = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.',\
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
# see if we took the full limit to get an answer
# inspect of model doesn't work
# kmeansResult = h2o_cmd.runInspect(key='syn_spheres100.hex')
### print h2o.dump_json(kmeans)
### print h2o.dump_json(kmeansResult)
h2o_kmeans.simpleCheckKMeans(self, kmeansResult, **kwargs)
model = kmeansResult['model']
clusters = model["centers"]
size = model["size"]
cluster_variances = model["within_cluster_variances"]
# round to int to avoid fp error when saying "same"
error = int(model["total_within_SS"])
iterations = model["iterations"]
normalized = model["normalized"]
max_iter = model["max_iter"]
# clustersSorted = sorted(clusters, key=itemgetter(0))
clustersSorted = sorted(clusters)
r = Result (
trial,
clusters,
size,
cluster_variances,
error,
iterations,
normalized,
max_iter,
clustersSorted,
)
savedResults.append(r)
if iterations >= (max_iter-1): # h2o hits the limit at max_iter-1..shouldn't hit it
raise Exception("KMeans unexpectedly took %s iterations..which was the full amount allowed by max_iter %s",
(iterations, max_iter))
print "iterations", iterations
### print clustersSorted
# For now, just analyze the one with the lowest error
# we could analyze how many are not best, and how many are best (maybe just look at error
print "savedResults, error"
print r.error
if bestResult.error and r.error <= bestResult.error:
sameAsBest += 1
# we can check that if it has the same error, the sizes should be the same (integer) and reflects centers?
# should
if sorted(r.size)!=sorted(bestResult.size):
raise Exception("Would expect that if two trials got the same error (rounded to int), the cluster sizes would likely be the same? %s %s" %
(r.size, bestResult.size))
if not bestResult.error: # init case
bestResult = r
elif r.error < bestResult.error:
print "Trial", r.trial, "has a lower error", r.error, "than current lowest error", bestResult.error
print "Using it for best now"
bestResult = r
print "Trial #", trial, "completed"
print "\nApparently, %s out of %s trials, got the same best error: %s (lowest) " % (sameAsBest, TRIALS, bestResult.error)
print "\nh2o best result was from trial %s, centers sorted:" % bestResult.trial
print bestResult.clustersSorted
print "\ngenerated centers for comparison"
print expectedCenters
for i,center in enumerate(expectedCenters):
a = center
bb = bestResult.clustersSorted
print "bb:", bb
b = bb[i]
print "\nexpected:", a
print "h2o:", b # h2o result
aStr = ",".join(map(str,a))
bStr = ",".join(map(str,b))
iStr = str(i)
self.assertAlmostEqual(a[0], b[0], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; x not correct.")
self.assertAlmostEqual(a[1], b[1], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; y not correct.")
self.assertAlmostEqual(a[2], b[2], delta=2, msg=aStr+"!="+bStr+". Sorted cluster center "+iStr+"; z not correct.")
# fix: should check size too. Really should format expected into the tuple that the h2o_kmeans checker uses
# the c5 testdir_release stuff has a checker..for centers, size, error?
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "f088bce646d7c9066e4e26fe875f7073",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 159,
"avg_line_length": 40.120535714285715,
"alnum_prop": 0.591409814176032,
"repo_name": "h2oai/h2o",
"id": "34aed7ef2884eea8a38e0e5edf06c33ba285dd62",
"size": "8987",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_multi_jvm/test_KMeans2_sphere5_bad_inits.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177980"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "42958"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .base import WhiteNoise
__version__ = '2.0.4'
__all__ = ['WhiteNoise']
|
{
"content_hash": "139ca21e4c1b550cd35390588a24787c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 38,
"avg_line_length": 16.857142857142858,
"alnum_prop": 0.652542372881356,
"repo_name": "hirokiky/whitenoise",
"id": "f981dfd9eea15a8907ac491aea804e07ecb2b6e5",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whitenoise/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41803"
}
],
"symlink_target": ""
}
|
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
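As a minimal, illustrative sketch (not one of the library's own examples),
an internally instrumented append on a ``list`` subclass could look like::

    @collection.internally_instrumented
    def append(self, item, _sa_initiator=None):
        adapter = collection_adapter(self)
        if adapter is not None:
            # fire the ORM append event; validators may replace the item
            item = adapter.fire_append_event(item, _sa_initiator)
        list.append(self, item)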
"""
import operator
import weakref
from sqlalchemy.util.compat import inspect_getfullargspec
from . import base
from .. import exc as sa_exc
from .. import util
from ..sql import coercions
from ..sql import expression
from ..sql import roles
__all__ = [
"collection",
"collection_adapter",
"mapped_collection",
"column_mapped_collection",
"attribute_mapped_collection",
]
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(
state, state.dict, m.mapped_table.columns[k]
)
for k in self.colkeys
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, "metadata", None)
for (ckey, tkey) in self.colkeys:
if tkey is None or metadata is None or tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from mapping_spec, which may be a Column or a sequence
of Columns.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
cols = [
coercions.expect(roles.ColumnArgumentRole, q, argname="mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name,)
def attribute_mapped_collection(attr_name):
"""A dictionary-based collection type with attribute-based keying.
Returns a :class:`.MappedCollection` factory with a keying based on the
'attr_name' attribute of entities in the collection, where ``attr_name``
is the string name of the attribute.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
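A typical use maps a relationship into a keyed dictionary; for example,
assuming a ``Note`` class with a ``keyword`` attribute::

    notes = relationship(
        "Note",
        collection_class=attribute_mapped_collection("keyword"),
    )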
"""
getter = _SerializableAttrGetter(attr_name)
return lambda: MappedCollection(getter)
def mapped_collection(keyfunc):
"""A dictionary-based collection type with arbitrary keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from keyfunc, a callable that takes an entity and returns a
key value.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
return lambda: MappedCollection(keyfunc)
class collection(object):
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, linker, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
            def zap(self): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.0",
"The :meth:`.collection.linker` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":meth:`.AttributeEvents.init_collection` "
"and :meth:`.AttributeEvents.dispose_collection` event handlers. ",
)
def linker(fn):
"""Tag the method as a "linked to attribute" event handler.
This optional event handler will be called when the collection class
is linked to or unlinked from the InstrumentedAttribute. It is
invoked immediately after the '_sa_adapter' property is set on
the instance. A single argument is passed: the collection adapter
that has been linked, or None if unlinking.
"""
fn._sa_instrument_role = "linker"
return fn
link = linker
"""Synonym for :meth:`.collection.linker`.
.. deprecated:: 1.0 - :meth:`.collection.link` is deprecated and will be
removed in a future release.
"""
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
method. A converter must not assign values or mutate the collection,
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
        conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
        holds the SQLAlchemy-relevant value to be added, and the return
        value, if any, will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item): ...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self): ...
For methods where the value to remove is known at call-time, use
        collection.removes.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
collection_adapter = operator.attrgetter("_sa_adapter")
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
"empty",
)
def __init__(self, attr, owner_state, data):
self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
self.empty = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@property
def data(self):
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self):
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def _set_empty(self, user_data):
assert (
not self.empty
), "This collection adapter is already in the 'empty' state"
self.empty = True
self.owner_state._empty_collections[self._key] = user_data
def _reset_empty(self):
assert (
self.empty
), "This collection adapter is not in the 'empty' state"
self.empty = False
self.owner_state.dict[
self._key
] = self.owner_state._empty_collections.pop(self._key)
def _refuse_empty(self):
raise sa_exc.InvalidRequestError(
"This is a special 'empty' collection which cannot accommodate "
"internal mutation operations"
)
def append_without_event(self, item):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item):
"""Remove an entity from the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(self, initiator=None):
"""Empty the collection, firing a mutation event for each entity."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self):
"""Empty the collection, firing no events."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
__nonzero__ = __bool__
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_remove_event(self, item, initiator=None):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator
)
def fire_pre_remove_event(self, initiator=None):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state, self.owner_state.dict, initiator=initiator
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
"empty": self.empty,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
self._data = weakref.ref(d["data"])
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
self.empty = d.get("empty", False)
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
for member in removals:
existing_adapter.fire_remove_event(member, initiator=initiator)
def prepare_instrumentation(factory):
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
factory = __canned_instrumentation[factory]
# Create a specimen
cls = type(factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# Wrap it so that it returns our 'Instrumented*'
factory = __converting_factory(cls, factory)
cls = factory()
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return factory
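# Example sketch of the conversion described above: a plain ``list`` factory
# is swapped for the instrumented equivalent (InstrumentedList is defined
# later in this module).
#
#     factory = prepare_instrumentation(list)
#     assert factory is InstrumentedList
#     assert isinstance(factory(), InstrumentedList)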
def __converting_factory(specimen_cls, original_factory):
"""Return a wrapper that converts a "canned" collection like
set, dict, list into the Instrumented* version.
"""
instrumented_cls = __canned_instrumentation[specimen_cls]
def wrapper():
collection = original_factory()
return instrumented_cls(collection)
# often flawed but better than nothing
wrapper.__name__ = "%sWrapper" % original_factory.__name__
wrapper.__doc__ = original_factory.__doc__
return wrapper
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles = {}
methods = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not util.callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"linker",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before, after = None, None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set(collection, item, _sa_initiator=None):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator)
return item
def __del(collection, item, _sa_initiator=None):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing)
value = __set(self, value)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item)
fn(self, index)
else:
                # slice deletion requires __getslice__ and a slice-grokking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item)
fn(self, index)
_tidy(__delitem__)
return __delitem__
if util.py2k:
def __setslice__(fn):
def __setslice__(self, start, end, values):
for value in self[start:end]:
__del(self, value)
values = [__set(self, value) for value in values]
fn(self, start, end, values)
_tidy(__setslice__)
return __setslice__
def __delslice__(fn):
def __delslice__(self, start, end):
for value in self[start:end]:
__del(self, value)
fn(self, start, end)
_tidy(__delslice__)
return __delslice__
def extend(fn):
def extend(self, iterable):
for value in iterable:
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in iterable:
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item)
return item
_tidy(pop)
return pop
if not util.py2k:
def clear(fn):
        def clear(self):
for item in self:
__del(self, item)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators():
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
value = __set(self, value, _sa_initiator)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key])
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=Unspecified):
__before_pop(self)
_to_del = key in self
if default is Unspecified:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1])
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
return self.__getitem__(key)
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=Unspecified, **kw):
if __other is not Unspecified:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self, obj):
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
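# Illustration (editor's sketch; InstrumentedSet is defined further below):
#
#     s = InstrumentedSet()
#     _set_binops_check_strict(s, frozenset())  # True
#     _set_binops_check_strict(s, [1, 2])       # False: not set-derived
#     # the loose variant additionally admits any object whose duck type
#     # (per util.duck_type_collection) is ``set``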
def _set_decorators():
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
class InstrumentedList(list):
"""An instrumented version of the built-in list."""
class InstrumentedSet(set):
"""An instrumented version of the built-in set."""
class InstrumentedDict(dict):
"""An instrumented version of the built-in dict."""
__canned_instrumentation = {
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
__interfaces = {
list: (
{"appender": "append", "remover": "remove", "iterator": "__iter__"},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators())
if util.py3k
else ({"iterator": "itervalues"}, _dict_decorators()),
}
class MappedCollection(dict):
"""A basic dictionary-based collection class.
Extends dict with the minimal bag semantics that collection
classes require. ``set`` and ``remove`` are implemented in terms
of a keying function: any callable that takes an object and
returns an object for use as a dictionary key.
"""
def __init__(self, keyfunc):
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
value-only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply:
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
@collection.appender
@collection.internally_instrumented
def set(self, value, _sa_initiator=None):
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
self.__setitem__(key, value, _sa_initiator)
@collection.remover
@collection.internally_instrumented
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator)
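# Example sketch: a standalone MappedCollection keyed by an attribute,
# equivalent to what attribute_mapped_collection("keyword") produces.
# ``Note`` is a hypothetical mapped class.
#
#     col = MappedCollection(operator.attrgetter("keyword"))
#     note = Note(keyword="todo")
#     col.set(note)      # stored as col["todo"]
#     col.remove(note)   # removed via the same derived key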
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
# ==== source: wujuguang/sqlalchemy / lib/sqlalchemy/orm/collections.py (license: mit) ====
"""
The basic iterator classes for iterating over the blocks and transactions of
the blockchain.
"""
from collections import deque
from sortedcontainers import SortedList
from .defs import GENESIS_PREV_BLOCK_HASH, HEIGHT_SAFETY_MARGIN
from .misc import hash_hex_to_bytes, FilePos, Bunch
from .rawfiles import RawDataIterator
from .block import StoredBlock, deserialize_block
from .loggers import logger
################################################################################
# filtering
class BlockFilter:
"""
Represents start/stop criteria for blocks to include, based on height,
timestamp, and specific block identified by its hash.
"Start" is inclusive, "stop" is exclusive.
:note: Block timestamp is approximate. Blocks are not strictly ordered by timestamp.
"""
def __init__(self,
start_block_height = None, stop_block_height = None,
start_block_time = None, stop_block_time = None,
start_block_hash = None, stop_block_hash = None,
):
if start_block_height is not None or stop_block_height is not None:
self.block_height = ( start_block_height, stop_block_height )
else:
self.block_height = None
if start_block_time is not None or stop_block_time is not None:
self.block_time = ( start_block_time, stop_block_time )
else:
self.block_time = None
if start_block_hash is not None or stop_block_hash is not None:
# str to bytes
start_block_hash = hash_hex_to_bytes(start_block_hash) if isinstance(start_block_hash, str) else start_block_hash
stop_block_hash = hash_hex_to_bytes(stop_block_hash) if isinstance(stop_block_hash, str) else stop_block_hash
self.block_hash = ( start_block_hash, stop_block_hash )
else:
self.block_hash = None
def check_block(self, block, is_started):
"""
:return: True if need to include, False if need to exclude (i.e. before "start")
:raise: StopIteration if need to break (i.e. after "stop")
"""
if self.block_height is not None:
if not self._check(block.height, self.block_height, is_started):
return False
if self.block_time is not None:
if not self._check(block.timestamp, self.block_time, is_started):
return False
if self.block_hash is not None:
if not self._check(block.block_hash, self.block_hash, is_started, is_ordered = False):
return False
return True
def _check(self, value, boundaries, is_started, is_ordered = True):
# True, False, or raise StopIteration
start, stop = boundaries
if start is not None and not is_started:
# check if should start
if is_ordered:
if value < start:
# before the start
return False
else:
if value != start:
# before the start (haven't seen "start" yet)
return False
if stop is not None and is_started:
# check if should stop (note: stop is exclusive)
if is_ordered:
if value >= stop:
# at or after the end
raise StopIteration
else:
if value == stop:
# at the end
raise StopIteration
return True
def __repr__(self):
boundaries_str = ', '.join(
'%s.%s=%s' % (attr, side, v)
for attr, values in sorted(self.__dict__.items())
if values is not None
for side, v in zip(['start', 'stop'], values)
if v is not None
)
if not boundaries_str:
boundaries_str = '[include all]'
return '<%s %s>' % ( type(self).__name__, boundaries_str)
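# Example sketch (heights are illustrative): "start" is inclusive, "stop"
# exclusive, per the class docstring.
#
#     f = BlockFilter(start_block_height=100000, stop_block_height=100010)
#     # includes blocks at heights 100000 through 100009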
class _WorkingBlockFilter:
"""
A BlockFilter along with the state needed to apply it.
"""
def __init__(self, block_filter):
self.filter = block_filter
self.is_started = False
self.is_ended = False
def check_block(self, block):
"""
:return: True if need to include, False if need to exclude (i.e. before "start")
:raise: StopIteration if need to break (i.e. after "stop")
"""
if self.is_ended:
raise StopIteration
try:
should_include = self.filter.check_block(block, is_started = self.is_started)
if should_include:
self.is_started = True
return should_include
except StopIteration:
self.is_ended = True
raise
def __repr__(self):
return repr(self.filter).replace('BlockFilter', type(self).__name__)
################################################################################
# Blocks
class RawFileBlockIterator:
"""
Iterates over ALL blocks from `blk*.dat` files -- not only blocks included in
    the longest chain.
Blocks appear in "storage order", which is not necessarily chronological/topological
order.
No processing, validation, etc., is done on the blocks.
Element type is `StoredBlock`.
:note: Height is set to -1 for all blocks.
:note: This iterator is resumable and refreshable.
"""
def __init__(self, raw_data_iter = None, **kwargs):
"""
:param raw_data_iter: a RawDataIterator
:param kwargs: extra kwargs for RawDataIterator (ignored unless raw_data_iter is None)
"""
if raw_data_iter is None:
raw_data_iter = RawDataIterator(**kwargs)
self.raw_data_iter = raw_data_iter
# state
self._cur_blob = b''
self._cur_offset = 0
self._cur_filename = None
def __next__(self):
if self._cur_offset >= len(self._cur_blob):
# we're done with this blob. read the next one.
#if self._cur_blob is not None:
# assert self._cur_offset == len(self._cur_blob), (self._cur_offset, len(self._cur_blob))
self._read_next_blob() # raises StopIteration if no more files
block_offset = self._cur_offset
block = deserialize_block(self._cur_blob[block_offset : ], -1)
if block is None:
# past last block (in the last blk.dat file)
# refresh: check if new data was added to this blob since we read it
if self.refresh:
self._reread_blob()
block = deserialize_block(self._cur_blob[block_offset : ], -1)
if block is None:
# no new data, even after refreshing
raise StopIteration
self._cur_offset += 8 + block.rawsize
return StoredBlock(
block = block,
filepos = FilePos(self._cur_filename, block_offset),
)
def _read_next_blob(self):
        data = self.raw_data_iter.__next__()  # raises StopIteration if no more files; easier to profile with x.__next__() instead of next(x)...
self._cur_blob = data.blob
self._cur_filename = data.filename
self._cur_offset = 0
def _reread_blob(self):
if self._cur_filename is not None:
# note: not updating self._cur_filename and self._cur_offset, because
# we need to keep reading from the same offset in the same file.
self._cur_blob = self.raw_data_iter.get_data(self._cur_filename).blob
def __iter__(self):
return self
@property
def refresh(self):
return self.raw_data_iter.refresh
class TopologicalBlockIterator:
"""
Iterates over *all* blocks from `blk*.dat` files (not only from longest chain).
Blocks are generated according to a topological order. This means
it is guaranteed a block will not appear before its "prev block" (indicated
by its "prev_block_hash").
Other than that, blocks from different forks can be generated in any order.
Element type is `Block`.
:note: This iterator is resumable and refreshable.
"""
def __init__(self, rawfile_block_iter = None, **kwargs):
"""
:param rawfile_block_iter: a RawFileBlockIterator
:param kwargs: extra kwargs for RawFileBlockIterator (ignored unless rawfile_block_iter is None)
"""
if rawfile_block_iter is None:
rawfile_block_iter = RawFileBlockIterator(**kwargs)
self.rawfile_block_iter = rawfile_block_iter
# state
self._height_by_hash = { GENESIS_PREV_BLOCK_HASH: -1 } # genesis is 0, so its prev is -1
self._orphans = {} # block_hash -> a list of orphan blocks waiting for it to appear
self._ready_blocks = deque() # blocks which can be released on next call to __next__()
def __next__(self):
# read more data if necessary
while not self._ready_blocks:
self._read_another_block()
# release a block
return self._get_next_block_to_release()
def _read_another_block(self):
# note: block.height is not set by RawFileBlockIterator
block = self.rawfile_block_iter.__next__().block # easier to profile with x.__next__() instead of next(x)...
#logger.debug('prev-block-reference: %s -> %s', block.block_hash_hex, block.prev_block_hash_hex) # commented out because hex() takes time...
# handle new block either as "ready" or "orphan":
height_by_hash = self._height_by_hash
prev_block_hash = block.prev_block_hash
prev_height = height_by_hash.get(prev_block_hash)
if prev_height is None:
# prev not found. orphan.
self._orphans.setdefault(prev_block_hash, []).append(block)
return False
else:
# prev found. block is "ready".
self._disorphanate_block(block, prev_height + 1)
return True
def _get_next_block_to_release(self):
# release a block from _ready_blocks, and disorphanate its children
block = self._ready_blocks.popleft()
self._disorphanate_children_of(block)
return block
def _disorphanate_children_of(self, block):
children = self._orphans.pop(block.block_hash, ())
child_height = block.height + 1
for child_block in children:
self._disorphanate_block(child_block, child_height)
def _disorphanate_block(self, child_block, height):
# block's height is known now. set it:
child_block.height = height
self._height_by_hash[child_block.block_hash] = height
# no longer orphan. it is ready for releasing:
self._ready_blocks.append(child_block) # appendright
def __iter__(self):
return self
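# Usage sketch (editor's addition; any kwargs are forwarded down to
# RawDataIterator, and process() is a placeholder):
#
#     for block in TopologicalBlockIterator():
#         process(block)  # a block never appears before its prev block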
################################################################################
# Longest chain
class LongestChainBlockIterator:
"""
Linearly iterates over blocks in the longest chain.
Denoting `B(i)` and `B(i+1)` as the i-th and i+1-th blocks in the sequence, this
iterator guarantees::
- `B(i+1).prev_block_hash == B(i).block_hash`
- `B(i+1).height == B(i).height + 1`
The height of the first block (genesis) is 0, and its `prev_block_hash` is all zeros.
Element type is `Block`.
:note: This iterator is resumable and refreshable.
"""
# TBD: an option to generate_unsafe_tail
DEFAULT_HEIGHT_SAFETY_MARGIN = HEIGHT_SAFETY_MARGIN
_DUMMY_PRE_GENESIS_BLOCK = Bunch(height = -1, block_hash = GENESIS_PREV_BLOCK_HASH)
def __init__(self, block_iter = None, height_safety_margin = None, block_filter = None, **kwargs):
"""
:param block_iter: a TopologicalBlockIterator
:param height_safety_margin:
how much longer should a fork be than a competing fork before we
can safely conclude it is the eventual "winner" fork.
:param block_filter: a BlockFilter, indicating blocks to start/stop at.
:param kwargs: extra kwargs for TopologicalBlockIterator (ignored unless block_iter is None)
"""
if block_iter is None:
block_iter = TopologicalBlockIterator(**kwargs)
self.block_iter = block_iter
if height_safety_margin is None:
height_safety_margin = self.DEFAULT_HEIGHT_SAFETY_MARGIN
self.height_safety_margin = height_safety_margin
if block_filter is not None:
block_filter = _WorkingBlockFilter(block_filter)
self.block_filter = block_filter
# state
root_block = self._DUMMY_PRE_GENESIS_BLOCK
self._root_block = root_block # the previous block released
self._last_block = root_block # the most recent block seen (not released yet)
self._blocks_by_hash = { root_block.block_hash: root_block } # block_hash -> block
self._block_children = { root_block.block_hash: []} # block_hash -> list of child blocks
self._leaf_heights = SortedList([ root_block.height ]) # block heights, of the leaf blocks only
def __next__(self):
while True:
block = self._get_next_block_to_release()
if block is not None:
self._root_block = block
if self._check_block(block):
return block
# no next block in pending blocks. need to read more data
self._read_another_block()
def _get_next_block_to_release(self):
if not self._check_heights_gap():
# longest chain not determined yet
return None
last_block = self._last_block
root_block = self._root_block
leaf_heights = self._leaf_heights
# since there's now another block to generate, it must be _last_block which tipped it over
assert last_block.height == leaf_heights[-1], (last_block.height, leaf_heights[-1])
# find next block to generate -- search backwards from leaf to root
next_block = self._find_child_from(last_block, root_block)
# trim the neglected chains
logger.debug('generating next root block %s', next_block)
self._discard_tree(root_block, survivor_child = next_block)
return next_block
def _discard_block(self, block):
"""
Remove a block from the data-structures representing the iterator state.
:return: the children of the block discarded
"""
block_hash = block.block_hash
logger.debug('discarding block %s', block_hash)
# remove from _blocks_by_hash:
self._blocks_by_hash.pop(block_hash)
# remove from _block_children:
children = self._block_children.pop(block_hash)
# remove from _leaf_heights (if there):
if not children:
# block is a leaf. need to remove it from _leaf_heights
self._leaf_heights.remove(block.height)
return children
def _discard_tree(self, block, survivor_child = None):
"""
recursively (DFS) discard a block and its children, except for its
"survivor" child, the one included in the longest chain.
"""
children = self._discard_block(block)
for child in children:
if child is not survivor_child:
self._discard_tree(child)
#else: don't discard the survivor
def _check_heights_gap(self):
"""
Is the longest fork leading by enough over the 2nd longest?
"""
leaf_heights = self._leaf_heights
height1 = leaf_heights[-1]
height2 = leaf_heights[-2] if len(leaf_heights) >= 2 else self._root_block.height
assert height1 >= height2, (height1, height2)
if height1 - height2 >= self.height_safety_margin:
# fork is leading by a large gap. can safely release next block
logger.debug('found next block to generate (cur leaf height = %s)', height1)
return True
else:
# don't generate next block yet
logger.debug('no next block to generate (cur leaf height = %s)', height1)
return False
def _find_child_from(self, block, root_block):
"""
:return: the direct child of `root_block`, in the route from `root_block` to `block`.
"""
blocks_by_hash = self._blocks_by_hash
root_block_hash = root_block.block_hash
while True:
prev_block_hash = block.prev_block_hash
if prev_block_hash == root_block_hash:
return block
block = blocks_by_hash[prev_block_hash]
def _read_another_block(self):
blocks_by_hash = self._blocks_by_hash
block_children = self._block_children
leaf_heights = self._leaf_heights
# fetch another block
block = self.block_iter.__next__() # easier to profile with x.__next__() instead of next(x)...
block_height = block.height
block_hash = block.block_hash
prev_block_hash = block.prev_block_hash
# find new block's prev block
try:
prev_block = blocks_by_hash[prev_block_hash]
if prev_block is not None:
assert prev_block.height + 1 == block_height, (prev_block.height, block_height)
except KeyError:
# already neglected
logger.info('block ignored (must be from a fork already deemed inferior): %s', block.block_hash_hex)
return
# update data structures with new block
logger.debug('adding block: %s', block)
self._last_block = block
blocks_by_hash[block_hash] = block
block_children[block_hash] = [] # no children seen yet, because each block appears before its children
prev_block_children = block_children[prev_block_hash]
is_prev_leaf = not prev_block_children
prev_block_children.append(block)
if is_prev_leaf:
            # prev is no longer a leaf. need to remove it from leaf_heights
leaf_heights.remove(block_height - 1)
leaf_heights.add(block_height)
def _check_block(self, block):
"""
apply `block_filter` to `block`
"""
if self.block_filter is None:
return True
return self.block_filter.check_block(block)
def __iter__(self):
return self
def __repr__(self):
return '<%s at block #%r>' % ( type(self).__name__, self._root_block.height )
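# Usage sketch, combining the iterator with a BlockFilter (values are
# illustrative):
#
#     blk_filter = BlockFilter(stop_block_height=200000)
#     for block in LongestChainBlockIterator(block_filter=blk_filter):
#         assert 0 <= block.height < 200000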
################################################################################
# Transactions
class TxIterator:
"""
Iterates over all transactions in longest chain.
Roughly equivalent to::
for block in LongestChainBlockIterator():
yield from block.iter_txs()
    Element type is `Tx` (or `TxInBlock`, if `include_block_context=True`).
:note: This iterator is resumable and refreshable.
"""
def __init__(self, include_block_context = False, include_tx_blob = False, block_iter = None, **kwargs):
"""
:param block_iter: a LongestChainBlockIterator
:param kwargs: extra kwargs for LongestChainBlockIterator (ignored unless block_iter is None)
"""
if block_iter is None:
block_iter = LongestChainBlockIterator(**kwargs)
self.block_iter = block_iter
self.include_block_context = include_block_context
self.include_tx_blob = include_tx_blob
# state
self._block_txs = iter(()) # iterator over an empty sequence
def __next__(self):
while True:
try:
# return the next tx in this block:
tx = self._block_txs.__next__() # easier to profile with x.__next__() instead of next(x)...
return tx
except StopIteration:
# done with this block
pass
# proceed to next block:
self._block_txs = self._get_iter_of_next_block()
def _get_iter_of_next_block(self):
txs = self.block_iter.__next__().txs # easier to profile with x.__next__() instead of next(x)...
if self.include_block_context:
return txs.iter_txs_in_block(include_tx_blob = self.include_tx_blob)
else:
return txs.iter_txs(include_tx_blob = self.include_tx_blob)
def __iter__(self):
return self
def __repr__(self):
return '<%s at %r>' % ( type(self).__name__, self.block_iter )
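# Usage sketch:
#
#     for tx in TxIterator(include_block_context=True):
#         ...  # each element is a TxInBlock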
################################################################################
# ==== source: fungibit/chainscan / chainscan/scan.py (license: mit) ====
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def fbcode_builder_spec(builder):
return {
'steps': [
# on macOS the filesystem is typically case insensitive.
# We need to ensure that the CWD is not the folly source
# dir when we build, otherwise the system will decide
# that `folly/String.h` is the file it wants when including
# `string.h` and the build will fail.
builder.fb_github_project_workdir('folly/_build'),
builder.cmake_install('facebook/folly'),
],
}
# ==== source: phoad/rsocket-cpp / build/fbcode_builder/specs/folly.py (license: apache-2.0) ====
dna = input('Enter your primer sequence: ')
seqsize = len(dna)
if seqsize < 10:
print('The primer must have at least ten nucleotides')
elif seqsize < 25:
print('This size is OK')
else:
print('The primer is too long')
# ==== source: Serulab/Py4Bio / code/ch4/elif1.py (license: mit) ====
"""Import to cause an exception, to test our hooks."""
__author__ = 'rob.galanakis@gmail.com'
raise NotImplementedError
# ==== source: rgalanakis/practicalmayapython / src/chapter3/_raise.py (license: mit) ====
import argparse
import json
import os
import subprocess
import sys
import urllib2
from utils import commit
from utils import system
import patch
# //base and its dependencies
_base_deps = [
'base',
'testing',
'third_party/ashmem',
'third_party/libevent',
'third_party/libxml', # via //base/test
'third_party/modp_b64',
'third_party/tcmalloc',
]
# //build and its dependencies
_build_deps = [
'build',
'third_party/android_testrunner',
'third_party/binutils',
'third_party/instrumented_libraries',
'third_party/pymock',
'tools/android',
'tools/clang',
'tools/generate_library_loader',
'tools/gritsettings',
'tools/valgrind',
]
_chromium_libs = [
'url',
]
_third_party_deps = [
'third_party/android_platform',
'third_party/apple_apsl',
'third_party/brotli',
'third_party/expat',
'third_party/freetype-android',
'third_party/harfbuzz-ng',
'third_party/iccjpeg',
'third_party/jinja2',
'third_party/jsr-305',
'third_party/junit',
'third_party/libjpeg',
'third_party/libpng',
'third_party/libXNVCtrl',
'third_party/markupsafe',
'third_party/mesa',
'third_party/mockito',
'third_party/ots',
'third_party/ply',
'third_party/qcms',
'third_party/re2',
'third_party/robolectric',
'third_party/zlib',
]
dirs_from_chromium = _base_deps + _build_deps + _chromium_libs + _third_party_deps
dirs_from_mojo = [
'mojo/android',
'mojo/application',
'mojo/common',
'mojo/converters',
'mojo/dart',
'mojo/data_pipe_utils',
'mojo/edk',
'mojo/environment',
'mojo/icu',
'mojo/java',
'mojo/message_pump',
'mojo/services',
'services/asset_bundle',
'services/keyboard',
'services/sensors',
]
# The contents of these files before the roll will be preserved after the roll,
# even though they live in directories rolled in from Chromium.
files_not_to_roll = [
'build/config/ui.gni',
'build/ls.py',
'build/module_args/mojo.gni',
'tools/android/download_android_tools.py',
'tools/android/VERSION_LINUX_NDK',
'tools/android/VERSION_LINUX_SDK',
'tools/android/VERSION_MACOSX_NDK',
'tools/android/VERSION_MACOSX_SDK',
]
def rev(source_dir, dest_dir, dirs_to_rev, name):
for d in dirs_to_rev:
print "removing directory %s" % d
try:
system(["git", "rm", "-r", d], cwd=dest_dir)
except subprocess.CalledProcessError:
print "Could not remove %s" % d
print "cloning directory %s" % d
files = system(["git", "ls-files", d], cwd=source_dir)
for f in files.splitlines():
source_path = os.path.join(source_dir, f)
if not os.path.isfile(source_path):
continue
dest_path = os.path.join(dest_dir, f)
system(["mkdir", "-p", os.path.dirname(dest_path)], cwd=source_dir)
system(["cp", source_path, dest_path], cwd=source_dir)
system(["git", "add", d], cwd=dest_dir)
for f in files_not_to_roll:
system(["git", "checkout", "HEAD", f], cwd=dest_dir)
system(["git", "add", "."], cwd=dest_dir)
src_commit = system(["git", "rev-parse", "HEAD"], cwd=source_dir).strip()
commit("Update to %s %s" % (name, src_commit), cwd=dest_dir)


def main():
  parser = argparse.ArgumentParser(description="Update the mojo repo's " +
      "snapshot of things imported from chromium.")
  parser.add_argument("--mojo-dir", type=str)
  parser.add_argument("--chromium-dir", type=str)
  parser.add_argument("--dest-dir", type=str)
  args = parser.parse_args()

  if args.mojo_dir:
    rev(args.mojo_dir, args.dest_dir, dirs_from_mojo, 'mojo')
    try:
      patch.patch_and_filter(args.dest_dir,
                             os.path.join('patches', 'mojo'))
    except subprocess.CalledProcessError:
      print("ERROR: Roll failed due to a patch not applying")
      print("Fix the patch to apply, commit the result, and re-run this script")
      return 1

  if args.chromium_dir:
    rev(args.chromium_dir, args.dest_dir, dirs_from_chromium, 'chromium')
    try:
      patch.patch_and_filter(args.dest_dir,
                             os.path.join('patches', 'chromium'))
    except subprocess.CalledProcessError:
      print("ERROR: Roll failed due to a patch not applying")
      print("Fix the patch to apply, commit the result, and re-run this script")
      return 1

  return 0


if __name__ == "__main__":
  sys.exit(main())
|
{
"content_hash": "ea1b44109911f21c55afe6a0688c9b2a",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 83,
"avg_line_length": 28.30625,
"alnum_prop": 0.6180172223448885,
"repo_name": "qiankunshe/sky_engine",
"id": "4c9c8f1bdd7ea9588219f53d788d42de454cb814",
"size": "4714",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sky/tools/roll/roll.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "2706"
},
{
"name": "C",
"bytes": "304169"
},
{
"name": "C++",
"bytes": "22840831"
},
{
"name": "Dart",
"bytes": "1096771"
},
{
"name": "Groff",
"bytes": "29030"
},
{
"name": "HTML",
"bytes": "41854"
},
{
"name": "Java",
"bytes": "774384"
},
{
"name": "JavaScript",
"bytes": "27365"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Objective-C",
"bytes": "106125"
},
{
"name": "Objective-C++",
"bytes": "435950"
},
{
"name": "Python",
"bytes": "4476918"
},
{
"name": "Shell",
"bytes": "173354"
},
{
"name": "Yacc",
"bytes": "31141"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest

import cirq
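

# assert_equivalent_repr evaluates repr(value) (importing cirq by default, as
# test_imports_cirq_by_default below exercises) and asserts that the result
# compares equal to the original value.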
def test_external():
for t in ['a', 1j]:
cirq.testing.assert_equivalent_repr(t)
cirq.testing.assert_equivalent_repr(t, setup_code='')
cirq.testing.assert_equivalent_repr(np.array([5]), setup_code='from numpy import array')
with pytest.raises(AssertionError, match='not defined'):
cirq.testing.assert_equivalent_repr(np.array([5]))


def test_custom_class_repr():
class CustomRepr:
# coverage: ignore
setup_code = """class CustomRepr:
def __init__(self, eq_val):
self.eq_val = eq_val
def __pow__(self, exponent):
return self
"""
def __init__(self, eq_val, repr_str: str):
self.eq_val = eq_val
self.repr_str = repr_str
def __eq__(self, other):
return self.eq_val == getattr(other, 'eq_val', None)
def __ne__(self, other):
return not self == other
def __repr__(self):
return self.repr_str
cirq.testing.assert_equivalent_repr(
CustomRepr('b', "CustomRepr('b')"), setup_code=CustomRepr.setup_code
)
cirq.testing.assert_equivalent_repr(
CustomRepr('a', "CustomRepr('a')"), setup_code=CustomRepr.setup_code
)
# Non-equal values.
with pytest.raises(AssertionError, match=r'eval\(repr\(value\)\): a'):
cirq.testing.assert_equivalent_repr(CustomRepr('a', "'a'"))
with pytest.raises(AssertionError, match=r'eval\(repr\(value\)\): 1'):
cirq.testing.assert_equivalent_repr(CustomRepr('a', "1"))
# Single failure out of many.
with pytest.raises(AssertionError, match=r'eval\(repr\(value\)\): a'):
cirq.testing.assert_equivalent_repr(CustomRepr('a', "'a'"))
# Syntax errors.
with pytest.raises(AssertionError, match='SyntaxError'):
cirq.testing.assert_equivalent_repr(CustomRepr('a', "("))
with pytest.raises(AssertionError, match='SyntaxError'):
cirq.testing.assert_equivalent_repr(CustomRepr('a', "return 1"))
# Not dottable.
with pytest.raises(AssertionError, match=r'dottable'):
cirq.testing.assert_equivalent_repr(
CustomRepr(5, "CustomRepr(5)**1"), setup_code=CustomRepr.setup_code
)


def test_imports_cirq_by_default():
cirq.testing.assert_equivalent_repr(cirq.NamedQubit('a'))
|
{
"content_hash": "ceea9a6a1c00d50f8aa9f593ace24e76",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 92,
"avg_line_length": 32.6986301369863,
"alnum_prop": 0.6154168412232929,
"repo_name": "quantumlib/Cirq",
"id": "de087b6196df835a3f5898cad87a65675b1fac97",
"size": "2972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cirq-core/cirq/testing/equivalent_repr_eval_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
}
|
"""Tests for the Google Drive Sync log log text parser plugin."""

import unittest

from plaso.parsers.text_plugins import gdrive_synclog

from tests.parsers.text_plugins import test_lib
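

# test_lib.TextPluginTestCase supplies _ParseTextFileWithPlugin(), which runs
# the plugin over a file from plaso's test data and returns a fake storage
# writer holding the extracted event data and warnings checked below.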
class GoogleDriveSyncLogTextPluginTest(test_lib.TextPluginTestCase):
"""Tests for the Google Drive Sync log text parser plugin."""

  def testProcess(self):
"""Tests the Process function."""
plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()
storage_writer = self._ParseTextFileWithPlugin(['sync_log.log'], plugin)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 2190)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2190)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'added_time': '2018-01-24T18:25:08.456-08:00',
'data_type': 'google_drive_sync_log:entry',
'level': 'INFO',
'message': 'SSL: OpenSSL 1.0.2m 2 Nov 2017',
'process_identifier': 2376,
'source_code': 'logging_config.py:299',
'thread': '7780:MainThread'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 2)
self.CheckEventData(event_data, expected_event_values)

  def testProcessWithMacOSLog(self):
"""Tests the Process function with a MacOS Google Drive sync log."""
plugin = gdrive_synclog.GoogleDriveSyncLogTextPlugin()
storage_writer = self._ParseTextFileWithPlugin(['sync_log-osx.log'], plugin)
number_of_event_data = storage_writer.GetNumberOfAttributeContainers(
'event_data')
self.assertEqual(number_of_event_data, 2338)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 2338)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
expected_event_values = {
'added_time': '2018-03-01T12:48:14.224-08:00',
'data_type': 'google_drive_sync_log:entry',
'level': 'INFO',
'message': 'OS: Darwin/10.13.3',
'process_identifier': 1730,
'source_code': 'logging_config.pyo:295',
'thread': '140736280556352:MainThread'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 0)
self.CheckEventData(event_data, expected_event_values)
# Test change in local system time from -0800 to UTC.
# The switch occurs around line 215.
expected_event_values = {
'added_time': '2018-03-01T20:57:33.499+00:00',
'data_type': 'google_drive_sync_log:entry',
'level': 'INFO',
'message': 'SSL: OpenSSL 1.0.2n 7 Dec 2017',
'process_identifier': 2590,
'source_code': 'logging_config.pyo:299',
'thread': '140736280556352:MainThread'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 169)
self.CheckEventData(event_data, expected_event_values)
# Test with Unicode characters in filename.
expected_event_values = {
'added_time': '2018-03-05T03:09:15.806+00:00',
'data_type': 'google_drive_sync_log:entry',
'level': 'INFO',
'message': (
'Updating local entry local_id=LocalID(inode=870321, volume='
'\'60228B87-A626-4F5C-873E-476615F863C6\'), filename=АБВГДЕ.gdoc, '
'modified=1520218963, checksum=ab0618852c5d671d7b1b9191aef03bda, '
'size=185, is_folder=False'),
'process_identifier': 2608,
'source_code': 'snapshot_sqlite.pyo:219',
'thread': '123145558327296:Worker-1'}
event_data = storage_writer.GetAttributeContainerByIndex('event_data', 1400)
self.CheckEventData(event_data, expected_event_values)


if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a02454b39b566c3847c0ad687cc43e86",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 80,
"avg_line_length": 38.763636363636365,
"alnum_prop": 0.6801125703564728,
"repo_name": "joachimmetz/plaso",
"id": "b34e51f093ad64ea9c81dcc855c387f5d0e1b8b3",
"size": "4317",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/parsers/text_plugins/gdrive_synclog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4301"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1305"
},
{
"name": "Python",
"bytes": "5345755"
},
{
"name": "Shell",
"bytes": "27279"
},
{
"name": "YARA",
"bytes": "507"
}
],
"symlink_target": ""
}
|