"""
Verifies build of an executable with C++ defines.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('defines.gyp')
test.build('defines.gyp')
expect = """\
FOO is defined
VALUE is 1
2*PAREN_VALUE is 12
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
|
{
"content_hash": "33c234bb9847bb2b112a58fb8793172b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 51,
"avg_line_length": 14.55,
"alnum_prop": 0.697594501718213,
"repo_name": "creationix/gyp",
"id": "0b6d64b85539939216ccf0e2bb8524999fe9c691",
"size": "471",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "test/defines/gyptest-defines.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "400"
},
{
"name": "C",
"bytes": "27113"
},
{
"name": "C++",
"bytes": "3734"
},
{
"name": "Objective-C",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "1078913"
},
{
"name": "Shell",
"bytes": "8888"
}
],
"symlink_target": ""
}
|
import re
import phonenumbers
from django import template
from django.utils.translation import gettext as _
from ..models import PhoneDevice
register = template.Library()
phone_mask = re.compile('(?<=.{3})[0-9](?=.{2})')
@register.filter
def mask_phone_number(number):
"""
Masks a phone number, leaving only the first 3 and last 2 digits visible.
Examples:
* `+31 * ******58`
:param number: str or phonenumber object
:return: str
"""
if isinstance(number, phonenumbers.PhoneNumber):
number = format_phone_number(number)
return phone_mask.sub('*', number)
@register.filter
def format_phone_number(number):
"""
Formats a phone number in international notation.
:param number: str or phonenumber object
:return: str
"""
if not isinstance(number, phonenumbers.PhoneNumber):
number = phonenumbers.parse(number)
return phonenumbers.format_number(number, phonenumbers.PhoneNumberFormat.INTERNATIONAL)
@register.filter
def device_action(device):
"""
Generates an actionable text for a :class:`~two_factor.models.PhoneDevice`.
Examples:
* Send text message to `+31 * ******58`
* Call number `+31 * ******58`
"""
assert isinstance(device, PhoneDevice)
number = mask_phone_number(format_phone_number(device.number))
if device.method == 'sms':
return _('Send text message to %s') % number
elif device.method == 'call':
return _('Call number %s') % number
raise NotImplementedError('Unknown method: %s' % device.method)
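# Hedged standalone sketch of the masking behaviour above (the number is
# illustrative, not from the source): every digit with at least three
# characters before it and at least two after it becomes '*'.
#
#   >>> import re
#   >>> mask = re.compile('(?<=.{3})[0-9](?=.{2})')
#   >>> mask.sub('*', '+31 10 123 4158')
#   '+31 ** *** **58'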
|
{
"content_hash": "bf170c143029f453a189e6add91c3e74",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 91,
"avg_line_length": 26.271186440677965,
"alnum_prop": 0.6664516129032259,
"repo_name": "Bouke/django-two-factor-auth",
"id": "13297865c82c4a501979cdeac234f5f6a79b60f5",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "two_factor/templatetags/two_factor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21268"
},
{
"name": "Makefile",
"bytes": "955"
},
{
"name": "Python",
"bytes": "146804"
}
],
"symlink_target": ""
}
|
"""
The middleware that adds the surrogate keys.
"""
from selector import Selector
from .surrogates import current_uri_keys
def initialize_routes(config):
"""
Initialize a new selector that maps routes to surrogate-key
generation. At the moment this is an empty map to which things
can be added (see test_entity_to_keys).
However, it ought to be possible to put all the (relevant) routes
in this map and forgo the procedural code in current_uri_keys.
Another option is to, at startup, wrap handlers in another handler
which properly generates keys. That version needs to be tried to
see whether it is tidier than this.
"""
fastly_selector = Selector()
def not_found(environ, start_response):
return []
fastly_selector.status404 = not_found
fastly_selector.status405 = not_found
config['fastly.selector'] = fastly_selector
class KeyAdder(object):
"""
WSGI middleware that determines which (if any) surrogate keys
to add as a header to an outgoing response. This allows them
to be properly flushed from the fastly caches by a purge of
one of those keys.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
def replacement_start_response(status, headers, exc_info=None):
if environ['REQUEST_METHOD'] == 'GET':
surrogate_headers = current_uri_keys(environ)
if surrogate_headers:
headers.append(('Surrogate-Key',
' '.join(surrogate_headers)))
return start_response(status, headers, exc_info)
return self.application(environ, replacement_start_response)
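# Hedged demo, not part of the plugin: exercising KeyAdder against a
# one-line WSGI app, with current_uri_keys stubbed out so the example is
# self-contained (the key value is illustrative).
if __name__ == '__main__':
    current_uri_keys = lambda environ: ['bag:common']  # stub for the demo

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']

    def printing_start_response(status, headers, exc_info=None):
        print(headers)  # includes ('Surrogate-Key', 'bag:common')

    KeyAdder(hello_app)({'REQUEST_METHOD': 'GET'}, printing_start_response)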
|
{
"content_hash": "6909269d4a09c75d0af663cfcc38e666",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 71,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.6672403901319564,
"repo_name": "cdent/tiddlywebplugins.fastly",
"id": "c3cde39cb3edea9bfe6f869af471e9abb6e0fbe5",
"size": "1743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tiddlywebplugins/fastly/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33568"
}
],
"symlink_target": ""
}
|
"""
Django settings for myproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%ovvje%lh&k-%0v!@_c1gygt#aq-!o3*t$(hpee7@aj&35cr3a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'myapp',
'tastypie',
)
MIDDLEWARE = MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "ef35e59700e94401481ca6ecd84abcc6",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 71,
"avg_line_length": 24.31764705882353,
"alnum_prop": 0.7213352685050798,
"repo_name": "Perkville/django-tastypie",
"id": "e3b6ac1ba43ae0fd39ed6aeb855f480d168d3bab",
"size": "2067",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "docs/code/myproject/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "830931"
},
{
"name": "Shell",
"bytes": "1253"
}
],
"symlink_target": ""
}
|
import threading
# django
from django.core.mail import send_mail
# local django
from user import constants
class SendMail(threading.Thread):
"""
Responsible for sending email in the background.
"""
def __init__(self, email, HealthProfessional, SendInvitationProfile):
self.email = email
self.HealthProfessional = HealthProfessional
self.SendInvitationProfile = SendInvitationProfile
threading.Thread.__init__(self)
def run(self):
email_subject = constants.INVITATION_EMAIL_SUBJECT
email_body = constants.INVITATION_EMAIL_BODY
send_mail(email_subject, email_body % (self.HealthProfessional.name,
self.SendInvitationProfile.activation_key),
'medicalprescriptionapp@gmail.com', [self.email], fail_silently=False)
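# Hedged usage sketch (object names are illustrative, not from the
# source): calling start() rather than run() hands the send off to the
# worker thread, so the request that triggered it returns immediately.
#
#   SendMail(user.email, health_professional, invitation_profile).start()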
|
{
"content_hash": "277e119038280df518d954644a563c66",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 90,
"avg_line_length": 32.65384615384615,
"alnum_prop": 0.6666666666666666,
"repo_name": "fga-gpp-mds/2017.2-Receituario-Medico",
"id": "f74184a99db0ef4c8a715360c6d769d1aa83ebc9",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "medical_prescription/user/views/sendmail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2123328"
},
{
"name": "CoffeeScript",
"bytes": "102158"
},
{
"name": "HTML",
"bytes": "2703462"
},
{
"name": "JavaScript",
"bytes": "7544427"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "627321"
},
{
"name": "Ruby",
"bytes": "1030"
},
{
"name": "Shell",
"bytes": "3774"
}
],
"symlink_target": ""
}
|
from django.contrib.syndication.views import Feed
from django.conf import settings
from django.shortcuts import get_object_or_404
from website_showroom.models import Edition, EditionWebsite
class RssFeed(Feed):
title = "Chicagocrime.org site news"
link = "/rss/"
description = "Updates on changes and additions to chicagocrime.org."
def get_object(self, request, ed_country):
ed = Edition.objects.get(country=ed_country)
return get_object_or_404(EditionWebsite, edition=ed)
def items(self):
return EditionWebsite.objects.order_by('-pub_date')[:12]
def item_title(self, item):
return item.get_title
def item_link(self, item):
return item.website.url
def item_description(self, item):
return item.desc
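# Hedged wiring sketch (the URL pattern is illustrative, not from the
# source): the syndication framework passes captured URL kwargs such as
# ed_country through to get_object() above.
#
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^rss/(?P<ed_country>[\w-]+)/$', RssFeed()),
#   ]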
|
{
"content_hash": "ab86fdee82a433cf361899ab4f4183ac",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 73,
"avg_line_length": 30.26923076923077,
"alnum_prop": 0.7001270648030495,
"repo_name": "holgerd77/django-website-showroom",
"id": "b57d70c1433b4564ebc0d5d8a6a6895185a99f0b",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website_showroom/feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2185"
},
{
"name": "HTML",
"bytes": "8788"
},
{
"name": "Python",
"bytes": "27449"
}
],
"symlink_target": ""
}
|
"""
__MT_post__Operation.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sun Feb 15 10:31:27 2015
____________________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3Text import *
from ATOM3String import *
from graph_MT_post__Operation import *
class MT_post__Operation(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['MT_post__Expression']
self.graphClass_ = graph_MT_post__Operation
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.MT_post__Type=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_post__name=ATOM3Text('\n#===============================================================================\n# You can access the value of the current node\'s attribute value by: attr_value.\n# If the current node shall be created you MUST initialize it here!\n# You can access a node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# Note that the attribute values are those before the match is rewritten.\n# The order in which this code is executed depends on the label value\n# of the encapsulating node.\n# The given action must return the new value of the attribute.\n#===============================================================================\n\nreturn attr_value\n', 80,15 )
self.MT_label__=ATOM3String('', 20)
self.MT_pivotOut__=ATOM3String('', 20)
self.generatedAttributes = {'MT_post__Type': ('ATOM3Text', ),
'MT_post__name': ('ATOM3Text', ),
'MT_label__': ('ATOM3String', ),
'MT_pivotOut__': ('ATOM3String', ) }
self.realOrder = ['MT_post__Type','MT_post__name','MT_label__','MT_pivotOut__']
self.directEditing = [0,0,1,1]
def clone(self):
cloneObject = MT_post__Operation( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if actionID == self.CREATE:
self.autoIncrLabel(params)
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
def autoIncrLabel(self, params):
#===============================================================================
# Auto increment the label
#===============================================================================
# If there is already one, ignore
if not self.MT_label__.isNone(): return
# Get the maximum label of all MT_pre__ elements
label = 0
for nt in self.parent.ASGroot.listNodes:
if nt.startswith('MT_post__'):
for node in self.parent.ASGroot.listNodes[nt]:
currLabel = 0
try:
currLabel = int(node.MT_label__.getValue())
except:
pass
if currLabel > label:
label = currLabel
# The label of this instance will be the max label + 1
self.MT_label__.setValue(str(label + 1))
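# Hedged standalone sketch of the rule in autoIncrLabel above: the new
# label is max(existing numeric labels) + 1, with non-numeric labels
# ignored just as the try/except does (sample labels are illustrative).
if __name__ == '__main__':
    label = 0
    for value in ['1', '3', 'x']:
        try:
            label = max(label, int(value))
        except ValueError:
            pass
    print(str(label + 1))  # -> '4'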
|
{
"content_hash": "c124babf52a7b0689be22eafe21a4662",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 745,
"avg_line_length": 48.99145299145299,
"alnum_prop": 0.5680390788555478,
"repo_name": "levilucio/SyVOLT",
"id": "796dccb990cd83d13d7238b5b81e5a1b4be55c7f",
"size": "5732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/MT_post__Operation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from typing import NamedTuple
import kfp
from kfp.components import func_to_container_op
# Stabilizing the test output
class StableIDGenerator:
def __init__(self, ):
self._index = 0
def get_next_id(self, ):
self._index += 1
return '{code:0{num_chars:}d}'.format(code=self._index, num_chars=kfp.dsl._for_loop.LoopArguments.NUM_CODE_CHARS)
kfp.dsl.ParallelFor._get_unique_id_code = StableIDGenerator().get_next_id
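# Hedged note: with the patch above, successive loop ID codes become a
# deterministic zero-padded counter ('00001', '00002', ... assuming
# NUM_CODE_CHARS is 5) instead of a random code, which keeps the
# compiled output stable across runs.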
@func_to_container_op
def produce_str() -> str:
return "Hello"
@func_to_container_op
def produce_list_of_dicts() -> list:
return ([{"aaa": "aaa1", "bbb": "bbb1"}, {"aaa": "aaa2", "bbb": "bbb2"}],)
@func_to_container_op
def produce_list_of_strings() -> list:
return (["a", "z"],)
@func_to_container_op
def produce_list_of_ints() -> list:
return ([1234567890, 987654321],)
@func_to_container_op
def consume(param1):
print(param1)
@kfp.dsl.pipeline()
def parallelfor_item_argument_resolving():
produce_str_task = produce_str()
produce_list_of_strings_task = produce_list_of_strings()
produce_list_of_ints_task = produce_list_of_ints()
produce_list_of_dicts_task = produce_list_of_dicts()
with kfp.dsl.ParallelFor(produce_list_of_strings_task.output) as loop_item:
consume(produce_list_of_strings_task.output)
consume(loop_item)
consume(produce_str_task.output)
with kfp.dsl.ParallelFor(produce_list_of_ints_task.output) as loop_item:
consume(produce_list_of_ints_task.output)
consume(loop_item)
with kfp.dsl.ParallelFor(produce_list_of_dicts_task.output) as loop_item:
consume(produce_list_of_dicts_task.output)
#consume(loop_item) # Cannot use the full loop item when it's a dict
consume(loop_item.aaa)
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(parallelfor_item_argument_resolving, __file__ + '.yaml')
|
{
"content_hash": "ae8cdc413e229f2c7e9c03ac968357ae",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 121,
"avg_line_length": 28,
"alnum_prop": 0.6718426501035196,
"repo_name": "kubeflow/kfp-tekton-backend",
"id": "ebcb8c28024035d137d616a35e9ed460fd571510",
"size": "2531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/python/tests/compiler/testdata/parallelfor_item_argument_resolving.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "47293"
},
{
"name": "Go",
"bytes": "1269081"
},
{
"name": "HTML",
"bytes": "3584"
},
{
"name": "JavaScript",
"bytes": "24828"
},
{
"name": "Jupyter Notebook",
"bytes": "177616"
},
{
"name": "Makefile",
"bytes": "9694"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "1628570"
},
{
"name": "Scala",
"bytes": "13000"
},
{
"name": "Shell",
"bytes": "180020"
},
{
"name": "Smarty",
"bytes": "7694"
},
{
"name": "Starlark",
"bytes": "76037"
},
{
"name": "TypeScript",
"bytes": "1641150"
}
],
"symlink_target": ""
}
|
from ..adapters.teradata import Teradata
from .base import SQLDialect
from . import dialects, sqltype_for
@dialects.register_for(Teradata)
class TeradataDialect(SQLDialect):
@sqltype_for('integer')
def type_integer(self):
return 'INT'
@sqltype_for('text')
def type_text(self):
return 'VARCHAR(2000)'
@sqltype_for('json')
def type_json(self):
return 'VARCHAR(4000)'
@sqltype_for('float')
def type_float(self):
return 'REAL'
@sqltype_for('list:integer')
def type_list_integer(self):
return self.types['json']
@sqltype_for('list:string')
def type_list_string(self):
return self.types['json']
@sqltype_for('list:reference')
def type_list_reference(self):
return self.types['json']
@sqltype_for('bigint')
def type_bigint(self):
return 'BIGINT'
@sqltype_for('id')
def type_id(self):
return 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL'
@sqltype_for('big-id')
def type_big_id(self):
return 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL'
@sqltype_for('reference')
def type_reference(self):
return 'INT'
@sqltype_for('big-reference')
def type_big_reference(self):
return 'BIGINT'
@sqltype_for('geometry')
def type_geometry(self):
return 'ST_GEOMETRY'
@sqltype_for('reference FK')
def type_reference_fk(self):
return ' REFERENCES %(foreign_key)s '
@sqltype_for('reference TFK')
def type_reference_tfk(self):
return ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s' + \
' (%(foreign_key)s)'
def left_join(self, val):
return 'LEFT OUTER JOIN %s' % val
def select(self, fields, tables, where=None, groupby=None, having=None,
orderby=None, limitby=None, distinct=False, for_update=False):
dst, whr, grp, order, limit, offset, upd = '', '', '', '', '', '', ''
if distinct is True:
dst = ' DISTINCT'
elif distinct:
dst = ' DISTINCT ON (%s)' % distinct
if where:
whr = ' %s' % self.where(where)
if groupby:
grp = ' GROUP BY %s' % groupby
if having:
grp += ' HAVING %s' % having
if orderby:
order = ' ORDER BY %s' % orderby
if limitby:
(lmin, lmax) = limitby
limit = ' TOP %i' % lmax
if for_update:
upd = ' FOR UPDATE'
return 'SELECT%s%s %s FROM %s%s%s%s%s%s;' % (
dst, limit, fields, tables, whr, grp, order, offset, upd)
def truncate(self, table, mode=''):
return ['DELETE FROM %s ALL;' % table._tablename]
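# Hedged sketch of the statement select() above builds for
# limitby=(0, 10): the lower bound is dropped and only a TOP clause is
# emitted (field and table names are illustrative).
if __name__ == '__main__':
    dst = whr = grp = order = offset = upd = ''
    limit = ' TOP %i' % 10
    print('SELECT%s%s %s FROM %s%s%s%s%s%s;' % (
        dst, limit, 'id, name', 'users', whr, grp, order, offset, upd))
    # -> SELECT TOP 10 id, name FROM users;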
|
{
"content_hash": "4992dac84e28b200358c9c4921f35b81",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 28.604166666666668,
"alnum_prop": 0.5713765477057539,
"repo_name": "niphlod/pydal",
"id": "82aa116980460414a1597f46821ca1da88189da2",
"size": "2746",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pydal/dialects/teradata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "820628"
}
],
"symlink_target": ""
}
|
from robot.output.xmllogger import XmlLogger
class OutputWriter(XmlLogger):
def __init__(self, output):
XmlLogger.__init__(self, output, generator='Rebot')
def start_message(self, msg):
self._write_message(msg)
def close(self):
self._writer.end('robot')
self._writer.close()
def end_result(self, result):
self.close()
|
{
"content_hash": "cf01239991d994e6c2bc05e4995d988c",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 22.352941176470587,
"alnum_prop": 0.6210526315789474,
"repo_name": "ktan2020/legacy-automation",
"id": "0be04efb2db79cc238ceafabaeebca8b0469bf9d",
"size": "986",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "win/Lib/site-packages/robot/reporting/outputwriter.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "913"
},
{
"name": "Ada",
"bytes": "289"
},
{
"name": "Assembly",
"bytes": "687"
},
{
"name": "Boo",
"bytes": "540"
},
{
"name": "C",
"bytes": "40116"
},
{
"name": "C#",
"bytes": "474"
},
{
"name": "C++",
"bytes": "393"
},
{
"name": "CSS",
"bytes": "70883"
},
{
"name": "ColdFusion",
"bytes": "1012"
},
{
"name": "Common Lisp",
"bytes": "1034"
},
{
"name": "D",
"bytes": "1858"
},
{
"name": "Eiffel",
"bytes": "426"
},
{
"name": "Erlang",
"bytes": "9243"
},
{
"name": "FORTRAN",
"bytes": "1810"
},
{
"name": "Forth",
"bytes": "182"
},
{
"name": "Groovy",
"bytes": "2366"
},
{
"name": "Haskell",
"bytes": "816"
},
{
"name": "Haxe",
"bytes": "455"
},
{
"name": "Java",
"bytes": "1155"
},
{
"name": "JavaScript",
"bytes": "69444"
},
{
"name": "Lua",
"bytes": "795"
},
{
"name": "Matlab",
"bytes": "1278"
},
{
"name": "OCaml",
"bytes": "350"
},
{
"name": "Objective-C++",
"bytes": "885"
},
{
"name": "PHP",
"bytes": "1411"
},
{
"name": "Pascal",
"bytes": "388"
},
{
"name": "Perl",
"bytes": "252651"
},
{
"name": "Pike",
"bytes": "589"
},
{
"name": "Python",
"bytes": "42085780"
},
{
"name": "R",
"bytes": "1156"
},
{
"name": "Ruby",
"bytes": "480"
},
{
"name": "Scheme",
"bytes": "282"
},
{
"name": "Shell",
"bytes": "30518"
},
{
"name": "Smalltalk",
"bytes": "926"
},
{
"name": "Squirrel",
"bytes": "697"
},
{
"name": "Stata",
"bytes": "302"
},
{
"name": "SystemVerilog",
"bytes": "3145"
},
{
"name": "Tcl",
"bytes": "1039"
},
{
"name": "TeX",
"bytes": "1746"
},
{
"name": "VHDL",
"bytes": "985"
},
{
"name": "Vala",
"bytes": "664"
},
{
"name": "Verilog",
"bytes": "439"
},
{
"name": "Visual Basic",
"bytes": "2142"
},
{
"name": "XSLT",
"bytes": "152770"
},
{
"name": "ooc",
"bytes": "890"
},
{
"name": "xBase",
"bytes": "769"
}
],
"symlink_target": ""
}
|
import pytest
from datadog_checks.base import AgentCheck
# Minimal E2E testing
@pytest.mark.e2e
def test_e2e(aggregator, instance, dd_agent_check):
# Prevent the integration from failing before even running the check
instance['ticket_location'] = '.'
dd_agent_check(instance, rate=True)
aggregator.assert_service_check('mapr.can_connect', AgentCheck.OK, count=2)
|
{
"content_hash": "220c9726ab15a8cc6c6f602c943518fc",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 79,
"avg_line_length": 27.428571428571427,
"alnum_prop": 0.7447916666666666,
"repo_name": "DataDog/integrations-core",
"id": "39c16541e910704cf4d7522dca80da2328ad41d0",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mapr/tests/test_e2e.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
T = current.T
settings = current.deployment_settings
"""
Template settings
All settings used to configure a specific template are located here
Deployers should ideally not need to edit any other files outside of their template folder
"""
# PrePopulate data
settings.base.prepopulate = ("skeleton", "demo/users")
# Theme (folder to use for views/layout.html)
settings.base.theme = "skeleton"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
#settings.gis.countries = ["US"]
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
#settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("bs", "Bosanski"),
# ("en", "English"),
# ("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("es", "Español"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("km", "ភាសាខ្មែរ"),
# ("ko", "한국어"),
# ("ne", "नेपाली"), # Nepali
# ("prs", "دری"), # Dari
# ("ps", "پښتو"), # Pashto
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("tet", "Tetum"),
# ("tl", "Tagalog"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
#])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
#settings.L10n.display_toolbar = False
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
])
|
{
"content_hash": "0c9e5ae2a5b803f96f28fb0b5b8ecf77",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 137,
"avg_line_length": 35.98675496688742,
"alnum_prop": 0.6404122193595878,
"repo_name": "gnarula/eden_deployment",
"id": "4a1dd70e18f744e9f06b1a6aaf2c1a0aba24ce02",
"size": "5558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private/templates/skeleton/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
}
|
from collections import abc
from unittest import mock
from cloudkittyclient.tests.unit.v1 import base
from cloudkittyclient.v1 import report_cli
class TestReportCli(base.BaseAPIEndpointTestCase):
def test_report_tenant_list(self):
class DummyAPIClient(object):
def get_tenants(*args, **kwargs):
return ['ee530dfc-319a-438f-9d43-346cfef501d6',
'91743a9a-688b-4526-b568-7b501531176c',
'4468704c-972e-4cfd-a342-9b71c493b79b']
class ClientWrap(object):
report = DummyAPIClient()
class DummyParsedArgs(object):
def __init__(self):
self.begin = '2042-01-01T00:00:00'
self.end = '2042-12-01T00:00:00'
class DummyCliTenantList(report_cli.CliTenantList):
def __init__(self):
pass
def __get_client_from_osc(*args):
return ClientWrap()
parsed_args = DummyParsedArgs()
cli_class_instance = DummyCliTenantList()
with mock.patch('cloudkittyclient.utils.get_client_from_osc',
new=__get_client_from_osc):
# NOTE(peschk_l): self is only used to get a client, so we just
# override __init__ in order to skip class instantiation. In
# Python 3 we could just have passed None
result = report_cli.CliTenantList.take_action(
cli_class_instance, parsed_args)
assert len(result) == 2
assert result[0] == ('Tenant ID', )
assert isinstance(result[1], abc.Iterable)
for res in result[1]:
assert isinstance(res, abc.Iterable)
|
{
"content_hash": "c46b638db18816983a3bdf337f634ea8",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 79,
"avg_line_length": 34.51020408163265,
"alnum_prop": 0.5955056179775281,
"repo_name": "openstack/python-cloudkittyclient",
"id": "bce53d546bb160916a2aa944faaa919212b7adb4",
"size": "2299",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudkittyclient/tests/unit/v1/test_report_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "223930"
}
],
"symlink_target": ""
}
|
import pkg_resources
__version__ = pkg_resources.get_distribution('erajp').version
|
{
"content_hash": "03aaf05bb840b41b5678b551278bab00",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 61,
"avg_line_length": 17.2,
"alnum_prop": 0.7441860465116279,
"repo_name": "intermezzo-fr/erajp",
"id": "5b7fe01b1affc6fd87dc3a13e64fa69db470cd59",
"size": "110",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "erajp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3963"
}
],
"symlink_target": ""
}
|
"""
Enumerations used by text and related objects
"""
from __future__ import absolute_import
from .base import (
alias,
Enumeration,
EnumMember,
ReturnValueOnlyEnumMember,
XmlEnumeration,
XmlMappedEnumMember,
)
class MSO_AUTO_SIZE(Enumeration):
"""
Determines the type of automatic sizing allowed.
The following names can be used to specify the automatic sizing behavior
used to fit a shape's text within the shape bounding box, for example::
from pptx.enum.text import MSO_AUTO_SIZE
shape.text_frame.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE
The word-wrap setting of the text frame interacts with the auto-size
setting to determine the specific auto-sizing behavior.
Note that ``TextFrame.auto_size`` can also be set to |None|, which removes
the auto size setting altogether. This causes the setting to be inherited,
either from the layout placeholder, in the case of a placeholder shape, or
from the theme.
"""
NONE = 0
SHAPE_TO_FIT_TEXT = 1
TEXT_TO_FIT_SHAPE = 2
__ms_name__ = "MsoAutoSize"
__url__ = (
"http://msdn.microsoft.com/en-us/library/office/ff865367(v=office.15" ").aspx"
)
__members__ = (
EnumMember(
"NONE",
0,
"No automatic sizing of the shape or text will be don"
"e. Text can freely extend beyond the horizontal and vertical ed"
"ges of the shape bounding box.",
),
EnumMember(
"SHAPE_TO_FIT_TEXT",
1,
"The shape height and possibly width are"
" adjusted to fit the text. Note this setting interacts with the"
" TextFrame.word_wrap property setting. If word wrap is turned o"
"n, only the height of the shape will be adjusted; soft line bre"
"aks will be used to fit the text horizontally.",
),
EnumMember(
"TEXT_TO_FIT_SHAPE",
2,
"The font size is reduced as necessary t"
"o fit the text within the shape.",
),
ReturnValueOnlyEnumMember(
"MIXED",
-2,
"Return value only; indicates a combination of auto"
"matic sizing schemes are used.",
),
)
@alias("MSO_UNDERLINE")
class MSO_TEXT_UNDERLINE_TYPE(XmlEnumeration):
"""
Indicates the type of underline for text. Used with
:attr:`.Font.underline` to specify the style of text underlining.
Alias: ``MSO_UNDERLINE``
Example::
from pptx.enum.text import MSO_UNDERLINE
run.font.underline = MSO_UNDERLINE.DOUBLE_LINE
"""
__ms_name__ = "MsoTextUnderlineType"
__url__ = "http://msdn.microsoft.com/en-us/library/aa432699.aspx"
__members__ = (
XmlMappedEnumMember("NONE", 0, "none", "Specifies no underline."),
XmlMappedEnumMember(
"DASH_HEAVY_LINE", 8, "dashHeavy", "Specifies a dash underline."
),
XmlMappedEnumMember("DASH_LINE", 7, "dash", "Specifies a dash line underline."),
XmlMappedEnumMember(
"DASH_LONG_HEAVY_LINE",
10,
"dashLongHeavy",
"Specifies a long heavy line underline.",
),
XmlMappedEnumMember(
"DASH_LONG_LINE", 9, "dashLong", "Specifies a dashed long line underline."
),
XmlMappedEnumMember(
"DOT_DASH_HEAVY_LINE",
12,
"dotDashHeavy",
"Specifies a dot dash heavy line underline.",
),
XmlMappedEnumMember(
"DOT_DASH_LINE", 11, "dotDash", "Specifies a dot dash line underline."
),
XmlMappedEnumMember(
"DOT_DOT_DASH_HEAVY_LINE",
14,
"dotDotDashHeavy",
"Specifies a dot dot dash heavy line underline.",
),
XmlMappedEnumMember(
"DOT_DOT_DASH_LINE",
13,
"dotDotDash",
"Specifies a dot dot dash line underline.",
),
XmlMappedEnumMember(
"DOTTED_HEAVY_LINE",
6,
"dottedHeavy",
"Specifies a dotted heavy line underline.",
),
XmlMappedEnumMember(
"DOTTED_LINE", 5, "dotted", "Specifies a dotted line underline."
),
XmlMappedEnumMember(
"DOUBLE_LINE", 3, "dbl", "Specifies a double line underline."
),
XmlMappedEnumMember(
"HEAVY_LINE", 4, "heavy", "Specifies a heavy line underline."
),
XmlMappedEnumMember(
"SINGLE_LINE", 2, "sng", "Specifies a single line underline."
),
XmlMappedEnumMember(
"WAVY_DOUBLE_LINE", 17, "wavyDbl", "Specifies a wavy double line underline."
),
XmlMappedEnumMember(
"WAVY_HEAVY_LINE", 16, "wavyHeavy", "Specifies a wavy heavy line underline."
),
XmlMappedEnumMember(
"WAVY_LINE", 15, "wavy", "Specifies a wavy line underline."
),
XmlMappedEnumMember("WORDS", 1, "words", "Specifies underlining words."),
ReturnValueOnlyEnumMember("MIXED", -2, "Specifies a mixed of underline types."),
)
@alias("MSO_ANCHOR")
class MSO_VERTICAL_ANCHOR(XmlEnumeration):
"""
Specifies the vertical alignment of text in a text frame. Used with the
``.vertical_anchor`` property of the |TextFrame| object. Note that the
``vertical_anchor`` property can also have the value None, indicating
there is no directly specified vertical anchor setting and its effective
value is inherited from its placeholder if it has one or from the theme.
|None| may also be assigned to remove an explicitly specified vertical
anchor setting.
"""
__ms_name__ = "MsoVerticalAnchor"
__url__ = "http://msdn.microsoft.com/en-us/library/office/ff865255.aspx"
__members__ = (
XmlMappedEnumMember(
None,
None,
None,
"Text frame has no vertical anchor specified "
"and inherits its value from its layout placeholder or theme.",
),
XmlMappedEnumMember("TOP", 1, "t", "Aligns text to top of text frame"),
XmlMappedEnumMember("MIDDLE", 3, "ctr", "Centers text vertically"),
XmlMappedEnumMember("BOTTOM", 4, "b", "Aligns text to bottom of text frame"),
ReturnValueOnlyEnumMember(
"MIXED",
-2,
"Return value only; indicates a combination of the " "other states.",
),
)
@alias("PP_ALIGN")
class PP_PARAGRAPH_ALIGNMENT(XmlEnumeration):
"""
Specifies the horizontal alignment for one or more paragraphs.
Alias: ``PP_ALIGN``
Example::
from pptx.enum.text import PP_ALIGN
shape.paragraphs[0].alignment = PP_ALIGN.CENTER
"""
__ms_name__ = "PpParagraphAlignment"
__url__ = (
"http://msdn.microsoft.com/en-us/library/office/ff745375(v=office.15" ").aspx"
)
__members__ = (
XmlMappedEnumMember("CENTER", 2, "ctr", "Center align"),
XmlMappedEnumMember(
"DISTRIBUTE",
5,
"dist",
"Evenly distributes e.g. Japanese chara"
"cters from left to right within a line",
),
XmlMappedEnumMember(
"JUSTIFY",
4,
"just",
"Justified, i.e. each line both begins and"
" ends at the margin with spacing between words adjusted such th"
"at the line exactly fills the width of the paragraph.",
),
XmlMappedEnumMember(
"JUSTIFY_LOW",
7,
"justLow",
"Justify using a small amount of sp" "ace between words.",
),
XmlMappedEnumMember("LEFT", 1, "l", "Left aligned"),
XmlMappedEnumMember("RIGHT", 3, "r", "Right aligned"),
XmlMappedEnumMember("THAI_DISTRIBUTE", 6, "thaiDist", "Thai distributed"),
ReturnValueOnlyEnumMember(
"MIXED",
-2,
"Return value only; indicates multiple paragraph al"
"ignments are present in a set of paragraphs.",
),
)
|
{
"content_hash": "035a039113845fd57579eeaa80aa7407",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 88,
"avg_line_length": 32.39525691699605,
"alnum_prop": 0.5833333333333334,
"repo_name": "scanny/python-pptx",
"id": "54297bbd51d1b46efd336ec90378e213a2bce7bf",
"size": "8215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pptx/enum/text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "124592"
},
{
"name": "Makefile",
"bytes": "2055"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "2152173"
}
],
"symlink_target": ""
}
|
from setuptools import setup
if __name__ == '__main__':
setup(
name="LabbookDB",
version="0.0.1",
description = "A Wet-Work-Tracking Database Application Framework",
author = "Horea Christian",
author_email = "horea.christ@yandex.com",
url = "https://github.com/TheChymera/LabbookDB",
keywords = [
"laboratory notebook",
"labbook",
"wet work",
"record keeping",
"reports",
"life science",
"biology",
"neuroscience",
"behaviour",
"relational database",
"SQL",
],
classifiers = [],
install_requires = [],
provides = ["labbookdb"],
packages = [
"labbookdb",
"labbookdb.db",
"labbookdb.evaluate",
"labbookdb.introspection",
"labbookdb.report",
],
entry_points = {'console_scripts' : \
['LDB = labbookdb.cli:main']
}
)
|
{
"content_hash": "cd78a23952d5149fde16fb45c1316492",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 21.7027027027027,
"alnum_prop": 0.6151930261519303,
"repo_name": "TheChymera/LabbookDB",
"id": "18985c663eaa4d19dcc5be49a33c7440631f4958",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "144536"
},
{
"name": "Shell",
"bytes": "51"
},
{
"name": "TeX",
"bytes": "1544"
}
],
"symlink_target": ""
}
|
"""
Setup/build script for LBN EBB Optionifier (py2app on a Mac)
Copyright (c) 2013, Jake Hartz. All rights reserved.
Use of this source code is governed by a BSD-style license
that can be found in the LICENSE.txt file.
Usage (Mac OS X):
./setup-mac.py
/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Versions/A/Support/lsregister -f "dist/LBN EBB Optionifier.app"
"""
import os, sys, shutil, time, subprocess
from setuptools import setup
from vars import metadata, PLUGINS_LOCAL_BASE_DIR, PLUGINS_REMOTE_BASE_DIR
def get_folder(path):
if isinstance(path, list):
return [get_folder(i) for i in path]
else:
return (path, [os.path.join(path, i) for i in os.listdir(path) if i[:1] != "." and os.path.isfile(os.path.join(path, i))])
if sys.platform != "darwin":
print "ERROR: This build script is for Macs only."
elif sys.version_info < (2, 6):
print "ERROR: Requires Python 2.6 or greater."
else:
if "py2app" not in sys.argv:
sys.argv.insert(1, "py2app")
data_files = [get_folder("resources")]
rm_local_base_dir = False
if os.path.isdir(PLUGINS_LOCAL_BASE_DIR) == False:
for loc in PLUGINS_REMOTE_BASE_DIR:
if os.path.exists(os.path.join(loc, "plugins.js")):
os.mkdir(PLUGINS_LOCAL_BASE_DIR)
rm_local_base_dir = True
shutil.copy(os.path.join(loc, "plugins.js"), os.path.join(PLUGINS_LOCAL_BASE_DIR, "plugins.js"))
if os.path.isdir(os.path.join(loc, "plugins")):
shutil.copytree(os.path.join(loc, "plugins"), os.path.join(PLUGINS_LOCAL_BASE_DIR, "plugins"))
break
if os.path.isdir(PLUGINS_LOCAL_BASE_DIR):
data_files.append(get_folder(PLUGINS_LOCAL_BASE_DIR))
if os.path.isdir(os.path.join(PLUGINS_LOCAL_BASE_DIR, "plugins")):
data_files.append(get_folder(os.path.join(PLUGINS_LOCAL_BASE_DIR, "plugins")))
options = {
"setup_requires": ["py2app"],
"app": ["optionifier.py"],
"data_files": data_files,
"options": {
"py2app": {
"argv_emulation": True,
"iconfile": "resources/icon.icns",
"plist": {
"CFBundleIdentifier": "com.github.jhartz.lbn-ebb-optionifier",
"CFBundleGetInfoString": metadata.description,
"NSHumanReadableCopyright": metadata.copyright,
"LSArchitecturePriority": [
"i386"
],
"UTExportedTypeDeclarations": [
{
"UTTypeIdentifier": "com.github.jhartz.lbn-ebb-optionifier.leo",
"UTTypeDescription": "LBN EBB Options",
"UTTypeConformsTo": [
"public.data"
],
"UTTypeTagSpecification": {
"public.filename-extension": ["leo"]
}
}
],
"CFBundleDocumentTypes": [
{
"CFBundleTypeIconFile": "icon.icns",
"CFBundleTypeName": "LBN EBB Options",
"LSItemContentTypes": [
"com.github.jhartz.lbn-ebb-optionifier.leo"
],
"CFBundleTypeRole": "Editor",
"LSHandlerRank": "Owner"
}
]
}
}
}
}
setup_options = dict(metadata.items() + options.items())
setup(**setup_options)
if rm_local_base_dir:
shutil.rmtree(PLUGINS_LOCAL_BASE_DIR)
|
{
"content_hash": "55574161ef1c168061f1dd651d11ac9b",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 153,
"avg_line_length": 40.144329896907216,
"alnum_prop": 0.5190035952747817,
"repo_name": "jhartz/lbn-ebb-optionifier",
"id": "15bbad0626d9d2d94e06eb97e6224fe5b8f67b05",
"size": "3935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup-mac.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "77177"
}
],
"symlink_target": ""
}
|
from envi.const import *
from envi import IF_NOFALL, IF_PRIV, IF_CALL, IF_BRANCH, IF_RET, IF_COND
IF_BYTE = 1<<8
# no operand instructions
nocode = [
'.word' # Something is wrong, so return the dirty word
]
# single operand instructions
scode = [
('rrc', 0), # RRC Rotate right through carry
('swpb', 0), # SWPB Swap bytes
('rra', 0), # RRA Rotate right arithmetic
('sxt', 0), # SXT Sign extend byte to word
('push', 0), # PUSH Push value onto stack
('call', IF_CALL), # CALL Subroutine call; push PC and move source to PC
('reti', IF_NOFALL), # RETI Return from interrupt; pop SR then pop PC
]
# jump conditions
# PC = PC + 2 * offset
jcode = [
('jnz',IF_BRANCH | IF_COND), # JNE/JNZ Jump if not equal/zero
('jz',IF_BRANCH | IF_COND), # JEQ/JZ Jump if equal/zero
('jnc',IF_BRANCH | IF_COND), # JNC/JLO Jump if no carry/lower
('jc',IF_BRANCH | IF_COND), # JC/JHS Jump if carry/higher or same
('jn',IF_BRANCH | IF_COND), # JN Jump if negative
('jge',IF_BRANCH | IF_COND), # JGE Jump if greater or equal
('jl',IF_BRANCH | IF_COND), # JL Jump if less
('jmp',IF_BRANCH|IF_NOFALL), # JMP Jump (unconditionally)
]
# double operand instructions
dcode = [
'mov', # MOV Move source to destination
'add', # ADD Add source to destination
'addc', # ADDC Add source and carry to destination
'subc', # SUBC Subtract source from destination (with carry)
'sub', # SUB Subtract source from destination
'cmp', # CMP Compare (pretend to subtract) source from destination
'dadd', # Decimal add source to destination (with carry)
'bit', # BIT Test bits of source AND destination
'bic', # BIC Bit clear (dest &= ~src)
'bis', # BIS Bit set (logical OR)
'xor', # XOR Exclusive or source with destination
'and' # AND Logical AND source with destination (dest &= src)
]
# double special operand instructions
dspcode = [
('nop', 0), # No Operation - MOV
('pop', 0), # POP stackpointer - MOV
('br', IF_BRANCH|IF_NOFALL), # Branch - MOV
('ret', IF_NOFALL), # Return - MOV
('clr', 0), # Clear destination - MOV
('rla', 0), # Shift and rotate left - ADD
('inc', 0), # Increment by one - ADD
('incd', 0), # Increment by two - ADD
('rlc', 0), # Shift and rotate left - ADDC
('adc', 0), # Adding only the carry bit - ADDC
('sbc', 0), # Subtracting only the carry bit - SUBC
('dec', 0), # Decrement by one - SUB
('decd', 0), # Decrement by two - SUB
('tst', 0), # Test - CMP
('dadc', 0), # Decimal adding only the carry bit - DADD
('clrc', 0), # Status register operation - BIC
('setc', 0), # Status register operation - BIS
('clrz', 0), # Status register operation - BIC
('setz', 0), # Status register operation - BIS
('clrn', 0), # Status register operation - BIC
('setn', 0), # Status register operation - BIS
('dint', 0), # Status register operation - BIC
('eint', 0), # Status register operation - BIC
('inv', 0), # Invert value - XOR
]
# Status Register Flags
"""
N, Z, C and V are the usual processor status bits, set as a side effect to instruction execution. If r2 is specified as a destination, the explicitly written bits override the side effects. An instruction sets all 4 bits, or none of them. Logical instructions set C to the opposite of Z (C is set if the result is NOT zero), and clear V to 0.
C is a "carry" bit as opposed to a "borrow" bit when subtracting. That is, subtract with carry of A-B computes A + ~B + Carry. (~ is the C "not" or "bitwise invert" operator.)
http://cnx.org/content/m23497/latest/
Bit Description
8   V       Overflow bit. V = 1 -> result of an arithmetic operation overflows the signed-variable range.
7   SCG1    System clock generator 1. SCG1 = 1 -> DCO generator is turned off - if not used for MCLK or SMCLK.
6   SCG0    System clock generator 0. SCG0 = 1 -> FLL+ loop control is turned off.
5   OSCOFF  Oscillator Off. OSCOFF = 1 -> turns off LFXT1 when it is not used for MCLK or SMCLK.
4   CPUOFF  CPU off. CPUOFF = 1 -> disable CPU core.
3   GIE     General interrupt enable. GIE = 1 -> enables maskable interrupts.
2   N       Negative flag. N = 1 -> result of a byte or word operation is negative.
1   Z       Zero flag. Z = 1 -> result of a byte or word operation is 0.
0   C       Carry flag. C = 1 -> result of a byte or word operation produced a carry.
"""
REG_SR_C = 1 << 0 # Carry bit
REG_SR_Z = 1 << 1
REG_SR_N = 1 << 2
REG_SR_GIE = 1 << 3 # global interrupt enable; 0 disables interrupts
REG_SR_CPUOFF = 1 << 4 # cpu off - various low-power modes
REG_SR_OSCOFF = 1 << 5 # oscillator off - various low-power modes
REG_SR_SCG0 = 1 << 6 # system clock generator
REG_SR_SCG1 = 1 << 7 # system clock generator
REG_SR_V = 1 << 8
# Primary Functional registers index values
REG_PC = 0 # reg0 is the Program Counter
REG_SP = 1 # reg1 is the Stack Pointer
REG_SR = 2 # reg2 is the Status Register
REG_CG = 3 # reg3 is the Constant Generator
# Operand Type
SINGLE_OPCODE_TYPE = 0
JUMP_OPCODE_TYPE = 1
DOUBLE_OPCODE_TYPE = 2
SP_OPCODE_TYPE = 3
# Register Modes
REG_DIRECT = 0x0
REG_INDEX = 0x1
REG_INDIRECT = 0x2
REG_IND_AUTOINC = 0x3
JUMP_MODE = 0x4
# Masks
TEST_MASKS = 0xF000 # test operands
SINGLE_MASKS = 0xE000 # ID single operands
JUMP_MASKS = 0xC000 # ID jumps
JUMP_OFFSET = 0x3FF # Jump offset
SOURCE_REG = 0xF # single op reg
DSOURCE_REG = 0xF00 # double source reg
DEST_REG = 0xF # double dest reg
BYTE_WORD = 0x40 # byte or word
SOURCE_ADDR_MODE= 0x30 # Addressing mode source
DEST_ADDR_MODE = 0x80 # Addressing mode destination
RETI_MASK = 0x1300 # Return 'reti'
REG_BYTE = 0x00FF # Clear the most significant byte
REG_FLAGS = 0x01FF # Clear most significant 7 bits to get Status Register flags
# Compare to get the proper Opcode
SINGLE_OPCODE = 0xF80 # Single opcode - rotate right 7
DOUBLE_OPCODE = 0xF000 # Double opcode - rotate right 12
JUMP_OPCODE = 0x1c00 # Jump condition - rotate right 10
# Sizes
BYTE = 1 #bytes
WORD = 2 #bytes
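# Hedged decoding sketch using the constants above; 0x3c05 is an
# illustrative jump word (top three bits 001 mark a jump).
if __name__ == '__main__':
    word = 0x3c05
    cond = (word & JUMP_OPCODE) >> 10  # 7 -> jcode[7] is ('jmp', ...)
    offset = word & JUMP_OFFSET        # 5 -> PC = PC + 2 * 5
    print(jcode[cond][0], offset)      # -> jmp 5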
|
{
"content_hash": "842e0b284ee21c61b8412542e7a51aa1",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 342,
"avg_line_length": 44.12162162162162,
"alnum_prop": 0.6055130168453292,
"repo_name": "pwns4cash/vivisect",
"id": "1715d80c02a8243d1f72132b6990b8e45cdcb65f",
"size": "6530",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "envi/archs/msp430/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11384786"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
from flask import Flask, render_template, request, current_app
import logging
import gevent.wsgi
import json
from vote import queue, redis_handler
from vote.signals import app_start
app = Flask(__name__, instance_path='/voting-machine/vote/web/')
TEAMS_COMPETING = []
def init_app(app):
"""
Initialize the rabbitmq and redis extensions.
"""
rabbit_queue = queue.Queue()
r_handler = redis_handler.RedisHandler()
app.extensions['rabbit_queue'] = rabbit_queue
app.extensions['r_handler'] = r_handler
@app_start.connect
def start_extensions(app, **kwargs):
"""
Start redis and rabbitmq at app startup
"""
extensions = [
app.extensions.get('rabbit_queue'),
app.extensions.get('r_handler')
]
for extension in extensions:
if extension:
extension.start()
def votes():
"""
Collects the current voting results from redis.
:return: dict mapping each team to its vote total
"""
vote_total = {}
for team in range(1, 5):
team = 'team{}'.format(team)
vote_total[team] = current_app.extensions['r_handler'].get_key(team)
if vote_total[team] is None:
vote_total[team] = 0
return vote_total
@app.route('/', methods=['GET', 'POST'])
def place_vote():
"""
Main page.
:return: rendered page with the vote's status message for POST requests
:return: rendered default voting page for GET requests
"""
vote_total = votes()
if request.method == 'POST':
team = request.form['vote']
# Post a message with the team being voted for.
message = json.dumps({'team': team})
current_app.extensions['rabbit_queue'].queue_message(message)
# Rendering the output for index.
return render_template(
'index.html',
last_vote=team,
teams_competing=TEAMS_COMPETING,
team_votes_total=vote_total,
)
else:
return render_template('index.html',
teams_competing=TEAMS_COMPETING,
team_votes_total=vote_total)
def create_teams():
"""
Helper method to create teams.
"""
for x in range(1, 4):
TEAMS_COMPETING.append('{}'.format(x))
def run_app(app):
init_app(app)
app_start.send(app)
try:
logging.warning('starting web service')
ws = gevent.wsgi.WSGIServer(('0.0.0.0', int(5000)), app)
ws.serve_forever()
finally:
logging.info('change this later')
def main():
app.debug = True
create_teams()
run_app(app)
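# Hedged usage sketch (illustrative): once main() is running, a vote is
# just a form POST to the root URL on port 5000:
#
#   curl -d 'vote=team1' http://localhost:5000/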
|
{
"content_hash": "9534f53ad13f2b2b999ab76232a839eb",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 76,
"avg_line_length": 23.75925925925926,
"alnum_prop": 0.6083398285268901,
"repo_name": "mschlue/voting-machine",
"id": "f399e16a5b6797eddfb960826ba5e058cb4e9a52",
"size": "2566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vote/web/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2710"
},
{
"name": "HTML",
"bytes": "7114"
},
{
"name": "Python",
"bytes": "7796"
}
],
"symlink_target": ""
}
|
"""Bcp server for clients which connect and disconnect randomly."""
from mpf.exceptions.runtime_error import MpfRuntimeError
from mpf.core.utility_functions import Util
from mpf.core.mpf_controller import MpfController
class BcpServer(MpfController):
"""Server socket which listens for incoming BCP clients."""
config_name = "bcp_server"
def __init__(self, machine, ip, port, server_type):
"""Initialise BCP server."""
super().__init__(machine)
self._server = None
self._ip = ip
self._port = port
self._type = server_type
async def start(self):
"""Start the server."""
try:
self._server = await self.machine.clock.start_server(
self._accept_client, self._ip, self._port)
except IOError as e:
raise MpfRuntimeError("Failed to bind BCP Socket to {} on port {}. "
"Is there another application running on that port?".format(self._ip, self._port), 1,
"MPF BCP Server") from e
def stop(self):
"""Stop the BCP server, i.e. closes the listening socket(s)."""
if self._server:
self._server.close()
self._server = None
async def _accept_client(self, client_reader, client_writer):
"""Accept an connection and create client."""
self.info_log("New client connected.")
client = Util.string_to_class(self._type)(self.machine, None, self.machine.bcp)
client.accept_connection(client_reader, client_writer)
client.exit_on_close = False
self.machine.bcp.transport.register_transport(client)
|
{
"content_hash": "56339b5085770540a34c47e43e2277e0",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 119,
"avg_line_length": 37.17777777777778,
"alnum_prop": 0.6120741183502689,
"repo_name": "missionpinball/mpf",
"id": "5c22ab0a6e05efcfb81cd875c3f3d3eb1a7f2a7a",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "mpf/core/bcp/bcp_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "640"
},
{
"name": "C++",
"bytes": "4019"
},
{
"name": "Makefile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "4532953"
}
],
"symlink_target": ""
}
|
"""
Contains plotting tools developed after the implementation of analysis v2
"""
import matplotlib.pyplot as plt
from matplotlib import colors as mpl_colors
import numpy as np
import logging
log = logging.getLogger(__name__)
def scatter_pnts_overlay(
x,
y,
fig=None,
ax=None,
transpose=False,
color='w',
edgecolors='gray',
linewidth=0.5,
marker='.',
s=None,
c=None,
alpha=1,
setlabel=None,
cmap=None,
**kw):
"""
Adds a scattered overlay of the provided data points
    x and y are lists.
Args:
x (array [shape: n*1]): x data
y (array [shape: m*1]): y data
fig (Object):
figure object
"""
if ax is None:
fig, ax = plt.subplots()
if transpose:
log.debug('Inverting x and y axis for non-interpolated points')
ax.scatter(y, x, marker=marker,
color=color, edgecolors=edgecolors, linewidth=linewidth, s=s,
c=c, alpha=alpha, label=setlabel, cmap=cmap)
else:
ax.scatter(x, y, marker=marker,
color=color, edgecolors=edgecolors, linewidth=linewidth, s=s,
c=c, alpha=alpha, label=setlabel, cmap=cmap)
return fig, ax
def contour_overlay(x, y, z, colormap="viridis",
transpose: bool = False,
contour_levels: list = [90, 180, 270],
vlim: tuple = (0, 360),
linestyles: str = 'dashed',
linewidths: float = 2,
cyclic_data: bool = False,
return_contours_only: bool = False,
clabelkw={},
colors=None,
ax=None, fig=None, **kw):
"""
    x and y are lists; z is a matrix with shape (len(x), len(y))
    N.B. The contour overlay sometimes suffers from artifacts
Args:
x (array [shape: n*1]): x data
y (array [shape: m*1]): y data
z (array [shape: n*m]): z data for the contour
colormap (matplotlib.colors.Colormap or str): colormap to be used
        vlim (tuple(vmin, vmax)): required for the colormap normalization and
for cyclic data
cyclic_data (bool): when `True` assumes z data is cyclic at the
boundaries specified by vlim and avoids contour artifacts
fig (Object):
figure object
"""
ax_fig_are_None = ax is None and fig is None
if ax is None:
fig, ax = plt.subplots()
vmin = vlim[0]
vmax = vlim[-1]
norm = mpl_colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
fontsize = 'smaller'
if transpose:
y_tmp = np.copy(y)
y = np.copy(x)
x = y_tmp
z = np.transpose(z)
if cyclic_data:
# Avoid contour plot artifact for cyclic data by removing the
# data half way to the cyclic boundary
minz = (vmin + np.min(contour_levels)) / 2
maxz = (vmax + np.max(contour_levels)) / 2
z = np.copy(z) # don't change the original data
z[(z < minz) | (z > maxz)] = np.nan
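        # Worked example: with vlim=(0, 360) and contour_levels=[90, 180, 270]
        # this masks z outside [45, 315], i.e. halfway between each cyclic
        # boundary and the nearest requested contour level.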
c = ax.contour(x, y, z,
levels=contour_levels, linewidths=linewidths, cmap=colormap,
norm=norm, linestyles=linestyles, colors=colors)
if len(clabelkw):
ax.clabel(c, **clabelkw)
else:
ax.clabel(c, fmt='%.1f', inline='True', fontsize=fontsize)
if not return_contours_only:
return fig, ax
else:
contours = c.allsegs
if ax_fig_are_None:
fig.clf()
plt.close(fig)
del fig
del ax
return contours
def annotate_pnts(txt, x, y,
textcoords='offset points',
ha='center',
va='center',
xytext=(0, 0),
bbox=dict(boxstyle='circle, pad=0.2', fc='white', alpha=0.7),
arrowprops=None,
transpose=False,
fig=None,
ax=None,
**kw):
"""
    A convenience wrapper that loops ax.annotate over a set of points.
    See the fluxing analysis for an example of how it is used.
"""
if ax is None:
fig, ax = plt.subplots()
if transpose:
y_tmp = np.copy(y)
y = np.copy(x)
x = y_tmp
    for i, text in enumerate(txt):
        ax.annotate(text,
                    xy=(x[i], y[i]),
                    textcoords=textcoords,
                    ha=ha,
                    va=va,
                    xytext=xytext,
                    bbox=bbox,
                    arrowprops=arrowprops)
return fig, ax
def vertices_for_meshgrid(x, y):
"""
Calculates the vertices of the X and Y to be used for generating
the X and Y meshgrid for matplotlib's pcolormesh
"""
x_vertices = np.zeros(np.array(x.shape) + 1)
x_vertices[1:-1] = (x[:-1] + x[1:]) / 2.0
x_vertices[0] = x[0] - (x[1] - x[0]) / 2.0
x_vertices[-1] = x[-1] + (x[-1] - x[-2]) / 2.0
# y coordinates
y_vertices = np.zeros(np.array(y.shape) + 1)
y_vertices[1:-1] = (y[:-1] + y[1:]) / 2.0
y_vertices[0] = y[0] - (y[1] - y[0]) / 2.0
y_vertices[-1] = y[-1] + (y[-1] - y[-2]) / 2.0
return x_vertices, y_vertices
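# Worked example: for x = np.array([0., 1., 2.]) the vertices computed above
# are [-0.5, 0.5, 1.5, 2.5], i.e. the midpoints between samples plus one
# extrapolated edge on each side.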
def heatmap_data_to_pcolormesh(x, y):
"""
A wrapper to re-shape X and Y data generated by `interpolate_heatmap`
    for matplotlib's pcolormesh
"""
x_vert, y_vert = vertices_for_meshgrid(x, y)
x_grid, y_grid = np.meshgrid(x_vert, y_vert)
return x_grid, y_grid
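# Example (illustrative sketch): feeding heatmap data to matplotlib's
# pcolormesh via the helpers above. The data shapes are assumptions matching
# pcolormesh's C[j, i] convention, not taken from the rest of this module.
#
#     x = np.linspace(0, 1, 11)
#     y = np.linspace(0, 2, 21)
#     z = np.random.rand(len(y), len(x))
#     x_grid, y_grid = heatmap_data_to_pcolormesh(x, y)
#     fig, ax = plt.subplots()
#     ax.pcolormesh(x_grid, y_grid, z)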
|
{
"content_hash": "1bff4bfbf449a23dba0d675daa1bef4c",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 80,
"avg_line_length": 29.978378378378377,
"alnum_prop": 0.5191128741435269,
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"id": "a62690810701b0c4c8fdf9a772743117e0dd33f7",
"size": "5546",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycqed/analysis_v2/tools/plotting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8748"
},
{
"name": "C++",
"bytes": "8802"
},
{
"name": "Cython",
"bytes": "8291"
},
{
"name": "OpenQASM",
"bytes": "15894"
},
{
"name": "Python",
"bytes": "7978715"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
}
|
import sys
from copy import deepcopy
import pytest
from paramiko import Transport
from paramiko.channel import Channel
from paramiko.sftp_client import SFTPClient
from pytest_sftpserver.sftp.server import SFTPServer
# fmt: off
CONTENT_OBJ = dict(
a=dict(
b="testfile1",
c="testfile2",
f=["testfile5", "testfile6"]
),
d="testfile3"
)
# fmt: on
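# CONTENT_OBJ describes the virtual filesystem served by the SFTP server:
# dict keys become directory entries (e.g. "/a/b" contains "testfile1") and
# list items are addressed by their index (e.g. "/a/f/0" contains
# "testfile5"), as the tests below exercise.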
@pytest.yield_fixture(scope="session")
def sftpclient(sftpserver):
transport = Transport((sftpserver.host, sftpserver.port))
transport.connect(username="a", password="b")
sftpclient = SFTPClient.from_transport(transport)
yield sftpclient
sftpclient.close()
transport.close()
@pytest.yield_fixture
def content(sftpserver):
with sftpserver.serve_content(deepcopy(CONTENT_OBJ)):
yield
@pytest.mark.xfail(sys.version_info < (2, 7), reason="Intermittently broken on 2.6")
def test_sftpserver_bound(sftpserver):
assert sftpserver.wait_for_bind(1)
def test_sftpserver_available(sftpserver):
assert isinstance(sftpserver, SFTPServer)
assert sftpserver.is_alive()
assert str(sftpserver.port) in sftpserver.url
def test_sftpserver_connect(sftpclient):
assert isinstance(sftpclient.sock, Channel)
def test_sftpserver_listdir_empty(sftpclient):
assert sftpclient.listdir("/") == []
def test_sftpserver_listdir(content, sftpclient):
assert set(sftpclient.listdir("/")) == set(["a", "d"])
def test_sftpserver_get_file_dict(content, sftpclient):
with sftpclient.open("/a/b", "r") as f:
assert f.read() == b"testfile1"
def test_sftpserver_get_file_list(content, sftpclient):
with sftpclient.open("/a/f/0", "r") as f:
assert f.read() == b"testfile5"
@pytest.mark.parametrize(
("offset", "data", "expected"),
[
(4, "test", b"testtest6"),
(5, "test", b"testftest"),
(9, "test", b"testfile6test"),
(10, "test", b"testfile6\x00test"),
],
)
def test_sftpserver_put_file_offset(content, sftpclient, offset, data, expected):
with sftpclient.open("/a/f/1", "rw") as f:
f.seek(offset)
f.write(data)
f.seek(0)
assert f.read() == expected
def test_sftpserver_put_file_dict(content, sftpclient):
with sftpclient.open("/e", "w") as f:
f.write("testfile4")
assert set(sftpclient.listdir("/")) == set(["a", "d", "e"])
def test_sftpserver_put_file_list(content, sftpclient):
with sftpclient.open("/a/f/2", "w") as f:
f.write("testfile7")
assert set(sftpclient.listdir("/a/f")) == set(["0", "1", "2"])
def test_sftpserver_put_file(content, sftpclient, tmpdir):
tmpfile = tmpdir.join("test.txt")
tmpfile.write("Hello world")
sftpclient.put(str(tmpfile), "/a/test.txt")
assert set(sftpclient.listdir("/a")) == set(["test.txt", "b", "c", "f"])
def test_sftpserver_round_trip(content, sftpclient, tmpdir):
tmpfile = tmpdir.join("test.txt")
thetext = u"Just some plain, normal text"
tmpfile.write(thetext)
sftpclient.put(str(tmpfile), "/a/test.txt")
with sftpclient.open("/a/test.txt", "r") as result:
assert result.read() == thetext.encode()
def test_sftpserver_remove_file_dict(content, sftpclient):
sftpclient.remove("/a/c")
assert set(sftpclient.listdir("/a")) == set(["b", "f"])
def test_sftpserver_remove_file_list(content, sftpclient):
sftpclient.remove("/a/f/1")
assert set(sftpclient.listdir("/a/f")) == set(["0"])
def test_sftpserver_remove_file_list_fail(content, sftpclient):
with pytest.raises(IOError):
sftpclient.remove("/a/f/10")
def test_sftpserver_rename_file(content, sftpclient):
sftpclient.rename("/a/c", "/a/x")
assert set(sftpclient.listdir("/a")) == set(["b", "f", "x"])
def test_sftpserver_rename_file_fail_source(content, sftpclient):
with pytest.raises(IOError):
sftpclient.rename("/a/NOTHERE", "/a/x")
def test_sftpserver_rename_file_fail_target(content, sftpclient):
with pytest.raises(IOError):
sftpclient.rename("/a/c", "/a/NOTHERE/x")
def test_sftpserver_rmdir(content, sftpclient):
sftpclient.rmdir("/a")
assert set(sftpclient.listdir("/")) == set(["d"])
def test_sftpserver_mkdir(content, sftpclient):
sftpclient.mkdir("/a/x")
assert set(sftpclient.listdir("/a")) == set(["b", "c", "f", "x"])
def test_sftpserver_mkdir_existing(content, sftpclient):
with pytest.raises(IOError):
sftpclient.mkdir("/a")
assert set(sftpclient.listdir("/a")) == set(["b", "c", "f"])
def test_sftpserver_chmod(content, sftpclient):
# coverage
sftpclient.chmod("/a/b", 1)
with sftpclient.open("/a/b", "r") as f:
f.chmod(1)
def test_sftpserver_stat_non_str(sftpserver, sftpclient):
with sftpserver.serve_content(dict(a=123)):
assert sftpclient.stat("/a").st_size == 3
def test_sftpserver_exception(sftpclient, sftpserver):
with sftpserver.serve_content({"a": lambda: 1 / 0}):
with pytest.raises(IOError):
sftpclient.open("/a", "r")
def test_sftpserver_stat_non_existing(sftpclient, sftpserver):
with sftpserver.serve_content({}):
with pytest.raises(IOError):
sftpclient.stat("/a")
def test_sftpserver_chmod_non_existing(sftpclient, sftpserver):
with sftpserver.serve_content({}):
with pytest.raises(IOError):
sftpclient.chmod("/a", 600)
|
{
"content_hash": "314abb4b3ff26b55183142584ff48f1e",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 84,
"avg_line_length": 28.352631578947367,
"alnum_prop": 0.6547243363653239,
"repo_name": "ulope/pytest-sftpserver",
"id": "4b23f0be1451d571ab5a430788f94e6cbbeae964",
"size": "5387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sftp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "506"
},
{
"name": "Python",
"bytes": "27585"
}
],
"symlink_target": ""
}
|
from six import text_type as unicode
import os
from robot.errors import DataError
from .filelogger import FileLogger
from .loggerhelper import AbstractLogger, AbstractLoggerProxy
from .monitor import CommandLineMonitor
from .stdoutlogsplitter import StdoutLogSplitter
class Logger(AbstractLogger):
"""A global logger proxy to which new loggers may be registered.
Whenever something is written to LOGGER in code, all registered loggers are
notified. Messages are also cached and cached messages written to new
loggers when they are registered.
Tools using Robot Framework's internal modules should register their own
    loggers, at minimum to get notified about errors and warnings. A shortcut
    for getting errors/warnings onto the console is 'register_console_logger'.
"""
def __init__(self, register_console_logger=True):
self._loggers = LoggerCollection()
self._message_cache = []
self._console_logger = None
self._started_keywords = 0
if register_console_logger:
self.register_console_logger()
def disable_message_cache(self):
self._message_cache = None
def register_logger(self, *loggers):
for log in loggers:
logger = self._loggers.register_regular_logger(log)
self._relay_cached_messages_to(logger)
def register_context_changing_logger(self, logger):
log = self._loggers.register_context_changing_logger(logger)
self._relay_cached_messages_to(log)
def _relay_cached_messages_to(self, logger):
if self._message_cache:
for msg in self._message_cache[:]:
logger.message(msg)
def unregister_logger(self, *loggers):
for log in loggers:
self._loggers.unregister_logger(log)
def register_console_logger(self, width=78, colors='AUTO', markers='AUTO',
stdout=None, stderr=None):
logger = CommandLineMonitor(width, colors, markers, stdout, stderr)
if self._console_logger:
self._loggers.unregister_logger(self._console_logger)
self._console_logger = logger
self._loggers.register_regular_logger(logger)
def unregister_console_logger(self):
if not self._console_logger:
return None
logger = self._console_logger
self._loggers.unregister_logger(logger)
self._console_logger = None
return logger
# TODO: Remove in RF 2.9. Not used outside utests since 2.8.4 but may
# be used by external tools. Need to check that before removal.
disable_automatic_console_logger = unregister_console_logger
def register_file_logger(self, path=None, level='INFO'):
if not path:
path = os.environ.get('ROBOT_SYSLOG_FILE', 'NONE')
level = os.environ.get('ROBOT_SYSLOG_LEVEL', level)
if path.upper() == 'NONE':
return
try:
logger = FileLogger(path, level)
except DataError as err:
self.error("Opening syslog file '%s' failed: %s" % (path, unicode(err)))
else:
self.register_logger(logger)
def message(self, msg):
"""Messages about what the framework is doing, warnings, errors, ..."""
for logger in self._loggers.all_loggers():
logger.message(msg)
if self._message_cache is not None:
self._message_cache.append(msg)
def _log_message(self, msg):
"""Log messages written (mainly) by libraries"""
for logger in self._loggers.all_loggers():
logger.log_message(msg)
if msg.level == 'WARN':
self.message(msg)
log_message = message
def log_output(self, output):
for msg in StdoutLogSplitter(output):
self.log_message(msg)
def enable_library_import_logging(self):
self._prev_log_message = self.log_message
self.log_message = self.message
def disable_library_import_logging(self):
self.log_message = self._prev_log_message
def output_file(self, name, path):
"""Finished output, report, log, debug, or xunit file"""
for logger in self._loggers.all_loggers():
logger.output_file(name, path)
def close(self):
for logger in self._loggers.all_loggers():
logger.close()
self._loggers = LoggerCollection()
self._message_cache = []
def start_suite(self, suite):
for logger in self._loggers.starting_loggers():
logger.start_suite(suite)
def end_suite(self, suite):
for logger in self._loggers.ending_loggers():
logger.end_suite(suite)
def start_test(self, test):
for logger in self._loggers.starting_loggers():
logger.start_test(test)
def end_test(self, test):
for logger in self._loggers.ending_loggers():
logger.end_test(test)
def start_keyword(self, keyword):
self._started_keywords += 1
self.log_message = self._log_message
for logger in self._loggers.starting_loggers():
logger.start_keyword(keyword)
def end_keyword(self, keyword):
self._started_keywords -= 1
for logger in self._loggers.ending_loggers():
logger.end_keyword(keyword)
if not self._started_keywords:
self.log_message = self.message
def __iter__(self):
return iter(self._loggers)
class LoggerCollection(object):
def __init__(self):
self._regular_loggers = []
self._context_changing_loggers = []
def register_regular_logger(self, logger):
self._regular_loggers.append(_LoggerProxy(logger))
return self._regular_loggers[-1]
def register_context_changing_logger(self, logger):
self._context_changing_loggers.append(_LoggerProxy(logger))
return self._context_changing_loggers[-1]
# TODO: Remove in RF 2.9. Doesn't seem to be used anywhere since 2.8.4.
def remove_first_regular_logger(self):
return self._regular_loggers.pop(0)
def unregister_logger(self, logger):
self._regular_loggers = [proxy for proxy in self._regular_loggers
if proxy.logger is not logger]
self._context_changing_loggers = [proxy for proxy
in self._context_changing_loggers
if proxy.logger is not logger]
def starting_loggers(self):
return self.all_loggers()
def ending_loggers(self):
return self._regular_loggers + self._context_changing_loggers
def all_loggers(self):
return self._context_changing_loggers + self._regular_loggers
def __iter__(self):
return iter(self.all_loggers())
class _LoggerProxy(AbstractLoggerProxy):
_methods = ['message', 'log_message', 'output_file', 'close',
'start_suite', 'end_suite', 'start_test', 'end_test',
'start_keyword', 'end_keyword']
LOGGER = Logger()
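# Example (hypothetical sketch): a tool embedding Robot Framework can attach
# a minimal listener to receive messages. `MyListener` is illustrative, and
# the proxy is assumed here to tolerate listeners that implement only a
# subset of the proxied methods.
#
#     class MyListener(object):
#         def message(self, msg):
#             if msg.level in ('WARN', 'ERROR'):
#                 print(msg.message)
#
#     LOGGER.register_logger(MyListener())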
|
{
"content_hash": "60d09f25e186b1537781456ac9238587",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 84,
"avg_line_length": 34.866336633663366,
"alnum_prop": 0.6284253869089876,
"repo_name": "userzimmermann/robotframework-python3",
"id": "d85cfc40e0954b9bf2e1d55369b2936d3209dbbb",
"size": "7651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/output/logger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16539"
},
{
"name": "HTML",
"bytes": "1011996"
},
{
"name": "Java",
"bytes": "58737"
},
{
"name": "JavaScript",
"bytes": "159003"
},
{
"name": "Python",
"bytes": "2018310"
},
{
"name": "RobotFramework",
"bytes": "4288"
},
{
"name": "Shell",
"bytes": "1093"
}
],
"symlink_target": ""
}
|
import datetime
import trainer.corpora as crp
import trainer.features as ftr
import trainer.classifier_test as cls
import os
# vars
type = "bigram-stop"
nltk_run = True
sklearn_run = False
COUNT = 5000
cut = int((COUNT / 2) * 3 / 4)
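# With COUNT = 5000 there are 2500 examples per class, so cut = 1875: a
# 75/25 train/test split within each class (see trainfeats/testfeats below).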
array = [[True, False], [False, True], [True, True]]
def run(dataset):
nlt = dict()
skl = dict()
dir = "output/" + dataset + "/" + type + "/"
os.makedirs(dir, exist_ok=True)
# file
for variable in array:
var_name = str(variable[0]) + str(variable[1])
if nltk_run:
            nlt_file = out_dir + dataset + "-" + feature_type + "-" + var_name + "-nlt.csv"
nlt[var_name] = open(nlt_file, 'a')
nlt[var_name].write(str(datetime.datetime.today()) + "\n")
if sklearn_run:
            skl_file = out_dir + dataset + "-" + feature_type + "-" + var_name + "-skl.csv"
skl[var_name] = open(skl_file, 'a')
skl[var_name].write(str(datetime.datetime.today()) + "\n")
# cycle
for x in range(0, 10):
print(x)
corpora = crp.Corpora(dataset, count=COUNT, shuffle=True)
for variable in array:
print(str(variable))
var_name = str(variable[0]) + str(variable[1])
features = ftr.Features(corpora, total=COUNT, bigram=variable[0], stop=variable[1])
posfeats = features.get_features_pos()
negfeats = features.get_fearures_neg()
trainfeats = negfeats[:cut] + posfeats[:cut]
testfeats = negfeats[cut:] + posfeats[cut:]
nlt_output, skl_output = cls.train(trainfeats, testfeats, nlt=nltk_run, skl=sklearn_run)
if nltk_run:
print(str(nlt_output))
nlt[var_name].write(nlt_output)
nlt[var_name].flush()
if sklearn_run:
print(str(skl_output))
skl[var_name].write(skl_output)
skl[var_name].flush()
dataset_array = ["stwits"]
for dataset in dataset_array:
run(dataset)
|
{
"content_hash": "e0960b508406e8a5eb549be2415bcfb3",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 100,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.554726368159204,
"repo_name": "bromjiri/Presto",
"id": "e64f4b99f4787afc87b9e820149868a0b29ab421",
"size": "2010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trainer/tests/bigram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "243637"
},
{
"name": "Shell",
"bytes": "1066"
}
],
"symlink_target": ""
}
|
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
from . import movies
from . import politics
from . import weather
api = movies.add_resources(api)
api = politics.add_resources(api)
api = weather.add_resources(api)
|
{
"content_hash": "df21c4d45c209a1ab0be791c1850b09d",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 18.066666666666666,
"alnum_prop": 0.7380073800738007,
"repo_name": "microstack/api_gateway",
"id": "54295518d2dea526c6ecfcc5dddc00227e45c2f7",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_gateway/api/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5978"
}
],
"symlink_target": ""
}
|
__author__ = 'Zhang Shaojun'
# listening address and port
CC_HOST = '172.27.0.2'
CC_PORT = 60521
# max message id
MAX_XID = 0xffffffff
# version of TransFormed Layered Controller
TFLC_VERSION_1 = 1
# packet_out timeout event
PACKET_OUT_TIMEOUT = 65536
# load_report interval
LOAD_REPORT_INTERVAL = 10
# the "window" datapaths and their corresponding out ports
DPID_2_IS_WIN = {1: False, 2: False, 3: True, 4: True, 5: False, 6: False}
DPID_2_OUT_PORT = {3: 4, 4: 3}
|
{
"content_hash": "0cd82862d157f4bd533e9a661d84d13a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 21.045454545454547,
"alnum_prop": 0.6976241900647948,
"repo_name": "halexan/Headquarters",
"id": "e91baf72980d92cf0bd7baf42621f49524909ad1",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/lc_client/cfg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "655285"
},
{
"name": "Shell",
"bytes": "1128"
}
],
"symlink_target": ""
}
|
from django import VERSION as djangoVersion
if djangoVersion[:2] >= (1, 8):
from django.db.backends.base.introspection import BaseDatabaseIntrospection, TableInfo
else:
from django.db.backends import BaseDatabaseIntrospection
from sqlanydb import ProgrammingError, OperationalError
import re
import sqlanydb
class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = { sqlanydb.DT_DATE : 'DateField',
sqlanydb.DT_TIME : 'DateTimeField',
sqlanydb.DT_TIMESTAMP : 'DateTimeField',
sqlanydb.DT_VARCHAR : 'CharField',
sqlanydb.DT_FIXCHAR : 'CharField',
sqlanydb.DT_LONGVARCHAR : 'CharField',
sqlanydb.DT_STRING : 'CharField',
sqlanydb.DT_DOUBLE : 'FloatField',
sqlanydb.DT_FLOAT : 'FloatField',
sqlanydb.DT_DECIMAL : 'IntegerField',
sqlanydb.DT_INT : 'IntegerField',
sqlanydb.DT_SMALLINT : 'IntegerField',
sqlanydb.DT_BINARY : 'BlobField',
sqlanydb.DT_LONGBINARY : 'BlobField',
sqlanydb.DT_TINYINT : 'IntegerField',
sqlanydb.DT_BIGINT : 'BigIntegerField',
sqlanydb.DT_UNSINT : 'IntegerField',
sqlanydb.DT_UNSSMALLINT : 'IntegerField',
sqlanydb.DT_UNSBIGINT : 'BigIntegerField',
sqlanydb.DT_BIT : 'IntegerField',
sqlanydb.DT_LONGNVARCHAR : 'CharField'
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute( "SELECT table_name,table_type FROM sys.SYSTAB WHERE "
"creator = USER_ID() and table_type in (1,2,3,4,21)" )
if djangoVersion[:2] < (1, 8):
return [row[0] for row in cursor.fetchall()]
return [TableInfo( row[0], 'v' if row[1] in ['2','21'] else 't' )
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
cursor.execute("SELECT FIRST * FROM %s" %
self.connection.ops.quote_name(table_name))
return tuple((c[0], t, None, c[3], c[4], c[5], int(c[6]) == 1) for c, t in cursor.columns())
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
my_field_dict = self._name_to_index(cursor, table_name)
constraints = []
relations = {}
cursor.execute("""
SELECT (fidx.column_id - 1), t2.table_name, (pidx.column_id - 1) FROM SYSTAB t1
INNER JOIN SYSFKEY f ON f.foreign_table_id = t1.table_id
INNER JOIN SYSTAB t2 ON t2.table_id = f.primary_table_id
INNER JOIN SYSIDXCOL fidx ON fidx.table_id = f.foreign_table_id AND fidx.index_id = f.foreign_index_id
INNER JOIN SYSIDXCOL pidx ON pidx.table_id = f.primary_table_id AND pidx.index_id = f.primary_index_id
WHERE t1.table_name = %s""", [table_name])
constraints.extend(cursor.fetchall())
for my_field_index, other_table, other_field_index in constraints:
relations[my_field_index] = (other_field_index, other_table)
return relations
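    # Example of the structure returned above: {2: (0, 'auth_user')} means
    # column 2 of table_name references column 0 of the auth_user table.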
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# We need to skip multi-column indexes.
cursor.execute("""
select max(c.column_name),
max(ix.index_category),
max(ix."unique")
from SYSIDX ix, SYSTABLE t, SYSIDXCOL ixc, SYSCOLUMN c
where ix.table_id = t.table_id
and ixc.table_id = t.table_id
and ixc.index_id = ix.index_id
and ixc.table_id = c.table_id
and ixc.column_id = c.column_id
and t.table_name = %s
group by ix.index_id
having count(*) = 1
order by ix.index_id
""", [table_name])
indexes = {}
for col_name, cat, unique in cursor.fetchall():
indexes[col_name] = {
'primary_key': (cat == 1),
'unique': (unique == 1 or unique == 2) }
return indexes
|
{
"content_hash": "73f68f705d380af605f57789ab3060f2",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 114,
"avg_line_length": 48.42201834862385,
"alnum_prop": 0.5496400151572566,
"repo_name": "sqlanywhere/sqlany-django",
"id": "451af0216c6aab18f2dbe07c662b687d8e5a3ee3",
"size": "5278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlany_django/introspection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45814"
}
],
"symlink_target": ""
}
|
from freezegun import freeze_time
from CTFd.models import Awards, Fails, Solves, Users
from CTFd.schemas.users import UserSchema
from CTFd.utils import set_config
from CTFd.utils.crypto import verify_password
from tests.helpers import (
create_ctfd,
destroy_ctfd,
gen_award,
gen_challenge,
gen_fail,
gen_solve,
gen_user,
login_as_user,
register_user,
simulate_user_activity,
)
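# Note on IDs used throughout these tests: create_ctfd() creates the admin
# account first (user ID 1), so the first account added with register_user()
# gets user ID 2 and the second gets ID 3.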
def test_api_users_get_public():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config("account_visibility", "public")
r = client.get("/api/v1/users")
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users")
assert r.status_code == 302
set_config("account_visibility", "admins")
r = client.get("/api/v1/users")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_get_private():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config("account_visibility", "public")
r = client.get("/api/v1/users")
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users")
assert r.status_code == 302
set_config("account_visibility", "admins")
r = client.get("/api/v1/users")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_get_admins():
"""Can a user get /api/v1/users if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config("account_visibility", "public")
r = client.get("/api/v1/users")
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users")
assert r.status_code == 302
set_config("account_visibility", "admins")
r = client.get("/api/v1/users")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_users_post_non_admin():
"""Can a user post /api/v1/users if not admin"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.post("/api/v1/users", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_users_post_admin():
"""Can a user post /api/v1/users if admin"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, "admin") as client:
# Create user
r = client.post(
"/api/v1/users",
json={"name": "user", "email": "user@user.com", "password": "password"},
)
assert r.status_code == 200
# Make sure password was hashed properly
user = Users.query.filter_by(email="user@user.com").first()
assert user
assert verify_password("password", user.password)
# Make sure user can login with the creds
client = login_as_user(app)
r = client.get("/profile")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_users_post_admin_with_attributes():
"""Can a user post /api/v1/users with user settings"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, "admin") as client:
# Create user
r = client.post(
"/api/v1/users",
json={
"name": "user",
"email": "user@user.com",
"password": "password",
"banned": True,
"hidden": True,
"verified": True,
},
)
assert r.status_code == 200
# Make sure password was hashed properly
user = Users.query.filter_by(email="user@user.com").first()
assert user
assert verify_password("password", user.password)
assert user.banned
assert user.hidden
assert user.verified
destroy_ctfd(app)
def test_api_users_post_admin_duplicate_information():
"""Can an admin create a user with duplicate information"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app, "admin") as client:
# Duplicate email
r = client.post(
"/api/v1/users",
json={
"name": "user2",
"email": "user@examplectf.com",
"password": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["email"]
assert resp["success"] is False
assert Users.query.count() == 2
# Duplicate user
r = client.post(
"/api/v1/users",
json={
"name": "user",
"email": "user2@examplectf.com",
"password": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["name"]
assert resp["success"] is False
assert Users.query.count() == 2
destroy_ctfd(app)
def test_api_users_patch_admin_duplicate_information():
"""Can an admin modify a user with duplicate information"""
app = create_ctfd()
with app.app_context():
register_user(
app, name="user1", email="user1@examplectf.com", password="password"
)
register_user(
app, name="user2", email="user2@examplectf.com", password="password"
)
with login_as_user(app, "admin") as client:
# Duplicate name
r = client.patch(
"/api/v1/users/1",
json={
"name": "user2",
"email": "user@examplectf.com",
"password": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["name"]
assert resp["success"] is False
# Duplicate email
r = client.patch(
"/api/v1/users/1",
json={
"name": "user",
"email": "user2@examplectf.com",
"password": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["email"]
assert resp["success"] is False
assert Users.query.count() == 3
destroy_ctfd(app)
def test_api_users_patch_duplicate_information():
"""Can a user modify their information to another user's"""
app = create_ctfd()
with app.app_context():
register_user(
app, name="user1", email="user1@examplectf.com", password="password"
)
register_user(
app, name="user2", email="user2@examplectf.com", password="password"
)
with login_as_user(app, "user1") as client:
# Duplicate email
r = client.patch(
"/api/v1/users/me",
json={
"name": "user1",
"email": "user2@examplectf.com",
"confirm": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["email"]
assert resp["success"] is False
# Duplicate user
r = client.patch(
"/api/v1/users/me",
json={
"name": "user2",
"email": "user1@examplectf.com",
"confirm": "password",
},
)
resp = r.get_json()
assert r.status_code == 400
assert resp["errors"]["name"]
assert resp["success"] is False
assert Users.query.count() == 3
destroy_ctfd(app)
def test_api_team_get_public():
"""Can a user get /api/v1/team/<user_id> if users are public"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
set_config("account_visibility", "public")
gen_user(app.db)
r = client.get("/api/v1/users/2")
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users/2")
assert r.status_code == 302
set_config("account_visibility", "admins")
r = client.get("/api/v1/users/2")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_team_get_private():
"""Can a user get /api/v1/users/<user_id> if users are private"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
set_config("account_visibility", "public")
r = client.get("/api/v1/users/2")
print(r.__dict__)
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users/2")
assert r.status_code == 200
set_config("account_visibility", "admins")
r = client.get("/api/v1/users/2")
assert r.status_code == 404
destroy_ctfd(app)
def test_api_team_get_admin():
"""Can a user get /api/v1/users/<user_id> if users are viewed by admins only"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, "admin") as client:
gen_user(app.db)
set_config("account_visibility", "public")
r = client.get("/api/v1/users/2")
assert r.status_code == 200
set_config("account_visibility", "private")
r = client.get("/api/v1/users/2")
assert r.status_code == 200
set_config("account_visibility", "admins")
r = client.get("/api/v1/users/2")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_patch_non_admin():
"""Can a user patch /api/v1/users/<user_id> if not admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with app.test_client() as client:
r = client.patch("/api/v1/users/2", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_patch_admin():
"""Can a user patch /api/v1/users/<user_id> if admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app, "admin") as client:
r = client.patch(
"/api/v1/users/2",
json={
"name": "user",
"email": "user@examplectf.com",
"password": "password",
"country": "US",
"verified": True,
},
)
assert r.status_code == 200
user_data = r.get_json()["data"]
assert user_data["country"] == "US"
assert user_data["verified"] is True
destroy_ctfd(app)
def test_api_user_delete_non_admin():
"""Can a user delete /api/v1/users/<user_id> if not admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
with app.test_client() as client:
r = client.delete("/api/v1/teams/2", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_delete_admin():
"""Can a user patch /api/v1/users/<user_id> if admin"""
app = create_ctfd()
with app.app_context():
register_user(app)
user = Users.query.filter_by(id=2).first()
simulate_user_activity(app.db, user=user)
with login_as_user(app, "admin") as client:
r = client.delete("/api/v1/users/2", json="")
assert r.status_code == 200
assert r.get_json().get("data") is None
assert Users.query.filter_by(id=2).first() is None
destroy_ctfd(app)
def test_api_user_get_me_not_logged_in():
"""Can a user get /api/v1/users/me if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/api/v1/users/me")
assert r.status_code == 302
destroy_ctfd(app)
def test_api_user_get_me_logged_in():
"""Can a user get /api/v1/users/me if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/me")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_patch_me_not_logged_in():
"""Can a user patch /api/v1/users/me if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.patch("/api/v1/users/me", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_patch_me_logged_in():
"""Can a user patch /api/v1/users/me if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.patch(
"/api/v1/users/me",
json={
"name": "user",
"email": "user@examplectf.com",
"password": "password",
"confirm": "password",
"country": "US",
},
)
assert r.status_code == 200
assert r.get_json()["data"]["country"] == "US"
destroy_ctfd(app)
def test_api_admin_user_patch_me_logged_in():
"""Can an admin patch /api/v1/users/me"""
app = create_ctfd()
with app.app_context():
with login_as_user(app, name="admin") as client:
r = client.patch(
"/api/v1/users/me",
json={
"name": "user",
"email": "user@examplectf.com",
"password": "password",
"confirm": "password",
"country": "US",
},
)
assert r.status_code == 200
assert r.get_json()["data"]["country"] == "US"
user = Users.query.filter_by(id=1).first()
assert user.name == "user"
assert user.email == "user@examplectf.com"
destroy_ctfd(app)
def test_api_user_change_name():
"""Can a user change their name via the API"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.patch("/api/v1/users/me", json={"name": "user2"})
assert r.status_code == 200
resp = r.get_json()
assert resp["data"]["name"] == "user2"
assert resp["success"] is True
r = client.patch("/api/v1/users/me", json={"name": None})
resp = r.get_json()
print(resp)
assert r.status_code == 400
assert resp["errors"]["name"] == ["Field may not be null."]
assert resp["success"] is False
set_config("name_changes", False)
r = client.patch("/api/v1/users/me", json={"name": "new_name"})
assert r.status_code == 400
resp = r.get_json()
assert "name" in resp["errors"]
assert resp["success"] is False
set_config("name_changes", True)
r = client.patch("/api/v1/users/me", json={"name": "new_name"})
assert r.status_code == 200
resp = r.get_json()
assert resp["data"]["name"] == "new_name"
assert resp["success"] is True
destroy_ctfd(app)
def test_api_user_change_email():
"""Test that users can change their email via the API"""
app = create_ctfd()
with app.app_context():
register_user(app)
user = Users.query.filter_by(id=2).first()
app.db.session.commit()
with login_as_user(app) as client:
# Test users can't submit null
r = client.patch(
"/api/v1/users/me", json={"email": None, "confirm": "password"}
)
resp = r.get_json()
print(resp)
assert r.status_code == 400
assert resp["errors"]["email"] == ["Field may not be null."]
# Test users can exercise the API
r = client.patch(
"/api/v1/users/me",
json={"email": "new_email@email.com", "confirm": "password"},
)
assert r.status_code == 200
resp = r.get_json()
assert resp["data"]["email"] == "new_email@email.com"
assert resp["success"] is True
user = Users.query.filter_by(id=2).first()
assert user.email == "new_email@email.com"
destroy_ctfd(app)
def test_api_user_change_verify_email():
"""Test that users are marked unconfirmed if they change their email and verify_emails is turned on"""
app = create_ctfd()
with app.app_context():
set_config("verify_emails", True)
register_user(app)
user = Users.query.filter_by(id=2).first()
user.verified = True
app.db.session.commit()
with login_as_user(app) as client:
r = client.patch(
"/api/v1/users/me",
json={"email": "new_email@email.com", "confirm": "password"},
)
assert r.status_code == 200
resp = r.get_json()
assert resp["data"]["email"] == "new_email@email.com"
assert resp["success"] is True
user = Users.query.filter_by(id=2).first()
assert user.verified is False
destroy_ctfd(app)
def test_api_user_change_email_under_whitelist():
"""Test that users can only change emails to ones in the whitelist"""
app = create_ctfd()
with app.app_context():
register_user(app)
set_config(
"domain_whitelist", "whitelisted.com, whitelisted.org, whitelisted.net"
)
with login_as_user(app) as client:
r = client.patch(
"/api/v1/users/me",
json={"email": "new_email@email.com", "confirm": "password"},
)
assert r.status_code == 400
resp = r.get_json()
assert resp["errors"]["email"]
assert resp["success"] is False
r = client.patch(
"/api/v1/users/me",
json={"email": "new_email@whitelisted.com", "confirm": "password"},
)
assert r.status_code == 200
resp = r.get_json()
assert resp["data"]["email"] == "new_email@whitelisted.com"
assert resp["success"] is True
destroy_ctfd(app)
def test_api_user_get_me_solves_not_logged_in():
"""Can a user get /api/v1/users/me/solves if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/api/v1/users/me/solves", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_solves_logged_in():
"""Can a user get /api/v1/users/me/solves if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/me/solves")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_solves():
"""Can a user get /api/v1/users/<user_id>/solves if logged in"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/2/solves")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_solves_after_freeze_time():
"""Can a user get /api/v1/users/<user_id>/solves after freeze time"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app, name="user1", email="user1@examplectf.com")
register_user(app, name="user2", email="user2@examplectf.com")
# Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
set_config("freeze", "1507262400")
with freeze_time("2017-10-4"):
chal = gen_challenge(app.db)
chal_id = chal.id
gen_solve(app.db, user_id=2, challenge_id=chal_id)
chal2 = gen_challenge(app.db)
chal2_id = chal2.id
with freeze_time("2017-10-8"):
chal2 = gen_solve(app.db, user_id=2, challenge_id=chal2_id)
# There should now be two solves assigned to the same user.
assert Solves.query.count() == 2
# User 2 should have 2 solves when seen by themselves
client = login_as_user(app, name="user1")
r = client.get("/api/v1/users/me/solves")
data = r.get_json()["data"]
assert len(data) == 2
# User 2 should have 1 solve when seen by another user
client = login_as_user(app, name="user2")
r = client.get("/api/v1/users/2/solves")
data = r.get_json()["data"]
assert len(data) == 1
# Admins should see all solves for the user
admin = login_as_user(app, name="admin")
r = admin.get("/api/v1/users/2/solves")
data = r.get_json()["data"]
assert len(data) == 2
destroy_ctfd(app)
def test_api_user_get_me_fails_not_logged_in():
"""Can a user get /api/v1/users/me/fails if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/api/v1/users/me/fails", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_fails_logged_in():
"""Can a user get /api/v1/users/me/fails if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/me/fails")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_fails():
"""Can a user get /api/v1/users/<user_id>/fails if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/2/fails")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_fails_after_freeze_time():
"""Can a user get /api/v1/users/<user_id>/fails after freeze time"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app, name="user1", email="user1@examplectf.com")
register_user(app, name="user2", email="user2@examplectf.com")
# Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
set_config("freeze", "1507262400")
with freeze_time("2017-10-4"):
chal = gen_challenge(app.db)
chal_id = chal.id
chal2 = gen_challenge(app.db)
chal2_id = chal2.id
gen_fail(app.db, user_id=2, challenge_id=chal_id)
with freeze_time("2017-10-8"):
chal2 = gen_fail(app.db, user_id=2, challenge_id=chal2_id)
# There should now be two fails assigned to the same user.
assert Fails.query.count() == 2
# User 2 should have 2 fail when seen by themselves
client = login_as_user(app, name="user1")
r = client.get("/api/v1/users/me/fails")
assert r.get_json()["meta"]["count"] == 2
# User 2 should have 1 fail when seen by another user
client = login_as_user(app, name="user2")
r = client.get("/api/v1/users/2/fails")
assert r.get_json()["meta"]["count"] == 1
# Admins should see all fails for the user
admin = login_as_user(app, name="admin")
r = admin.get("/api/v1/users/2/fails")
assert r.get_json()["meta"]["count"] == 2
destroy_ctfd(app)
def test_api_user_get_me_awards_not_logged_in():
"""Can a user get /api/v1/users/me/awards if not logged in"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/api/v1/users/me/awards", json="")
assert r.status_code == 403
destroy_ctfd(app)
def test_api_user_get_me_awards_logged_in():
"""Can a user get /api/v1/users/me/awards if logged in"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/me/awards")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_awards():
"""Can a user get /api/v1/users/<user_id>/awards if logged in"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.get("/api/v1/users/2/awards")
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_awards_after_freeze_time():
"""Can a user get /api/v1/users/<user_id>/awards after freeze time"""
app = create_ctfd(user_mode="users")
with app.app_context():
register_user(app, name="user1", email="user1@examplectf.com")
register_user(app, name="user2", email="user2@examplectf.com")
# Friday, October 6, 2017 12:00:00 AM GMT-04:00 DST
set_config("freeze", "1507262400")
with freeze_time("2017-10-4"):
gen_award(app.db, user_id=2)
with freeze_time("2017-10-8"):
gen_award(app.db, user_id=2)
# There should now be two awards assigned to the same user.
assert Awards.query.count() == 2
# User 2 should have 2 awards when seen by themselves
client = login_as_user(app, name="user1")
r = client.get("/api/v1/users/me/awards")
data = r.get_json()["data"]
assert len(data) == 2
# User 2 should have 1 award when seen by another user
client = login_as_user(app, name="user2")
r = client.get("/api/v1/users/2/awards")
data = r.get_json()["data"]
assert len(data) == 1
# Admins should see all awards for the user
admin = login_as_user(app, name="admin")
r = admin.get("/api/v1/users/2/awards")
data = r.get_json()["data"]
assert len(data) == 2
destroy_ctfd(app)
def test_api_accessing_hidden_users():
"""Hidden users should not be visible to normal users, only to admins"""
app = create_ctfd()
with app.app_context():
register_user(app, name="visible_user", email="visible_user@examplectf.com")
register_user(
app, name="hidden_user", email="hidden_user@examplectf.com"
) # ID 3
user = Users.query.filter_by(name="hidden_user").first()
user.hidden = True
app.db.session.commit()
with login_as_user(app, name="visible_user") as client:
list_users = client.get("/api/v1/users").get_json()["data"]
assert len(list_users) == 1
assert client.get("/api/v1/users/3").status_code == 404
assert client.get("/api/v1/users/3/solves").status_code == 404
assert client.get("/api/v1/users/3/fails").status_code == 404
assert client.get("/api/v1/users/3/awards").status_code == 404
with login_as_user(app, name="admin") as client:
# Admins see the user in lists
list_users = client.get("/api/v1/users?view=admin").get_json()["data"]
assert len(list_users) == 3
assert client.get("/api/v1/users/3").status_code == 200
assert client.get("/api/v1/users/3/solves").status_code == 200
assert client.get("/api/v1/users/3/fails").status_code == 200
assert client.get("/api/v1/users/3/awards").status_code == 200
destroy_ctfd(app)
def test_api_accessing_banned_users():
"""Banned users should not be visible to normal users, only to admins"""
app = create_ctfd()
with app.app_context():
register_user(app, name="visible_user", email="visible_user@examplectf.com")
register_user(
app, name="banned_user", email="banned_user@examplectf.com"
) # ID 3
user = Users.query.filter_by(name="banned_user").first()
user.banned = True
app.db.session.commit()
with login_as_user(app, name="visible_user") as client:
list_users = client.get("/api/v1/users").get_json()["data"]
assert len(list_users) == 1
assert client.get("/api/v1/users/3").status_code == 404
assert client.get("/api/v1/users/3/solves").status_code == 404
assert client.get("/api/v1/users/3/fails").status_code == 404
assert client.get("/api/v1/users/3/awards").status_code == 404
with login_as_user(app, name="admin") as client:
# Admins see the user in lists
list_users = client.get("/api/v1/users?view=admin").get_json()["data"]
assert len(list_users) == 3
assert client.get("/api/v1/users/3").status_code == 200
assert client.get("/api/v1/users/3/solves").status_code == 200
assert client.get("/api/v1/users/3/fails").status_code == 200
assert client.get("/api/v1/users/3/awards").status_code == 200
destroy_ctfd(app)
def test_api_user_send_email():
"""Can an admin post /api/v1/users/<user_id>/email"""
app = create_ctfd()
with app.app_context():
register_user(app)
with login_as_user(app) as client:
r = client.post(
"/api/v1/users/2/email", json={"text": "email should get rejected"}
)
assert r.status_code == 403
with login_as_user(app, "admin") as admin:
r = admin.post(
"/api/v1/users/2/email", json={"text": "email should be accepted"}
)
assert r.get_json() == {
"success": False,
"errors": {"": ["Email settings not configured"]},
}
assert r.status_code == 400
set_config("verify_emails", True)
set_config("mail_server", "localhost")
set_config("mail_port", 25)
set_config("mail_useauth", True)
set_config("mail_username", "username")
set_config("mail_password", "password")
with login_as_user(app, "admin") as admin:
r = admin.post("/api/v1/users/2/email", json={"text": ""})
assert r.get_json() == {
"success": False,
"errors": {"text": ["Email text cannot be empty"]},
}
assert r.status_code == 400
with login_as_user(app, "admin") as admin:
r = admin.post(
"/api/v1/users/2/email", json={"text": "email should be accepted"}
)
assert r.status_code == 200
destroy_ctfd(app)
def test_api_user_get_schema():
"""Can a user get /api/v1/users/<user_id> doesn't return unnecessary data"""
app = create_ctfd()
with app.app_context():
register_user(app, name="user1", email="user1@examplectf.com") # ID 2
register_user(app, name="user2", email="user2@examplectf.com") # ID 3
with app.test_client() as client:
r = client.get("/api/v1/users/3")
data = r.get_json()["data"]
assert sorted(data.keys()) == sorted(
UserSchema.views["user"] + ["score", "place"]
)
with login_as_user(app, name="user1") as client:
r = client.get("/api/v1/users/3")
data = r.get_json()["data"]
assert sorted(data.keys()) == sorted(
UserSchema.views["user"] + ["score", "place"]
)
destroy_ctfd(app)
|
{
"content_hash": "f09921a91041188ddb8a5b8417c66e58",
"timestamp": "",
"source": "github",
"line_count": 918,
"max_line_length": 106,
"avg_line_length": 35.58278867102396,
"alnum_prop": 0.534517067197306,
"repo_name": "ajvpot/CTFd",
"id": "6cf01793b07d8a592571da800e1e582f3a556fa5",
"size": "32712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/api/v1/test_users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3584"
},
{
"name": "HTML",
"bytes": "59213"
},
{
"name": "JavaScript",
"bytes": "26907"
},
{
"name": "Python",
"bytes": "75532"
},
{
"name": "Shell",
"bytes": "116"
}
],
"symlink_target": ""
}
|
"""
esky.bdist_esky: distutils command to freeze apps in esky format
Importing this module makes "bdist_esky" available as a distutils command.
This command will freeze the given scripts and package them into a zipfile
named with the application name, version and platform.
The main interface is the 'Esky' class, which represents a frozen app. An Esky
must be given the path to the top-level directory of the frozen app, and a
'VersionFinder' object that it will use to search for updates.
"""
from __future__ import with_statement
import os
import sys
import shutil
import tempfile
import hashlib
import inspect
import json
from glob import glob
import distutils.command
from distutils.core import Command
from distutils.util import convert_path
import esky.patch
from esky.util import get_platform, create_zipfile, \
split_app_version, join_app_version, ESKY_CONTROL_DIR, \
ESKY_APPDATA_DIR, really_rmtree
if sys.platform == "win32":
from esky import winres
from xml.dom import minidom
try:
from esky.bdist_esky import pypyc
except ImportError, e:
pypyc = None
PYPYC_ERROR = e
COMPILED_BOOTSTRAP_CACHE = None
else:
COMPILED_BOOTSTRAP_CACHE = os.path.dirname(__file__)
if not os.path.isdir(COMPILED_BOOTSTRAP_CACHE):
COMPILED_BOOTSTRAP_CACHE = None
# setuptools likes to be imported before anything else that
# might monkey-patch distutils. We don't actually use it,
# this is just to avoid errors with cx_Freeze.
try:
import setuptools
except ImportError:
pass
_FREEZERS = {}
try:
from esky.bdist_esky import f_py2exe
_FREEZERS["py2exe"] = f_py2exe
except ImportError:
_FREEZERS["py2exe"] = None
try:
from esky.bdist_esky import f_py2app
_FREEZERS["py2app"] = f_py2app
except ImportError:
_FREEZERS["py2app"] = None
try:
from esky.bdist_esky import f_bbfreeze
_FREEZERS["bbfreeze"] = f_bbfreeze
except ImportError:
_FREEZERS["bbfreeze"] = None
try:
from esky.bdist_esky import f_cxfreeze
_FREEZERS["cxfreeze"] = f_cxfreeze
_FREEZERS["cx_Freeze"] = f_cxfreeze
_FREEZERS["cx_freeze"] = f_cxfreeze
except ImportError:
_FREEZERS["cxfreeze"] = None
_FREEZERS["cx_Freeze"] = None
_FREEZERS["cx_freeze"] = None
class Executable(unicode):
"""Class to hold information about a specific executable.
This class provides a uniform way to specify extra meta-data about
a frozen executable. By setting various keyword arguments, you can
specify e.g. the icon, and whether it is a gui-only script.
Some freezer modules require all items in the "scripts" argument to
be strings naming real files. This is therefore a subclass of unicode,
and if it refers only to in-memory code then its string value will be
the path to this very file. I know it's ugly, but it works.
"""
def __new__(cls,script,**kwds):
if isinstance(script,basestring):
return unicode.__new__(cls,script)
else:
return unicode.__new__(cls,__file__)
def __init__(self,script,name=None,icon=None,gui_only=None,
include_in_bootstrap_env=True,**kwds):
unicode.__init__(self)
if isinstance(script,Executable):
script = script.script
if name is None:
name = script.name
if gui_only is None:
gui_only = script.gui_only
if not isinstance(script,basestring):
if name is None:
raise TypeError("Must specify name if script is not a file")
self.script = script
self.include_in_bootstrap_env = include_in_bootstrap_env
self.icon = icon
self._name = name
self._gui_only = gui_only
self._kwds = kwds
@property
def name(self):
if self._name is not None:
nm = self._name
else:
if not isinstance(self.script,basestring):
raise TypeError("Must specify name if script is not a file")
nm = os.path.basename(self.script)
if nm.endswith(".py"):
nm = nm[:-3]
elif nm.endswith(".pyw"):
nm = nm[:-4]
if sys.platform == "win32" and not nm.endswith(".exe"):
nm += ".exe"
return nm
@property
def gui_only(self):
if self._gui_only is None:
if not isinstance(self.script,basestring):
return False
else:
return self.script.endswith(".pyw")
else:
return self._gui_only
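# Example (illustrative sketch, not taken from this file): a minimal setup.py
# driving the bdist_esky command defined below. "myapp" and "myapp.py" are
# placeholder names.
#
#     from distutils.core import setup
#     from esky.bdist_esky import Executable
#
#     setup(
#         name="myapp",
#         version="1.0",
#         scripts=[Executable("myapp.py", gui_only=True)],
#         options={"bdist_esky": {"freezer_module": "cxfreeze"}},
#     )
#
# Running "python setup.py bdist_esky" would then produce a zipfile named
# with the application name, version and platform in the dist directory.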
class bdist_esky(Command):
"""Create a frozen application in 'esky' format.
This distutils command can be used to freeze an application in the
format expected by esky. It interprets the following standard
distutils options:
scripts: list of scripts to freeze as executables;
to make a gui-only script, name it 'script.pyw'
data_files: copied into the frozen app directory
package_data: copied into library.zip alongside the module code
To further customize the behaviour of the bdist_esky command, you can
specify the following custom options:
includes: a list of modules to explicitly include in the freeze
excludes: a list of modules to explicitly exclude from the freeze
freezer_module: name of freezer module to use; currently py2exe,
py2app, bbfreeze and cx-freeze are supported.
freezer_options: dict of options to pass through to the underlying
freezer module.
bootstrap_module: a custom module to use for esky bootstrapping;
the default calls esky.bootstrap.bootstrap()
bootstrap_code: a custom code string to use for esky bootstrapping;
this precludes the use of the bootstrap_module option.
If a non-string object is given, its source is taken
using inspect.getsource().
compile_bootstrap_exes: whether to compile the bootstrapping code to a
stand-alone exe; this requires PyPy installed
and the bootstrap code to be valid RPython.
When false, the bootstrap env will use a
trimmed-down copy of the freezer module exe.
dont_run_startup_hooks: don't force all executables to call
esky.run_startup_hooks() on startup.
bundle_msvcrt: whether to bundle the MSVCRT DLLs, manifest files etc
as a private assembly. The default is False; only
                        those with a valid license to redistribute these files
should enable it.
pre_freeze_callback: function to call just before starting to freeze
the application; this is a good opportunity to
customize the bdist_esky instance.
pre_zip_callback: function to call just before starting to zip up
the frozen application; this is a good opportunity
to e.g. sign the resulting executables.
"""
description = "create a frozen app in 'esky' format"
user_options = [
('dist-dir=', 'd',
"directory to put final built distributions in"),
('freezer-module=', None,
"module to use for freezing the application"),
('freezer-options=', None,
"options to pass to the underlying freezer module"),
('bootstrap-module=', None,
"module to use for bootstrapping the application"),
('bootstrap-code=', None,
"code to use for bootstrapping the application"),
('compile-bootstrap-exes=', None,
"whether to compile the bootstrapping exes with pypy"),
('bundle-msvcrt=', None,
"whether to bundle MSVCRT as private assembly"),
('includes=', None,
"list of modules to specifically include"),
('excludes=', None,
"list of modules to specifically exclude"),
('dont-run-startup-hooks=', None,
"don't force execution of esky.run_startup_hooks()"),
('pre-freeze-callback=', None,
"function to call just before starting to freeze the app"),
('pre-zip-callback=', None,
"function to call just before starting to zip up the app"),
('enable-appdata-dir=', None,
"enable new 'appdata' directory layout (will go away after the 0.9.X series)"),
('detached-bootstrap-library=', None,
"By default Esky appends the library.zip to the bootstrap executable when using CX_Freeze, this will tell esky to not do that, but create a separate library.zip instead"),
('compress=', 'c',
"Compression options of the Esky, use lower case for compressed or upper case for uncompressed, currently only support zip files"),
]
boolean_options = ["bundle-msvcrt","dont-run-startup-hooks","compile-bootstrap-exes","enable-appdata-dir"]
def initialize_options(self):
self.dist_dir = None
self.includes = []
self.excludes = []
self.freezer_module = None
self.freezer_options = {}
self.bundle_msvcrt = False
self.dont_run_startup_hooks = False
self.bootstrap_module = None
self.bootstrap_code = None
self.compile_bootstrap_exes = False
self._compiled_exes = {}
self.pre_freeze_callback = None
self.pre_zip_callback = None
self.enable_appdata_dir = False
self.detached_bootstrap_library = False
self.compress = 'zip'
def finalize_options(self):
assert self.compress in (False, None, 'false', 'none', 'zip', 'ZIP'), 'Bad options passed to compress'
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
if self.compile_bootstrap_exes and pypyc is None:
raise PYPYC_ERROR
if self.freezer_module is None:
for freezer_module in ("py2exe","py2app","bbfreeze","cxfreeze"):
self.freezer_module = _FREEZERS[freezer_module]
if self.freezer_module is not None:
break
else:
err = "no supported freezer modules found"
err += " (try installing bbfreeze)"
raise RuntimeError(err)
else:
try:
freezer = _FREEZERS[self.freezer_module]
except KeyError:
err = "freezer module not supported: '%s'"
err = err % (self.freezer_module,)
raise RuntimeError(err)
else:
if freezer is None:
err = "freezer module not found: '%s'"
err = err % (self.freezer_module,)
raise RuntimeError(err)
self.freezer_module = freezer
if isinstance(self.pre_freeze_callback,basestring):
self.pre_freeze_callback = self._name2func(self.pre_freeze_callback)
if isinstance(self.pre_zip_callback,basestring):
self.pre_zip_callback = self._name2func(self.pre_zip_callback)
def _name2func(self,name):
"""Convert a dotted name into a function reference."""
if "." not in name:
return globals()[name]
modname,funcname = name.rsplit(".",1)
mod = __import__(modname,fromlist=[funcname])
return getattr(mod,funcname)
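    # Example (illustrative, not in the original source): _name2func("os.path.join")
    # imports os.path and returns the join function, while a bare name such as
    # "my_callback" is looked up in this module's globals().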
def run(self):
self.tempdir = tempfile.mkdtemp()
try:
self._run()
finally:
really_rmtree(self.tempdir)
def _run(self):
self._run_initialise_dirs()
if self.pre_freeze_callback is not None:
self.pre_freeze_callback(self)
self._run_freeze_scripts()
if self.pre_zip_callback is not None:
self.pre_zip_callback(self)
self._generate_filelist_manifest()
self._run_create_zipfile()
def _run_initialise_dirs(self):
"""Create the dirs into which to freeze the app."""
fullname = self.distribution.get_fullname()
platform = get_platform()
self.bootstrap_dir = os.path.join(self.dist_dir,
"%s.%s"%(fullname,platform,))
if self.enable_appdata_dir:
self.freeze_dir = os.path.join(self.bootstrap_dir,ESKY_APPDATA_DIR,
"%s.%s"%(fullname,platform,))
else:
self.freeze_dir = os.path.join(self.bootstrap_dir,
"%s.%s"%(fullname,platform,))
if os.path.exists(self.bootstrap_dir):
really_rmtree(self.bootstrap_dir)
os.makedirs(self.freeze_dir)
def _run_freeze_scripts(self):
"""Call the selected freezer module to freeze the scripts."""
fullname = self.distribution.get_fullname()
platform = get_platform()
self.freezer_module.freeze(self)
if platform != "win32":
lockfile = os.path.join(self.freeze_dir,ESKY_CONTROL_DIR,"lockfile.txt")
with open(lockfile,"w") as lf:
lf.write("this file is used by esky to lock the version dir\n")
def _generate_filelist_manifest(self):
"""Create a list of all the files in application"""
filelist_file = os.path.join(self.freeze_dir,ESKY_CONTROL_DIR, esky.patch.ESKY_FILELIST)
esky_files = []
for root, dirs, files in os.walk(self.bootstrap_dir):
for f in files:
esky_files.append(os.path.join(os.path.relpath(root, self.bootstrap_dir), f))
with open(filelist_file, 'w') as f:
f.write(json.dumps(esky_files))
def _run_create_zipfile(self):
"""Zip up the final distribution."""
if self.compress:
fullname = self.distribution.get_fullname()
platform = get_platform()
zfname = os.path.join(self.dist_dir,"%s.%s.zip"%(fullname,platform,))
if hasattr(self.freezer_module,"zipit"):
self.freezer_module.zipit(self,self.bootstrap_dir,zfname)
else:
if self.compress == 'zip':
print "zipping up the esky with compression"
create_zipfile(self.bootstrap_dir,zfname,compress=True)
really_rmtree(self.bootstrap_dir)
elif self.compress == 'ZIP':
print "zipping up the esky without compression"
create_zipfile(self.bootstrap_dir,zfname,compress=False)
really_rmtree(self.bootstrap_dir)
else:
print("To zip the esky use compress or c set to ZIP or zip")
def _obj2code(self,obj):
"""Convert an object to some python source code.
Iterables are flattened, None is elided, strings are included verbatim,
open files are read and anything else is passed to inspect.getsource().
"""
if obj is None:
return ""
if isinstance(obj,basestring):
return obj
if hasattr(obj,"read"):
return obj.read()
try:
return "\n\n\n".join(self._obj2code(i) for i in obj)
except TypeError:
return inspect.getsource(obj)
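    # Example (illustrative): _obj2code(None) -> "", _obj2code("x = 1") -> "x = 1",
    # and _obj2code([func_a, func_b]) -> their sources joined by blank lines.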
def get_bootstrap_code(self):
"""Get any extra code to be executed by the bootstrapping exe.
This method interprets the bootstrap-code and bootstrap-module settings
to construct any extra bootstrapping code that must be executed by
the frozen bootstrap executable. It is returned as a string.
"""
bscode = self.bootstrap_code
if bscode is None:
if self.bootstrap_module is not None:
bscode = __import__(self.bootstrap_module)
for submod in self.bootstrap_module.split(".")[1:]:
bscode = getattr(bscode,submod)
bscode = self._obj2code(bscode)
return bscode
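    # Example (hedged): with bootstrap_module="myapp.boot" (a hypothetical
    # module), its source is extracted via _obj2code() and embedded in the
    # bootstrap exe; an explicit bootstrap_code string takes precedence.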
def get_executables(self,normalise=True):
"""Yield a normalised Executable instance for each script to be frozen.
If "normalise" is True (the default) then the user-provided scripts
will be rewritten to decode any non-filename items specified as part
of the script, and to include the esky startup code. If the freezer
has a better way of doing these things, it should pass normalise=False.
"""
if normalise:
if not os.path.exists(os.path.join(self.tempdir,"scripts")):
os.mkdir(os.path.join(self.tempdir,"scripts"))
if self.distribution.has_scripts():
for s in self.distribution.scripts:
if isinstance(s,Executable):
exe = s
else:
exe = Executable(s)
if normalise:
# Give the normalised script file a name matching that
# specified, since some freezers only take the filename.
name = exe.name
if sys.platform == "win32" and name.endswith(".exe"):
name = name[:-4]
if exe.endswith(".pyw"):
ext = ".pyw"
else:
ext = ".py"
script = os.path.join(self.tempdir,"scripts",name+ext)
# Get the code for the target script.
# If it's a single string then interpret it as a filename,
# otherwise feed it into the _obj2code logic.
if isinstance(exe.script,basestring):
with open(exe.script,"rt") as f:
code = f.read()
else:
code = self._obj2code(exe.script)
# Check that the code actually compiles - sometimes it
# can be hard to get a good message out of the freezer.
compile(code,"","exec")
# Augment the given code with special esky-related logic.
with open(script,"wt") as fOut:
lines = (ln+"\n" for ln in code.split("\n"))
# Keep any leading comments and __future__ imports
# at the start of the file.
for ln in lines:
if ln.strip():
if not ln.strip().startswith("#"):
if "__future__" not in ln:
break
fOut.write(ln)
# Run the startup hooks before any actual code.
if not self.dont_run_startup_hooks:
fOut.write("import esky\n")
fOut.write("esky.run_startup_hooks()\n")
fOut.write("\n")
# Then just include the rest of the script code.
fOut.write(ln)
for ln in lines:
fOut.write(ln)
new_exe = Executable(script)
new_exe.__dict__.update(exe.__dict__)
new_exe.script = script
exe = new_exe
yield exe
def get_data_files(self):
"""Yield (source,destination) tuples for data files.
        This method generates the names of all data files to be included in
the frozen app. They should be placed directly into the freeze
directory as raw files.
"""
fdir = self.freeze_dir
if sys.platform == "win32" and self.bundle_msvcrt:
for (src,dst) in self.get_msvcrt_private_assembly_files():
yield (src,dst)
if self.distribution.data_files:
for datafile in self.distribution.data_files:
# Plain strings get placed in the root dist directory.
if isinstance(datafile,basestring):
datafile = ("",[datafile])
(dst,sources) = datafile
if os.path.isabs(dst):
err = "cant freeze absolute data_file paths (%s)"
err = err % (dst,)
raise ValueError(err)
dst = convert_path(dst)
for src in sources:
src = convert_path(src)
yield (src,os.path.join(dst,os.path.basename(src)))
def get_package_data(self):
"""Yield (source,destination) tuples for package data files.
This method generates the names of all package data files to be
included in the frozen app. They should be placed in the library.zip
or equivalent, alongside the python files for that package.
"""
if self.distribution.package_data:
for pkg,data in self.distribution.package_data.iteritems():
pkg_dir = self.get_package_dir(pkg)
pkg_path = pkg.replace(".","/")
if isinstance(data,basestring):
data = [data]
for dpattern in data:
dfiles = glob(os.path.join(pkg_dir,convert_path(dpattern)))
for nm in dfiles:
arcnm = pkg_path + nm[len(pkg_dir):]
yield (nm,arcnm)
def get_package_dir(self,pkg):
"""Return directory where the given package is located.
This was largely swiped from distutils, with some cleanups.
"""
inpath = pkg.split(".")
outpath = []
if not self.distribution.package_dir:
outpath = inpath
else:
while inpath:
try:
dir = self.distribution.package_dir[".".join(inpath)]
except KeyError:
outpath.insert(0, inpath[-1])
del inpath[-1]
else:
outpath.insert(0, dir)
break
else:
try:
                    dir = self.distribution.package_dir[""]
except KeyError:
pass
else:
outpath.insert(0, dir)
if outpath:
return os.path.join(*outpath)
else:
return ""
@staticmethod
def get_msvcrt_private_assembly_files():
"""Get (source,destination) tuples for the MSVCRT DLLs, manifest etc.
This method generates data_files tuples for the MSVCRT DLLs, manifest
and associated paraphernalia. Including these files is required for
newer Python versions if you want to run on machines that don't have
the latest C runtime installed *and* you don't want to run the special
"vcredist_x86.exe" program during your installation process.
        Bundling is only performed on win32 platforms, and only if you enable
it explicitly. Before doing so, carefully check whether you have a
license to distribute these files.
"""
cls = bdist_esky
msvcrt_info = cls._get_msvcrt_info()
if msvcrt_info is not None:
msvcrt_name = msvcrt_info[0]
# Find installed manifest file with matching info
for candidate in cls._find_msvcrt_manifest_files(msvcrt_name):
manifest_file, msvcrt_dir = candidate
try:
with open(manifest_file,"rb") as mf:
manifest_data = mf.read()
for info in msvcrt_info:
if info.encode() not in manifest_data:
break
else:
break
except EnvironmentError:
pass
else:
err = "manifest for %s not found" % (msvcrt_info,)
raise RuntimeError(err)
# Copy the manifest and matching directory into the freeze dir.
manifest_name = msvcrt_name + ".manifest"
yield (manifest_file,os.path.join(msvcrt_name,manifest_name))
for fnm in os.listdir(msvcrt_dir):
yield (os.path.join(msvcrt_dir,fnm),
os.path.join(msvcrt_name,fnm))
@staticmethod
def _get_msvcrt_info():
"""Get info about the MSVCRT in use by this python executable.
This parses the name, version and public key token out of the exe
manifest and returns them as a tuple.
"""
try:
manifest_str = winres.get_app_manifest()
except EnvironmentError:
return None
manifest = minidom.parseString(manifest_str)
for assembly in manifest.getElementsByTagName("assemblyIdentity"):
name = assembly.attributes["name"].value
if name.startswith("Microsoft") and name.endswith("CRT"):
version = assembly.attributes["version"].value
pubkey = assembly.attributes["publicKeyToken"].value
return (name,version,pubkey)
return None
@staticmethod
def _find_msvcrt_manifest_files(name):
"""Search the system for candidate MSVCRT manifest files.
This method yields (manifest_file,msvcrt_dir) tuples giving a candidate
manifest file for the given assembly name, and the directory in which
the actual assembly data files are found.
"""
cls = bdist_esky
# Search for redist files in a Visual Studio install
progfiles = os.path.expandvars("%PROGRAMFILES%")
for dnm in os.listdir(progfiles):
if dnm.lower().startswith("microsoft visual studio"):
dpath = os.path.join(progfiles,dnm,"VC","redist")
for (subdir,_,filenames) in os.walk(dpath):
for fnm in filenames:
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(subdir,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
# Search for manifests installed in the WinSxS directory
winsxs_m = os.path.expandvars("%WINDIR%\\WinSxS\\Manifests")
for fnm in os.listdir(winsxs_m):
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(winsxs_m,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
winsxs = os.path.expandvars("%WINDIR%\\WinSxS")
for fnm in os.listdir(winsxs):
if name.lower() in fnm.lower():
if fnm.lower().endswith(".manifest"):
mf = os.path.join(winsxs,fnm)
md = cls._find_msvcrt_dir_for_manifest(name,mf)
if md is not None:
yield (mf,md)
@staticmethod
def _find_msvcrt_dir_for_manifest(msvcrt_name,manifest_file):
"""Find the directory containing data files for the given manifest.
This searches a few common locations for the data files that go with
the given manifest file. If a suitable directory is found then it is
returned, otherwise None is returned.
"""
# The manifest file might be next to the dir, inside the dir, or
# in a subdir named "Manifests". Walk around till we find it.
msvcrt_dir = ".".join(manifest_file.split(".")[:-1])
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
msvcrt_basename = os.path.basename(msvcrt_dir)
msvcrt_parent = os.path.dirname(os.path.dirname(msvcrt_dir))
msvcrt_dir = os.path.join(msvcrt_parent,msvcrt_basename)
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
msvcrt_dir = os.path.join(msvcrt_parent,msvcrt_name)
if os.path.isdir(msvcrt_dir):
return msvcrt_dir
return None
def compile_to_bootstrap_exe(self,exe,source,relpath=None):
"""Compile the given sourcecode into a bootstrapping exe.
This method compiles the given sourcecode into a stand-alone exe using
PyPy, then stores that in the bootstrap env under the name of the given
Executable object. If the source has been previously compiled then a
cached version of the exe may be used.
"""
if not relpath:
relpath = exe.name
source = "__rpython__ = True\n" + source
cdir = os.path.join(self.tempdir,"compile")
if not os.path.exists(cdir):
os.mkdir(cdir)
source_hash = hashlib.md5(source).hexdigest()
outname = "bootstrap_%s.%s" % (source_hash,get_platform())
if exe.gui_only:
outname += ".gui"
if sys.platform == "win32":
outname += ".exe"
# First try to use a precompiled version.
if COMPILED_BOOTSTRAP_CACHE is not None:
outfile = os.path.join(COMPILED_BOOTSTRAP_CACHE,outname)
if os.path.exists(outfile):
return self.copy_to_bootstrap_env(outfile,relpath)
# Otherwise we have to compile it anew.
try:
outfile = self._compiled_exes[(source_hash,exe.gui_only)]
except KeyError:
infile = os.path.join(cdir,"bootstrap.py")
outfile = os.path.join(cdir,outname)
with open(infile,"wt") as f:
f.write(source)
opts = dict(gui_only=exe.gui_only)
pypyc.compile_rpython(infile,outfile,**opts)
self._compiled_exes[(source_hash,exe.gui_only)] = outfile
# Try to save the compiled exe for future use.
if COMPILED_BOOTSTRAP_CACHE is not None:
cachedfile = os.path.join(COMPILED_BOOTSTRAP_CACHE,outname)
try:
shutil.copy2(outfile,cachedfile)
except EnvironmentError:
pass
return self.copy_to_bootstrap_env(outfile,relpath)
def copy_to_bootstrap_env(self,src,dst=None):
"""Copy the named file into the bootstrap environment.
The filename is also added to the bootstrap manifest.
"""
if dst is None:
dst = src
srcpath = os.path.join(self.freeze_dir,src)
dstpath = os.path.join(self.bootstrap_dir,dst)
if os.path.isdir(srcpath):
self.copy_tree(srcpath,dstpath)
else:
if not os.path.isdir(os.path.dirname(dstpath)):
self.mkpath(os.path.dirname(dstpath))
self.copy_file(srcpath,dstpath)
self.add_to_bootstrap_manifest(dstpath)
return dstpath
def add_to_bootstrap_manifest(self,dstpath):
if not os.path.isdir(os.path.join(self.freeze_dir,ESKY_CONTROL_DIR)):
os.mkdir(os.path.join(self.freeze_dir,ESKY_CONTROL_DIR))
f_manifest = os.path.join(self.freeze_dir,ESKY_CONTROL_DIR,"bootstrap-manifest.txt")
with open(f_manifest,"at") as f_manifest:
f_manifest.seek(0,os.SEEK_END)
if os.path.isdir(dstpath):
for (dirnm,_,filenms) in os.walk(dstpath):
for fnm in filenms:
fpath = os.path.join(dirnm,fnm)
dpath = fpath[len(self.bootstrap_dir)+1:]
if os.sep != "/":
dpath = dpath.replace(os.sep,"/")
f_manifest.write(dpath)
f_manifest.write("\n")
else:
dst = dstpath[len(self.bootstrap_dir)+1:]
if os.sep != "/":
dst = dst.replace(os.sep,"/")
f_manifest.write(dst)
f_manifest.write("\n")
class bdist_esky_patch(Command):
"""Create a patch for a frozen application in 'esky' format.
This distutils command can be used to create a patch file between two
versions of an application frozen with esky. Such a patch can be used
for differential updates between application versions.
"""
user_options = [
('dist-dir=', 'd',
"directory to put final built distributions in"),
('from-version=', None,
"version against which to produce patch"),
]
def initialize_options(self):
self.dist_dir = None
self.from_version = None
def finalize_options(self):
self.set_undefined_options('bdist',('dist_dir', 'dist_dir'))
def run(self):
fullname = self.distribution.get_fullname()
platform = get_platform()
vdir = "%s.%s" % (fullname,platform,)
appname = split_app_version(vdir)[0]
# Ensure we have current version's esky, as target for patch.
target_esky = os.path.join(self.dist_dir,vdir+".zip")
if not os.path.exists(target_esky):
self.run_command("bdist_esky")
# Generate list of source eskys to patch against.
if self.from_version:
source_vdir = join_app_version(appname,self.from_version,platform)
source_eskys = [os.path.join(self.dist_dir,source_vdir+".zip")]
else:
source_eskys = []
for nm in os.listdir(self.dist_dir):
if target_esky.endswith(nm):
continue
if nm.startswith(appname+"-") and nm.endswith(platform+".zip"):
source_eskys.append(os.path.join(self.dist_dir,nm))
# Write each patch, transparently unzipping the esky
for source_esky in source_eskys:
            source_vdir = os.path.basename(source_esky)[:-4]
            source_version = split_app_version(source_vdir)[1]
            patchfile = vdir+".from-%s.patch" % (source_version,)
patchfile = os.path.join(self.dist_dir,patchfile)
print "patching", target_esky, "against", source_esky, "=>", patchfile
if not self.dry_run:
try:
esky.patch.main(["-Z","diff",source_esky,target_esky,patchfile])
except:
import traceback
traceback.print_exc()
raise
# Monkey-patch distutils to include our commands by default.
distutils.command.__all__.append("bdist_esky")
distutils.command.__all__.append("bdist_esky_patch")
sys.modules["distutils.command.bdist_esky"] = sys.modules["esky.bdist_esky"]
sys.modules["distutils.command.bdist_esky_patch"] = sys.modules["esky.bdist_esky"]
|
{
"content_hash": "c6179d92cb4052bc25de09612d170620",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 180,
"avg_line_length": 41.800477897252094,
"alnum_prop": 0.5676394089233143,
"repo_name": "timeyyy/esky",
"id": "369be679b0654d953db349d55a84c1f72079e8d9",
"size": "35108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "esky/bdist_esky/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "403936"
}
],
"symlink_target": ""
}
|
import networkx
from yaiep.graph.Node import Node
##
# Class that represents the entire search space, which is generated
# incrementally as the search method inspects new nodes
#
class SearchGraph(networkx.DiGraph):
##
    # Creates the search graph as a directed graph whose initial
    # node is the initial state from which the search method will
    # start in order to explore the solution space
    #
    # @param init_state initial state from which the search begins
def __init__(self, init_state):
networkx.DiGraph.__init__(self)
self._init_state = Node(init_state.copy(), None)
        # insert the initial state from which to start inspecting the search space
self.add_node(self._init_state)
##
    # Returns a reference to the initial state from which
    # the search started
#
def get_init_state(self):
return self._init_state
def __str__(self):
res = ''
for node in self:
res += '{0} -> '.format(str(node.wm))
for adj in self.neighbors(node):
res += str(adj.wm) + '\n'
return res
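# Hedged usage sketch (not part of the original file): building a SearchGraph
# from a minimal stand-in state. The real working-memory object only needs a
# copy() method here; _FakeState is hypothetical.
def _example_search_graph():  # illustration only
    class _FakeState(object):
        def copy(self):
            return self
    graph = SearchGraph(_FakeState())
    root = graph.get_init_state()
    # a search method would grow the space via graph.add_edge(root, child)
    return root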
|
{
"content_hash": "e16cd9747a024fd478c8efd5042b7fd2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 90,
"avg_line_length": 29.923076923076923,
"alnum_prop": 0.636675235646958,
"repo_name": "aleSuglia/YAIEP",
"id": "34e1ac88e6e3ecc68e91a8e3b1579571f09f56f6",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaiep/graph/SearchGraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151155"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import deque
from contextlib import contextmanager
from six.moves import configparser
from pathlib import Path
import distutils.util
import io
import json
import logging
import os
import six
import sys
logger = logging.getLogger(__file__)
marker = object()
LOCAL_CONFIG = "config.txt"
SENSITIVE_KEY_NAMES = ("access_id", "access_key", "password", "secret", "token")
def is_valid_json(value):
json.loads(value)
default_keys = (
# These are the keys allowed in a dallinger experiment
# config.txt file.
("ad_group", six.text_type, []),
("approve_requirement", int, []),
("assign_qualifications", bool, []),
("auto_recruit", bool, []),
("aws_access_key_id", six.text_type, ["AWS_ACCESS_KEY_ID"], True),
(
"aws_region",
six.text_type,
["AWS_REGION", "AWS_DEFAULT_REGION", "aws_default_region"],
),
("aws_secret_access_key", six.text_type, ["AWS_SECRET_ACCESS_KEY"], True),
("base_payment", float, []),
("base_port", int, []),
("browser_exclude_rule", six.text_type, []),
("clock_on", bool, []),
("contact_email_on_error", six.text_type, []),
("chrome-path", six.text_type, []),
("dallinger_develop_directory", six.text_type, []),
("dallinger_email_address", six.text_type, []),
("dashboard_password", six.text_type, [], True),
("dashboard_user", six.text_type, [], True),
("database_size", six.text_type, []),
("database_url", six.text_type, [], True),
("debug_recruiter", six.text_type, []),
("description", six.text_type, []),
("disable_when_duration_exceeded", bool, []),
("duration", float, []),
("dyno_type", six.text_type, []),
("dyno_type_web", six.text_type, []),
("dyno_type_worker", six.text_type, []),
("enable_global_experiment_registry", bool, []),
("EXPERIMENT_CLASS_NAME", six.text_type, []),
("group_name", six.text_type, []),
("heroku_app_id_root", six.text_type, []),
("heroku_auth_token", six.text_type, [], True),
("heroku_python_version", six.text_type, []),
("heroku_team", six.text_type, ["team"]),
("host", six.text_type, []),
("id", six.text_type, []),
("infrastructure_debug_details", six.text_type, [], False),
("keywords", six.text_type, []),
("language", six.text_type, []),
("lifetime", int, []),
("logfile", six.text_type, []),
("loglevel", int, []),
("mode", six.text_type, []),
("mturk_qualification_blocklist", six.text_type, ["qualification_blacklist"]),
("mturk_qualification_requirements", six.text_type, [], False, [is_valid_json]),
("num_dynos_web", int, []),
("num_dynos_worker", int, []),
("organization_name", six.text_type, []),
("port", int, ["PORT"]),
("prolific_api_token", six.text_type, ["PROLIFIC_RESEARCHER_API_TOKEN"], True),
("prolific_api_version", six.text_type, []),
("prolific_estimated_completion_minutes", int, []),
("prolific_maximum_allowed_minutes", int, []),
("prolific_recruitment_config", six.text_type, [], False, [is_valid_json]),
("prolific_reward_cents", int, []),
("protected_routes", six.text_type, [], False, [is_valid_json]),
("recruiter", six.text_type, []),
("recruiters", six.text_type, []),
("redis_size", six.text_type, []),
("replay", bool, []),
("sentry", bool, []),
("smtp_host", six.text_type, []),
("smtp_username", six.text_type, []),
("smtp_password", six.text_type, ["dallinger_email_password"], True),
("threads", six.text_type, []),
("title", six.text_type, []),
("question_max_length", int, []),
("us_only", bool, []),
("webdriver_type", six.text_type, []),
("webdriver_url", six.text_type, []),
("whimsical", bool, []),
("worker_multiplier", float, []),
("docker_image_base_name", six.text_type, [], ""),
("docker_image_name", six.text_type, [], ""),
("docker_ssh_volumes", six.text_type, [], ""),
)
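# Each default_keys entry above is (key, type, synonyms[, sensitive[,
# validators]]) and is passed straight to Configuration.register() by
# get_config() below.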
class Configuration(object):
SUPPORTED_TYPES = {six.binary_type, six.text_type, int, float, bool}
_experiment_params_loaded = False
_module_params_loaded = False
def __init__(self):
self._reset()
def set(self, key, value):
return self.extend({key: value})
def clear(self):
self.data = deque()
self.ready = False
def _reset(self, register_defaults=False):
self.clear()
self.types = {}
self.synonyms = {}
self.validators = {}
self.sensitive = set()
self._experiment_params_loaded = False
self._module_params_loaded = False
if register_defaults:
for registration in default_keys:
self.register(*registration)
def extend(self, mapping, cast_types=False, strict=False):
normalized_mapping = {}
for key, value in mapping.items():
key = self.synonyms.get(key, key)
if key not in self.types:
# This key hasn't been registered, we ignore it
if strict:
raise KeyError("{} is not a valid configuration key".format(key))
logger.debug("{} is not a valid configuration key".format(key))
continue
expected_type = self.types.get(key)
if cast_types:
if isinstance(value, six.text_type) and value.startswith("file:"):
# Load this value from a file
_, filename = value.split(":", 1)
with io.open(filename, "rt", encoding="utf-8") as source_file:
value = source_file.read()
try:
if expected_type == bool:
value = distutils.util.strtobool(value)
value = expected_type(value)
except ValueError:
pass
if not isinstance(value, expected_type):
raise TypeError(
"Got {value} for {key}, expected {expected_type}".format(
value=repr(value), key=key, expected_type=expected_type
)
)
for validator in self.validators.get(key, []):
try:
validator(value)
except ValueError as e:
# Annotate the exception with more info
e.dallinger_config_key = key
e.dallinger_config_value = value
raise e
normalized_mapping[key] = value
self.data.extendleft([normalized_mapping])
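    # Example (illustrative): extend({"port": "1883"}, cast_types=True) casts
    # the value to int via the registered type, and a value of the form
    # "file:secret.txt" (hypothetical path) is first replaced by that file's
    # contents.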
@contextmanager
def override(self, *args, **kwargs):
self.extend(*args, **kwargs)
        try:
            yield self
        finally:
            # remove the override layer even if the body raises
            self.data.popleft()
def get(self, key, default=marker):
if not self.ready:
raise RuntimeError("Config not loaded")
for layer in self.data:
try:
value = layer[key]
if isinstance(value, six.text_type):
value = value.strip()
return value
except KeyError:
continue
if default is marker:
raise KeyError(key)
return default
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.extend({key: value})
def __getattr__(self, key):
try:
return self.get(key)
except KeyError:
raise AttributeError
def as_dict(self):
d = {}
for key in self.types:
if key not in self.sensitive:
try:
d[key] = self.get(key)
except KeyError:
pass
return d
def is_sensitive(self, key):
if key in self.sensitive:
return True
# Also, does a sensitive string appear within the key?
return any(s for s in SENSITIVE_KEY_NAMES if s in key)
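        # e.g. is_sensitive("my_api_token") is True because "token" is one of
        # the SENSITIVE_KEY_NAMES substrings, even though the key itself was
        # never registered as sensitive.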
def register(self, key, type_, synonyms=None, sensitive=False, validators=None):
if synonyms is None:
synonyms = set()
if key in self.types:
raise KeyError("Config key {} is already registered".format(key))
if type_ not in self.SUPPORTED_TYPES:
raise TypeError("{type} is not a supported type".format(type=type_))
self.types[key] = type_
for synonym in synonyms:
self.synonyms[synonym] = key
if validators:
self.validators[key] = validators
if sensitive:
self.sensitive.add(key)
def load_from_file(self, filename):
parser = configparser.ConfigParser()
parser.read(filename)
data = {}
for section in parser.sections():
data.update(dict(parser.items(section)))
self.extend(data, cast_types=True, strict=True)
def write(self, filter_sensitive=False, directory=None):
parser = configparser.ConfigParser()
parser.add_section("Parameters")
for layer in reversed(self.data):
for k, v in layer.items():
if filter_sensitive and self.is_sensitive(k):
continue
parser.set("Parameters", k, six.text_type(v))
directory = directory or os.getcwd()
destination = os.path.join(directory, LOCAL_CONFIG)
with open(destination, "w") as fp:
parser.write(fp)
def load_from_environment(self):
self.extend(os.environ, cast_types=True)
def load_defaults(self):
"""Load default configuration values"""
# Apply extra parameters before loading the configs
self.register_extra_parameters()
global_config_name = ".dallingerconfig"
global_config = os.path.expanduser(os.path.join("~/", global_config_name))
defaults_folder = os.path.join(os.path.dirname(__file__), "default_configs")
local_defaults_file = os.path.join(defaults_folder, "local_config_defaults.txt")
global_defaults_file = os.path.join(
defaults_folder, "global_config_defaults.txt"
)
# Load the configuration, with local parameters overriding global ones.
for config_file in [global_defaults_file, local_defaults_file, global_config]:
self.load_from_file(config_file)
def load(self):
self.load_defaults()
localConfig = os.path.join(os.getcwd(), LOCAL_CONFIG)
if os.path.exists(localConfig):
self.load_from_file(localConfig)
self.load_from_environment()
self.ready = True
if self.get("docker_image_base_name", None) is None:
self.set("docker_image_base_name", Path(os.getcwd()).name)
def register_extra_parameters(self):
initialize_experiment_package(os.getcwd())
extra_parameters = None
# Import and instantiate the experiment class if available
# This will run any experiment specific parameter registrations
from dallinger.experiment import load
try:
exp_klass = load()
except ImportError:
exp_klass = None
exp_params = getattr(exp_klass, "extra_parameters", None)
if exp_params is not None and not self._experiment_params_loaded:
exp_params()
self._experiment_params_loaded = True
try:
from dallinger_experiment.experiment import extra_parameters
except ImportError:
try:
from dallinger_experiment.dallinger_experiment import extra_parameters
except ImportError:
try:
from dallinger_experiment import extra_parameters
except ImportError:
pass
if extra_parameters is not None and not self._module_params_loaded:
extra_parameters()
self._module_params_loaded = True
config = None
def get_config():
global config
if config is None:
config = Configuration()
for registration in default_keys:
config.register(*registration)
return config
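# Hedged usage sketch (not part of the Dallinger source): typical access
# through the module-level singleton. "mode" and "title" are registered keys;
# the default value is hypothetical.
def _example_get_config_usage():  # pragma: no cover - illustration only
    cfg = get_config()
    if not cfg.ready:
        cfg.load()  # defaults, then config.txt, then environment variables
    title = cfg.get("title", u"untitled")
    with cfg.override({u"mode": u"debug"}):
        assert cfg.mode == u"debug"  # the override layer wins until exit
    return title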
def initialize_experiment_package(path):
"""Make the specified directory importable as the `dallinger_experiment` package."""
# Create __init__.py if it doesn't exist (needed for Python 2)
init_py = os.path.join(path, "__init__.py")
if not os.path.exists(init_py):
open(init_py, "a").close()
# Retain already set experiment module
if sys.modules.get("dallinger_experiment") is not None:
return
dirname = os.path.dirname(path)
basename = os.path.basename(path)
sys.path.insert(0, dirname)
package = __import__(basename)
if Path(path) not in [Path(p) for p in package.__path__]:
raise Exception(
"Package was not imported from the requested path! ({} not in {})".format(
path, package.__path__
)
)
sys.modules["dallinger_experiment"] = package
package.__package__ = "dallinger_experiment"
package.__name__ = "dallinger_experiment"
sys.path.pop(0)
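# Hedged usage sketch (not in the original module): making an experiment
# directory importable under the fixed package name (the path is
# hypothetical):
#
#     initialize_experiment_package("/path/to/my_experiment")
#     import dallinger_experiment  # now resolvable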
|
{
"content_hash": "7ced6946d0bfb879a5ae0b9b09544d05",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 88,
"avg_line_length": 35.625,
"alnum_prop": 0.5739893211289092,
"repo_name": "Dallinger/Dallinger",
"id": "11f60e5b36d25b1555be49abe3f04c54a66729a8",
"size": "13110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dallinger/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2204"
},
{
"name": "Dockerfile",
"bytes": "4288"
},
{
"name": "HTML",
"bytes": "62909"
},
{
"name": "JavaScript",
"bytes": "49602"
},
{
"name": "Jinja",
"bytes": "4871"
},
{
"name": "Procfile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "1131695"
},
{
"name": "Ruby",
"bytes": "1769"
},
{
"name": "Shell",
"bytes": "2905"
}
],
"symlink_target": ""
}
|
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
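# Hedged usage sketch (not part of the original file): both the positional
# and the keyword forms of the helper above.
if __name__ == "__main__":  # illustration only
    Numbers = enum('ZERO', 'ONE', 'TWO')
    assert Numbers.ZERO == 0 and Numbers.TWO == 2
    Flags = enum(READ=1, WRITE=2, EXEC=4)
    assert Flags.WRITE == 2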
|
{
"content_hash": "07b8191e8c41d5177a86b27e96d772be",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 64,
"avg_line_length": 32.75,
"alnum_prop": 0.6564885496183206,
"repo_name": "plum-umd/pasket",
"id": "a2ac095cafa00f055f17e9d0dd235f3070dd349b",
"size": "248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/enum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DTrace",
"bytes": "1739"
},
{
"name": "GAP",
"bytes": "21334"
},
{
"name": "HTML",
"bytes": "26576"
},
{
"name": "Java",
"bytes": "1372909"
},
{
"name": "JavaScript",
"bytes": "707"
},
{
"name": "Python",
"bytes": "801669"
},
{
"name": "Shell",
"bytes": "1790"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import codecs
import os.path
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
    raise RuntimeError("Unable to find version string.")
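# Illustrative note (not in the original file): given a line such as
#     __version__ = "1.2.3"
# in kaggle_environments/__init__.py, get_version() returns "1.2.3".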
with open("README.md", "r") as f:
long_description = f.read()
setup(
name='kaggle-environments',
version=get_version("kaggle_environments/__init__.py"),
description='Kaggle Environments',
long_description=long_description,
long_description_content_type='text/markdown',
author='Kaggle',
author_email='support@kaggle.com',
url='https://github.com/Kaggle/kaggle-environments',
keywords=['Kaggle'],
entry_points={'console_scripts': [
'kaggle-environments = kaggle_environments.main:main']},
install_requires=[
"jsonschema >= 3.0.1",
"Flask >= 1.1.2",
"numpy >= 1.19.5",
"requests >= 2.25.1"
],
packages=find_packages(),
include_package_data=True,
python_requires='>=3.6',
license='Apache 2.0')
|
{
"content_hash": "c1cc0d5a959a0f5ce486c29fac7c7496",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 64,
"avg_line_length": 31.238095238095237,
"alnum_prop": 0.6219512195121951,
"repo_name": "Kaggle/kaggle-environments",
"id": "ee1242916588872005ae0d7b575ea214e9109d6c",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2809"
},
{
"name": "HTML",
"bytes": "25293"
},
{
"name": "Java",
"bytes": "77923"
},
{
"name": "JavaScript",
"bytes": "122379"
},
{
"name": "Jupyter Notebook",
"bytes": "1847244"
},
{
"name": "Python",
"bytes": "451561"
},
{
"name": "Shell",
"bytes": "16147"
},
{
"name": "TypeScript",
"bytes": "84404"
}
],
"symlink_target": ""
}
|
"""Test config flow."""
from unittest.mock import patch
import pytest
import voluptuous as vol
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mqtt
from homeassistant.components.hassio import HassioServiceInfo
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
@pytest.fixture(autouse=True)
def mock_finish_setup():
"""Mock out the finish setup method."""
with patch(
"homeassistant.components.mqtt.MQTT.async_connect", return_value=True
) as mock_finish:
yield mock_finish
@pytest.fixture
def mock_try_connection():
"""Mock the try connection method."""
with patch("homeassistant.components.mqtt.config_flow.try_connection") as mock_try:
yield mock_try
async def test_user_connection_works(
hass, mock_try_connection, mock_finish_setup, mqtt_client_mock
):
"""Test we can finish a config flow."""
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"broker": "127.0.0.1"}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"broker": "127.0.0.1",
"port": 1883,
"discovery": True,
}
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry got setup
assert len(mock_finish_setup.mock_calls) == 1
async def test_user_connection_fails(hass, mock_try_connection, mock_finish_setup):
"""Test if connection cannot be made."""
mock_try_connection.return_value = False
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"broker": "127.0.0.1"}
)
assert result["type"] == "form"
assert result["errors"]["base"] == "cannot_connect"
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry did not setup
assert len(mock_finish_setup.mock_calls) == 0
async def test_manual_config_set(
hass, mock_try_connection, mock_finish_setup, mqtt_client_mock
):
"""Test we ignore entry if manual config available."""
assert await async_setup_component(hass, "mqtt", {"mqtt": {"broker": "bla"}})
await hass.async_block_till_done()
assert len(mock_finish_setup.mock_calls) == 1
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
async def test_user_single_instance(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="mqtt").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_hassio_already_configured(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="mqtt").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"mqtt", context={"source": config_entries.SOURCE_HASSIO}
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_hassio_ignored(hass: HomeAssistant) -> None:
"""Test we supervisor discovered instance can be ignored."""
MockConfigEntry(
domain=mqtt.DOMAIN, source=config_entries.SOURCE_IGNORE
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
mqtt.DOMAIN,
data=HassioServiceInfo(
config={"addon": "Mosquitto", "host": "mock-mosquitto", "port": "1883"}
),
context={"source": config_entries.SOURCE_HASSIO},
)
assert result
assert result.get("type") == data_entry_flow.RESULT_TYPE_ABORT
assert result.get("reason") == "already_configured"
async def test_hassio_confirm(
hass, mock_try_connection, mock_finish_setup, mqtt_client_mock
):
"""Test we can finish a config flow."""
mock_try_connection.return_value = True
result = await hass.config_entries.flow.async_init(
"mqtt",
data=HassioServiceInfo(
config={
"addon": "Mock Addon",
"host": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
}
),
context={"source": config_entries.SOURCE_HASSIO},
)
assert result["type"] == "form"
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "Mock Addon"}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"discovery": True}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"broker": "mock-broker",
"port": 1883,
"username": "mock-user",
"password": "mock-pass",
"protocol": "3.1.1",
"discovery": True,
}
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry got setup
assert len(mock_finish_setup.mock_calls) == 1
async def test_option_flow(hass, mqtt_mock, mock_try_connection):
"""Test config flow options."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mqtt_mock.async_connect.reset_mock()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 0
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_enable": True,
"birth_topic": "ha_state/online",
"birth_payload": "online",
"birth_qos": 1,
"birth_retain": True,
"will_enable": True,
"will_topic": "ha_state/offline",
"will_payload": "offline",
"will_qos": 2,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] is None
assert config_entry.data == {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/online",
mqtt.ATTR_PAYLOAD: "online",
mqtt.ATTR_QOS: 1,
mqtt.ATTR_RETAIN: True,
},
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/offline",
mqtt.ATTR_PAYLOAD: "offline",
mqtt.ATTR_QOS: 2,
mqtt.ATTR_RETAIN: True,
},
}
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 1
async def test_disable_birth_will(hass, mqtt_mock, mock_try_connection):
"""Test disabling birth and will."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mqtt_mock.async_connect.reset_mock()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 0
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_enable": False,
"birth_topic": "ha_state/online",
"birth_payload": "online",
"birth_qos": 1,
"birth_retain": True,
"will_enable": False,
"will_topic": "ha_state/offline",
"will_payload": "offline",
"will_qos": 2,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] is None
assert config_entry.data == {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {},
mqtt.CONF_WILL_MESSAGE: {},
}
await hass.async_block_till_done()
assert mqtt_mock.async_connect.call_count == 1
def get_default(schema, key):
"""Get default value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.default == vol.UNDEFINED:
return None
return k.default()
def get_suggested(schema, key):
"""Get suggested value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.description is None or "suggested_value" not in k.description:
return None
return k.description["suggested_value"]
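# Hedged sketch (not part of the original tests): behavior of the two helpers
# above for a hypothetical voluptuous schema.
#
#     schema = vol.Schema({
#         vol.Optional("port", default=1883,
#                      description={"suggested_value": 1884}): int,
#     })
#     get_default(schema.schema, "port")    # -> 1883
#     get_suggested(schema.schema, "port")  # -> 1884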
async def test_option_flow_default_suggested_values(
hass, mqtt_mock, mock_try_connection
):
"""Test config flow options has default/suggested values."""
mock_try_connection.return_value = True
config_entry = hass.config_entries.async_entries(mqtt.DOMAIN)[0]
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
mqtt.CONF_DISCOVERY: True,
mqtt.CONF_BIRTH_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/online",
mqtt.ATTR_PAYLOAD: "online",
mqtt.ATTR_QOS: 1,
mqtt.ATTR_RETAIN: True,
},
mqtt.CONF_WILL_MESSAGE: {
mqtt.ATTR_TOPIC: "ha_state/offline",
mqtt.ATTR_PAYLOAD: "offline",
mqtt.ATTR_QOS: 2,
mqtt.ATTR_RETAIN: False,
},
}
# Test default/suggested values from config
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
defaults = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
suggested = {
mqtt.CONF_USERNAME: "user",
mqtt.CONF_PASSWORD: "pass",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
mqtt.CONF_USERNAME: "us3r",
mqtt.CONF_PASSWORD: "p4ss",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
defaults = {
mqtt.CONF_DISCOVERY: True,
"birth_qos": 1,
"birth_retain": True,
"will_qos": 2,
"will_retain": False,
}
suggested = {
"birth_topic": "ha_state/online",
"birth_payload": "online",
"will_topic": "ha_state/offline",
"will_payload": "offline",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: False,
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"birth_qos": 2,
"birth_retain": False,
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
"will_qos": 1,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# Test updated default/suggested values from config
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "broker"
defaults = {
mqtt.CONF_BROKER: "another-broker",
mqtt.CONF_PORT: 2345,
}
suggested = {
mqtt.CONF_USERNAME: "us3r",
mqtt.CONF_PASSWORD: "p4ss",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "options"
defaults = {
mqtt.CONF_DISCOVERY: False,
"birth_qos": 2,
"birth_retain": False,
"will_qos": 1,
"will_retain": True,
}
suggested = {
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
}
for k, v in defaults.items():
assert get_default(result["data_schema"].schema, k) == v
for k, v in suggested.items():
assert get_suggested(result["data_schema"].schema, k) == v
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
mqtt.CONF_DISCOVERY: True,
"birth_topic": "ha_state/onl1ne",
"birth_payload": "onl1ne",
"birth_qos": 2,
"birth_retain": False,
"will_topic": "ha_state/offl1ne",
"will_payload": "offl1ne",
"will_qos": 1,
"will_retain": True,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# Make sure all MQTT related jobs are done before ending the test
await hass.async_block_till_done()
async def test_options_user_connection_fails(hass, mock_try_connection):
"""Test if connection cannot be made."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = False
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "bad-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "cannot_connect"
# Check we tried the connection
assert len(mock_try_connection.mock_calls) == 1
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
async def test_options_bad_birth_message_fails(hass, mock_try_connection):
"""Test bad birth message."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = True
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["step_id"] == "options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"birth_topic": "ha_state/online/#"},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "bad_birth"
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
async def test_options_bad_will_message_fails(hass, mock_try_connection):
"""Test bad will message."""
config_entry = MockConfigEntry(domain=mqtt.DOMAIN)
config_entry.add_to_hass(hass)
config_entry.data = {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
mock_try_connection.return_value = True
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == "form"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={mqtt.CONF_BROKER: "another-broker", mqtt.CONF_PORT: 2345},
)
assert result["type"] == "form"
assert result["step_id"] == "options"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={"will_topic": "ha_state/offline/#"},
)
assert result["type"] == "form"
assert result["errors"]["base"] == "bad_will"
# Check config entry did not update
assert config_entry.data == {
mqtt.CONF_BROKER: "test-broker",
mqtt.CONF_PORT: 1234,
}
|
{
"content_hash": "96a44209fb49290c06d83cdcb645015e",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 87,
"avg_line_length": 32.57903780068729,
"alnum_prop": 0.6043985021887032,
"repo_name": "home-assistant/home-assistant",
"id": "befdc139eeba7edbbd0de3208d409464b61ff98a",
"size": "18961",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
r"""
Biological sequences (:mod:`skbio.core.sequence`)
=================================================
.. currentmodule:: skbio.core.sequence
This module provides functionality for working with biological sequences,
including generic sequences, nucleotide sequences, DNA sequences, and RNA
sequences. Class methods and attributes are also available to obtain valid
character sets, complement maps for different sequence types, and for
obtaining degenerate character definitions.
Classes
-------
.. autosummary::
:toctree: generated/
BiologicalSequence
NucleotideSequence
DNASequence
RNASequence
Examples
--------
>>> from skbio.core.sequence import DNASequence, RNASequence
New sequences are created with optional identifier and description fields.
>>> d1 = DNASequence('ACC--G-GGTA..')
>>> d1 = DNASequence('ACC--G-GGTA..',identifier="seq1")
>>> d1 = DNASequence('ACC--G-GGTA..',identifier="seq1",description="GFP")
New sequences can also be created from existing sequences, for example as their
reverse complement or degapped (i.e., unaligned) version.
>>> d2 = d1.degap()
>>> d1
<DNASequence: ACC--G-GGT... (length: 13)>
>>> d2
<DNASequence: ACCGGGTA (length: 8)>
>>> d3 = d2.reverse_complement()
>>> d3
<DNASequence: TACCCGGT (length: 8)>
It's also straightforward to compute distances between sequences (optionally
using user-defined distance metrics; the default is Hamming distance) for use
in sequence clustering, phylogenetic reconstruction, etc.
>>> d4 = DNASequence('GACCCGCT')
>>> d5 = DNASequence('GACCCCCT')
>>> d3.distance(d4)
0.25
>>> d3.distance(d5)
0.375
Class-level methods contain information about the molecule types.
>>> DNASequence.iupac_degeneracies()['B']
set(['C', 'T', 'G'])
>>> RNASequence.iupac_degeneracies()['B']
set(['C', 'U', 'G'])
>>> DNASequence.is_gap('-')
True
"""
from __future__ import division
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from collections import Sequence
from itertools import izip, product
from scipy.spatial.distance import hamming
from skbio.core.exception import BiologicalSequenceError
class BiologicalSequence(Sequence):
"""Base class for biological sequences.
Parameters
----------
sequence : python Sequence (e.g., str, list or tuple)
The biological sequence.
identifier : str, optional
The sequence identifier (e.g., an accession number).
description : str, optional
A description or comment about the sequence (e.g., "green
fluorescent protein").
validate : bool, optional
If True, runs the `is_valid` method after construction and raises
BiologicalSequenceError if ``is_valid == False``.
Attributes
----------
description
identifier
Raises
------
skbio.core.exception.BiologicalSequenceError
If ``validate == True`` and ``is_valid == False``.
See Also
--------
NucleotideSequence
DNASequence
RNASequence
Notes
-----
`BiologicalSequence` objects are immutable. Where applicable, methods
return a new object of the same class.
    Subclasses are typically defined by methods relevant only to a specific
    type of biological sequence, and restrict their characters to those in
    the IUPAC standard character set [1]_ for that molecule type.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> t = BiologicalSequence('GGUCCUGAAGGU')
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
"""
@classmethod
def alphabet(cls):
"""Return the set of characters allowed in a `BiologicalSequence`.
Returns
-------
set
Characters that are allowed in a valid `BiologicalSequence`.
See Also
--------
is_valid
gap_alphabet
unsupported_characters
has_unsupported_characters
"""
return set()
@classmethod
def gap_alphabet(cls):
"""Return the set of characters defined as gaps.
Returns
-------
set
Characters defined as gaps in a `BiologicalSequence`
See Also
--------
alphabet
unsupported_characters
has_unsupported_characters
degap
gap_maps
gap_vector
"""
return set('-.')
def __init__(self, sequence, identifier="", description="",
validate=False):
self._sequence = ''.join(sequence)
self._identifier = identifier
self._description = description
if validate and not self.is_valid():
unsupported_chars = self.unsupported_characters()
raise BiologicalSequenceError(
"Sequence contains unsupported characters: %s"
% (" ".join(unsupported_chars)))
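    # Illustrative sketch (added for clarity, not part of the original API):
    # constructing a subclass instance with ``validate=True`` raises on any
    # character outside ``alphabet() | gap_alphabet()``, e.g.
    #
    #   DNASequence('ACGT-Q', validate=True)
    #   # -> BiologicalSequenceError: Sequence contains unsupported characters: Q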
def __contains__(self, other):
"""The in operator.
Parameters
----------
other : str
The putative subsequence.
Returns
-------
bool
Indicates whether `other` is contained in `self`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> 'GGU' in s
True
>>> 'CCC' in s
False
.. shownumpydoc
"""
return other in self._sequence
def __eq__(self, other):
"""The equality operator.
Parameters
----------
other : `BiologicalSequence`
The sequence to test for equality against.
Returns
-------
bool
Indicates whether `self` and `other` are equal.
Notes
-----
`BiologicalSequences` are equal if their sequence is the same and
they are the same type.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> t = BiologicalSequence('GGUCGUGAAGGA')
>>> s == t
True
>>> u = BiologicalSequence('GGUCGUGACCGA')
>>> u == t
False
.. shownumpydoc
"""
if self.__class__ != other.__class__:
return False
elif self._sequence != other._sequence:
return False
else:
return True
def __getitem__(self, i):
"""The indexing operator.
Parameters
----------
i : int
The position to return from the `BiologicalSequence`.
Returns
-------
str
The character at position `i` in the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> s[1]
<BiologicalSequence: G (length: 1)>
.. shownumpydoc
"""
try:
return self.__class__(self._sequence[i],
self.identifier, self.description)
except IndexError:
raise IndexError(
"Position %d is out of range for %r." % (i, self))
def __hash__(self):
"""The hash operator.
Returns
-------
int
The hash of the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> hash(s)
-1080059835405276950
.. shownumpydoc
"""
return hash(self._sequence)
def __iter__(self):
"""The iter operator.
Returns
-------
iterator
Position iterator for the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> for c in s: print c
G
G
U
C
.. shownumpydoc
"""
return iter(self._sequence)
def __len__(self):
"""The len operator.
Returns
-------
int
The length of the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> len(s)
4
.. shownumpydoc
"""
return len(self._sequence)
def __ne__(self, other):
"""The inequality operator.
Parameters
----------
other : `BiologicalSequence`
The sequence to test for inequality against.
Returns
-------
bool
Indicates whether `self` and `other` are not equal.
Notes
-----
`BiologicalSequences` are not equal if their sequence is different or
they are not the same type.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> t = BiologicalSequence('GGUCGUGAAGGA')
>>> s != t
False
>>> u = BiologicalSequence('GGUCGUGACCGA')
>>> u != t
True
.. shownumpydoc
"""
return not self.__eq__(other)
def __repr__(self):
"""The repr method.
Returns
-------
str
Returns a string representation of the object.
Notes
-----
        String representation contains the class name, the first ten
        characters of the sequence followed by ellipses (or the full sequence
        and no ellipses, if the sequence is less than 11 characters long),
        followed by the sequence length.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUCGUGAAGGA')
>>> repr(s)
'<BiologicalSequence: GGUCGUGAAG... (length: 12)>'
>>> t = BiologicalSequence('ACGT')
>>> repr(t)
'<BiologicalSequence: ACGT (length: 4)>'
>>> t
<BiologicalSequence: ACGT (length: 4)>
.. shownumpydoc
"""
first_ten = str(self)[:10]
cn = self.__class__.__name__
length = len(self)
        if length > 10:
            ellipses = "..."
        else:
            ellipses = ""
        return '<%s: %s%s (length: %d)>' % (cn, first_ten, ellipses, length)
def __reversed__(self):
"""The reversed operator.
Returns
-------
iterator
Reverse position iterator for the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> for c in reversed(s): print c
C
U
G
G
.. shownumpydoc
"""
return reversed(self._sequence)
def __str__(self):
"""The str operator
Returns
-------
str
String representation of the `BiologicalSequence`. This will be the
full sequence, but will not contain information about the type, or
`self.identifier` or `self.description`.
See Also
--------
to_fasta
identifier
description
__repr__
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> str(s)
'GGUC'
>>> print s
GGUC
.. shownumpydoc
"""
return ''.join(self._sequence)
@property
def description(self):
"""Return the description of the `BiologicalSequence`
Returns
-------
str
The description attribute of the `BiologicalSequence`
"""
return self._description
@property
def identifier(self):
"""Return the identifier of the `BiologicalSequence`
Returns
-------
str
The identifier attribute of the `BiologicalSequence`
"""
return self._identifier
def count(self, subsequence):
"""Returns the number of occurences of subsequence.
Parameters
----------
subsequence : str
            The subsequence to count occurrences of.
Returns
-------
int
            The number of occurrences of `subsequence` in the
            `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> s.count('G')
2
"""
return self._sequence.count(subsequence)
def degap(self):
"""Returns a new `BiologicalSequence` with gaps characters removed.
Returns
-------
BiologicalSequence
A new `BiologicalSequence` with all characters from
`self.gap_alphabet` filtered from the sequence.
Notes
-----
The type, identifier, and description of the result will be the
same as `self`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC-C--ACGTT-C.')
>>> t = s.degap()
>>> t
<BiologicalSequence: GGUCCACGTT... (length: 11)>
>>> print t
GGUCCACGTTC
"""
gaps = self.gap_alphabet()
result = [e for e in self._sequence if e not in gaps]
return self.__class__(result, identifier=self._identifier,
description=self._description)
def distance(self, other, distance_fn=None):
"""Returns the distance to other
Parameters
----------
other : `BiologicalSequence`
The `BiologicalSequence` to compute the distance to.
distance_fn : function, optional
Function used to compute the distance between `self` and `other`.
If ``None`` (the default), `scipy.spatial.distance.hamming` will be
used.
Returns
-------
float
The distance between `self` and `other`.
Raises
------
skbio.core.exception.BiologicalSequenceError
If ``len(self) != len(other)``.
See Also
--------
fraction_diff
fraction_same
skbio.core.distance.DistanceMatrix
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> t = BiologicalSequence('AGUC')
>>> s.distance(t)
0.25
>>> def dumb_dist(s1, s2): return 0.42
>>> s.distance(t, dumb_dist)
0.42
"""
if distance_fn is None:
distance_fn = hamming
if len(self) != len(other):
raise BiologicalSequenceError(
"Distance can only be computed between BiologicalSequences "
"of equal length.")
return distance_fn(self, other)
def fraction_diff(self, other):
"""Return fraction of positions that differ relative to `other`
Parameters
----------
other : `BiologicalSequence`
The `BiologicalSequence` to compare against.
Returns
-------
float
The fraction of positions that differ between `self` and `other`.
Raises
------
skbio.core.exception.BiologicalSequenceError
If ``len(self) != len(other)``.
See Also
--------
distance
fraction_same
scipy.spatial.distance.hamming
Notes
-----
Computed as the Hamming distance between `self` and `other`. This is
available in addition to `distance` in case the `distance` method is
updated to use something other than ``scipy.spatial.distance.hamming``
as the default distance metric. So, if you specifically want the
fraction of positions that differ, you should use this function instead
of `distance` to ensure backward compatibility.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> t = BiologicalSequence('AGUC')
>>> s.fraction_diff(t)
0.25
"""
return self.distance(other, distance_fn=hamming)
def fraction_same(self, other):
"""Return fraction of positions that are the same relative to `other`
Parameters
----------
other : `BiologicalSequence`
The `BiologicalSequence` to compare against.
Returns
-------
float
The fraction of positions that are the same between `self` and
`other`.
Raises
------
skbio.core.exception.BiologicalSequenceError
If ``len(self) != len(other)``.
See Also
--------
distance
fraction_diff
scipy.spatial.distance.hamming
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('GGUC')
>>> t = BiologicalSequence('AGUC')
>>> s.fraction_same(t)
0.75
"""
return 1. - self.fraction_diff(other)
def gap_maps(self):
"""Return tuples mapping b/w gapped and ungapped positions
Returns
-------
        tuple containing two lists
            The first list has one entry per position in the ungapped
            sequence, giving the position of that base in the gapped
            sequence. The second list has one entry per position in the
            gapped sequence, and each entry is either None (if that position
            represents a gap) or the position of that base in the ungapped
            sequence.
See Also
--------
gap_vector
Notes
-----
Visual aid is useful here. Imagine we have
``BiologicalSequence('-ACCGA-TA-')``. The position numbers in the
ungapped sequence and gapped sequence will be as follows::
            0123456
            ACCGATA
            |||||\\
           -ACCGA-TA-
           0123456789
So, in the first list, position 0 maps to position 1, position 1
maps to position 2, position 5 maps to position 7, ... And, in the
second list, position 0 doesn't map to anything (so it's None),
position 1 maps to position 0, ...
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('-ACCGA-TA-')
>>> m = s.gap_maps()
>>> m[0]
[1, 2, 3, 4, 5, 7, 8]
>>> m[1]
[None, 0, 1, 2, 3, 4, None, 5, 6, None]
"""
degapped_to_gapped = []
gapped_to_degapped = []
non_gap_count = 0
for i, e in enumerate(self):
if self.is_gap(e):
gapped_to_degapped.append(None)
else:
gapped_to_degapped.append(non_gap_count)
degapped_to_gapped.append(i)
non_gap_count += 1
return degapped_to_gapped, gapped_to_degapped
def gap_vector(self):
"""Return list indicating positions containing gaps
Returns
-------
list of booleans
The list will be of length ``len(self)``, and a position will
contain ``True`` if the character at that position in the
`BiologicalSequence` is in `self.gap_alphabet`, and ``False``
otherwise.
See Also
--------
gap_maps
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('..ACG--TT-')
>>> s.gap_vector()
[True, True, False, False, False, True, True, False, False, True]
"""
return map(self.is_gap, self._sequence)
def unsupported_characters(self):
"""Return the set of unsupported characters in the `BiologicalSequence`
Returns
-------
set
Invalid characters in the `BiologicalSequence` (i.e., the
characters that are present in the `BiologicalSequence` but which
are not in `BiologicalSequence.alphabet` or
            `BiologicalSequence.gap_alphabet`).
See Also
--------
is_valid
alphabet
gap_alphabet
has_unsupported_characters
"""
return set(self) - self.alphabet() - self.gap_alphabet()
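    # Example (illustrative): for a DNASequence, only characters outside the
    # IUPAC set and the gap alphabet are reported, so
    #   DNASequence('ACZ-X').unsupported_characters()
    # yields the set of 'Z' and 'X' ('-' is a gap character).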
def has_unsupported_characters(self):
"""Return bool indicating presence/absence of unsupported characters
Returns
-------
bool
``True`` if invalid characters are present in the
`BiologicalSequence` (i.e., characters which are not in
`BiologicalSequence.alphabet` or
`BiologicalSequence.gap_alphabet`) and ``False`` otherwise.
See Also
--------
is_valid
alphabet
gap_alphabet
        unsupported_characters
"""
all_supported = self.alphabet() | self.gap_alphabet()
for e in self:
if e not in all_supported:
return True
return False
def index(self, subsequence):
"""Return the position where subsequence first occurs
Returns
-------
int
The position where `subsequence` first occurs in the
`BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('ACACGACGTT-')
>>> s.index('ACG')
2
"""
try:
return self._sequence.index(subsequence)
except ValueError:
raise ValueError(
"%s is not present in %r." % (subsequence, self))
@classmethod
def is_gap(cls, char):
"""Return True if `char` is in the `gap_alphabet` set
Parameters
----------
char : str
The string to check for presence in the `BiologicalSequence`
`gap_alphabet`.
Returns
-------
bool
Indicates whether `char` is in the `BiologicalSequence` attribute
`gap_alphabet`.
Notes
-----
This is a class method.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> BiologicalSequence.is_gap('.')
True
>>> BiologicalSequence.is_gap('P')
False
>>> s = BiologicalSequence('ACACGACGTT')
>>> s.is_gap('-')
True
"""
return char in cls.gap_alphabet()
def is_gapped(self):
"""Return True if char(s) in `gap_alphabet` are present
Returns
-------
bool
            Indicates whether there are one or more occurrences of any character
in `self.gap_alphabet` in the `BiologicalSequence`.
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('ACACGACGTT')
>>> s.is_gapped()
False
>>> t = BiologicalSequence('A.CAC--GACGTT')
>>> t.is_gapped()
True
"""
for e in self:
if self.is_gap(e):
return True
return False
def is_valid(self):
"""Return True if the sequence is valid
Returns
-------
bool
``True`` if `self` is valid, and ``False`` otherwise.
Notes
-----
Validity is defined as not containing any characters outside of
`self.alphabet` and `self.gap_alphabet`.
"""
return not self.has_unsupported_characters()
def lower(self):
"""Convert the BiologicalSequence to lowercase
Returns
-------
BiologicalSequence
The `BiologicalSequence` with all characters converted to
lowercase.
"""
return self.__class__(self._sequence.lower(),
self.identifier, self.description)
def to_fasta(self, field_delimiter=" ", terminal_character="\n"):
"""Return the sequence as a fasta-formatted string
Parameters
----------
field_delimiter : str, optional
The character(s) to use on the header line between the
`self.identifier` and `self.description`.
terminal_character : str, optional
The last character to be included in the result (if you don't want
a trailing newline or other character in the result, you can pass
``terminal_character=""``).
Returns
-------
str
The `BiologicalSequence` as a fasta-formatted string.
See Also
--------
__str__
Examples
--------
>>> from skbio.core.sequence import BiologicalSequence
>>> s = BiologicalSequence('ACACGACGTT')
>>> print s.to_fasta(terminal_character="")
>
ACACGACGTT
>>> t = BiologicalSequence('ACA',identifier='my-seq',description='h')
>>> print t.to_fasta(terminal_character="")
>my-seq h
ACA
"""
if self._description:
header_line = '%s%s%s' % (self._identifier, field_delimiter,
self._description)
else:
header_line = self._identifier
return '>%s\n%s%s' % (
header_line, str(self), terminal_character)
def upper(self):
"""Convert the BiologicalSequence to uppercase
Returns
-------
BiologicalSequence
The `BiologicalSequence` with all characters converted to
uppercase.
"""
return self.__class__(self._sequence.upper(),
self.identifier, self.description)
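    # Both case conversions preserve the type, identifier, and description of
    # the sequence. Illustrative check (equality compares type and sequence):
    #   BiologicalSequence('AcGt').upper() == BiologicalSequence('ACGT')  # True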
class NucleotideSequence(BiologicalSequence):
"""Base class for nucleotide sequences.
A `NucleotideSequence` is a `BiologicalSequence` with additional methods
that are only applicable for nucleotide sequences, and containing only
characters used in the IUPAC DNA or RNA lexicon.
See Also
--------
    BiologicalSequence
Notes
-----
All uppercase and lowercase IUPAC DNA/RNA characters are supported.
"""
@classmethod
def alphabet(cls):
"""Return the set of characters allowed in a `NucleotideSequence`.
Returns
-------
set
Characters that are allowed in a valid `NucleotideSequence`.
"""
return cls.iupac_characters()
@classmethod
def complement_map(cls):
"""Return the mapping of characters to their complements.
Returns
-------
dict
Mapping of characters to their complements.
Notes
-----
Complements cannot be defined for a generic `NucleotideSequence`
because the complement of 'A' is ambiguous.
`NucleotideSequence.complement_map` will therefore be the empty dict.
Thanks, nature...
"""
return {}
@classmethod
def iupac_standard_characters(cls):
"""Return the non-degenerate IUPAC nucleotide characters.
Returns
-------
set
Non-degenerate IUPAC nucleotide characters.
"""
return set("ACGTUacgtu")
@classmethod
def iupac_degeneracies(cls):
"""Return the mapping of degenerate to non-degenerate characters.
Returns
-------
dict of sets
Mapping of IUPAC degenerate nucleotide character to the set of
non-degenerate IUPAC nucleotide characters it represents.
"""
degen_map = {
"R": set("AG"), "Y": set("CTU"), "M": set("AC"), "K": set("TUG"),
"W": set("ATU"), "S": set("GC"), "B": set("CGTU"),
"D": set("AGTU"), "H": set("ACTU"), "V": set("ACG"),
"N": set("ACGTU")
}
for degen_char in list(degen_map.keys()):
nondegen_chars = degen_map[degen_char]
degen_map[degen_char.lower()] = set(
''.join(nondegen_chars).lower())
return degen_map
@classmethod
def iupac_degenerate_characters(cls):
"""Return the degenerate IUPAC nucleotide characters.
Returns
-------
set
Degenerate IUPAC nucleotide characters.
"""
return set(cls.iupac_degeneracies())
@classmethod
def iupac_characters(cls):
"""Return the non-degenerate and degenerate nucleotide characters.
Returns
-------
set
Non-degenerate and degenerate nucleotide characters.
"""
return (cls.iupac_standard_characters() |
cls.iupac_degenerate_characters())
def _complement(self, seq_iterator):
"""Returns `NucleotideSequence` that is complement of `seq_iterator`
Parameters
----------
seq_iterator : iterator
The `BiologicalSequence` to be complemented.
Returns
-------
        NucleotideSequence
The complement of the sequence represented by `seq_iterator`.
Specific type will be the same as ``type(self)``.
Raises
------
skbio.core.exception.BiologicalSequenceError
If a character is present in the `NucleotideSequence` that is not
in the complement map.
Notes
-----
This private method centralizes the logic for `complement` and
`reverse_complement` by taking the sequence as an iterator (so it can
be passed the result of either `iter` or `reversed`).
"""
result = []
complement_map = self.complement_map()
for base in seq_iterator:
try:
result.append(complement_map[base])
except KeyError:
raise BiologicalSequenceError(
"Don't know how to complement base %s. Is it in "
"%s.complement_map?" % (base, self.__class__.__name__))
return self.__class__(result, self._identifier, self._description)
def complement(self):
"""Return the complement of the `NucleotideSequence`
Returns
-------
        NucleotideSequence
The complement of `self`. Specific type will be the same as
``type(self)``.
Raises
------
skbio.core.exception.BiologicalSequenceError
If a character is present in the `NucleotideSequence` that is not
in `self.complement_map`.
See Also
--------
reverse_complement
complement_map
"""
return self._complement(self)
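    # Illustrative usage on a concrete subclass (the generic
    # NucleotideSequence has an empty complement_map, so it raises):
    #   DNASequence('ACGTN').complement()    # -> <DNASequence: TGCAN (length: 5)>
    #   NucleotideSequence('A').complement() # raises BiologicalSequenceError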
def is_reverse_complement(self, other):
"""Return True if `other` is the reverse complement of `self`
Returns
-------
bool
`True` if `other` is the reverse complement of `self` and `False`
otherwise.
Raises
------
skbio.core.exception.BiologicalSequenceError
If a character is present in `other` that is not in the
`self.complement_map`.
See Also
--------
reverse_complement
"""
return self == other.reverse_complement()
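    # For example (illustrative): 'ACGT' is its own reverse complement, so
    #   DNASequence('ACGT').is_reverse_complement(DNASequence('ACGT'))  # True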
def reverse_complement(self):
"""Return the reverse complement of the `NucleotideSequence`
Returns
-------
        NucleotideSequence
The reverse complement of `self`. Specific type will be the same as
``type(self)``.
Raises
------
skbio.core.exception.BiologicalSequenceError
If a character is present in the `NucleotideSequence` that is not
in `self.complement_map`.
See Also
--------
complement
complement_map
is_reverse_complement
"""
return self._complement(reversed(self))
rc = reverse_complement
def nondegenerates(self):
"""Yield all nondegenerate versions of the sequence.
Returns
-------
generator
Generator yielding all possible nondegenerate versions of the
sequence. Each sequence will have the same type, identifier, and
description as `self`.
Raises
------
BiologicalSequenceError
If the sequence contains an invalid character (a character that
isn't an IUPAC character or a gap character).
See Also
--------
iupac_degeneracies
Notes
-----
There is no guaranteed ordering to the generated sequences.
Examples
--------
>>> from skbio.core.sequence import NucleotideSequence
>>> seq = NucleotideSequence('TRG')
>>> seq_generator = seq.nondegenerates()
>>> for s in sorted(seq_generator, key=str): print(s)
TAG
TGG
"""
degen_chars = self.iupac_degeneracies()
nonexpansion_chars = self.iupac_standard_characters().union(
self.gap_alphabet())
expansions = []
for char in self:
if char in nonexpansion_chars:
expansions.append(char)
else:
# Use a try/except instead of explicitly checking for set
# membership on the assumption that an exception is rarely
# thrown.
try:
expansions.append(degen_chars[char])
except KeyError:
raise BiologicalSequenceError(
"Sequence contains an invalid character: %s" % char)
result = product(*expansions)
# Cache lookups here as there may be a lot of sequences to generate.
# Could use functools.partial, but it ends up being a little slower
# than this method.
id_ = self.identifier
desc = self.description
cls = self.__class__
return (cls(nondegen_seq, id_, desc) for nondegen_seq in result)
class DNASequence(NucleotideSequence):
"""Base class for DNA sequences.
    A `DNASequence` is a `NucleotideSequence` that is restricted to only
    containing characters used in the IUPAC DNA lexicon.
See Also
--------
NucleotideSequence
BiologicalSequence
Notes
-----
All uppercase and lowercase IUPAC DNA characters are supported.
"""
@classmethod
def complement_map(cls):
"""Return the mapping of characters to their complements.
The complement of a gap character is itself.
Returns
-------
dict
Mapping of characters to their complements.
"""
comp_map = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N', 'a': 't', 't': 'a', 'g': 'c',
'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
}
comp_map.update({c: c for c in cls.gap_alphabet()})
return comp_map
@classmethod
def iupac_standard_characters(cls):
"""Return the non-degenerate IUPAC DNA characters.
Returns
-------
set
Non-degenerate IUPAC DNA characters.
"""
return set("ACGTacgt")
@classmethod
def iupac_degeneracies(cls):
"""Return the mapping of degenerate to non-degenerate characters.
Returns
-------
dict of sets
Mapping of IUPAC degenerate DNA character to the set of
non-degenerate IUPAC DNA characters it represents.
"""
degen_map = {
"R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
"W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
"H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
}
for degen_char in list(degen_map.keys()):
nondegen_chars = degen_map[degen_char]
degen_map[degen_char.lower()] = set(
''.join(nondegen_chars).lower())
return degen_map
# class is accessible with alternative name for convenience
DNA = DNASequence
class RNASequence(NucleotideSequence):
"""Base class for RNA sequences.
    An `RNASequence` is a `NucleotideSequence` that is restricted to only
containing characters used in the IUPAC RNA lexicon.
Notes
-----
All uppercase and lowercase IUPAC RNA characters are supported.
"""
@classmethod
def complement_map(cls):
"""Return the mapping of characters to their complements.
The complement of a gap character is itself.
Returns
-------
dict
Mapping of characters to their complements.
"""
comp_map = {
'A': 'U', 'U': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N', 'a': 'u', 'u': 'a', 'g': 'c',
'c': 'g', 'y': 'r', 'r': 'y', 's': 's', 'w': 'w', 'k': 'm',
'm': 'k', 'b': 'v', 'd': 'h', 'h': 'd', 'v': 'b', 'n': 'n'
}
comp_map.update({c: c for c in cls.gap_alphabet()})
return comp_map
@classmethod
def iupac_standard_characters(cls):
"""Return the non-degenerate IUPAC RNA characters.
Returns
-------
set
Non-degenerate IUPAC RNA characters.
"""
return set("ACGUacgu")
@classmethod
def iupac_degeneracies(cls):
"""Return the mapping of degenerate to non-degenerate characters.
Returns
-------
dict of sets
Mapping of IUPAC degenerate RNA character to the set of
non-degenerate IUPAC RNA characters it represents.
"""
degen_map = {
"R": set("AG"), "Y": set("CU"), "M": set("AC"), "K": set("UG"),
"W": set("AU"), "S": set("GC"), "B": set("CGU"), "D": set("AGU"),
"H": set("ACU"), "V": set("ACG"), "N": set("ACGU")
}
for degen_char in list(degen_map.keys()):
nondegen_chars = degen_map[degen_char]
degen_map[degen_char.lower()] = set(
''.join(nondegen_chars).lower())
return degen_map
# class is accessible with alternative name for convenience
RNA = RNASequence
|
{
"content_hash": "5ae63a9a0e35621a9b2576ad63f04a7d",
"timestamp": "",
"source": "github",
"line_count": 1417,
"max_line_length": 79,
"avg_line_length": 27.726887791107973,
"alnum_prop": 0.5438926926111635,
"repo_name": "Jorge-C/bipy",
"id": "3a6304b5f292ebb9b240c0db6056b37174cfcd7b",
"size": "39311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skbio/core/sequence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1007233"
}
],
"symlink_target": ""
}
|
import datetime, hashlib, feedparser
from django.core.management.base import BaseCommand, CommandError
from aggregator.models import Feed, FeedPost
from unidecode import unidecode
class Command(BaseCommand):
args = ''
help = 'Updates all feeds'
def handle(self, *args, **options):
all_feeds = Feed.objects.all()
self.stdout.write('Updating feeds...\n')
for f in all_feeds:
self.stdout.write('\n\n' + f.title + ' (' + f.feed_url + ')\n')
parsed = feedparser.parse(f.feed_url)
new_posts = 0
for e in parsed.entries:
h = hashlib.new('md5')
h.update(unidecode(f.title))
h.update(unidecode(f.feed_url))
h.update(unidecode(e.title))
h.update(unidecode(e.link))
h.update(unidecode(e.description))
digest = h.hexdigest()
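                # The digest fingerprints a post by its feed and entry fields,
                # so re-running this command does not store duplicates.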
post = FeedPost.objects.filter(hash=digest)
if len(post) == 0:
new_posts += 1
post = FeedPost()
post.from_feed = f
post.title = e.title
post.url = e.link
post.description = e.description
post.date = datetime.datetime.now()
post.hash = digest
if not f.moderate:
post.published = True
post.save()
#self.stdout.write(u'* ' + e.title + '\n ' + e.link + ' :: \n')
#self.stdout.write(u'* ' + e.title + '\n ' + e.link + ' :: '+unicode(digest)+'\n')
self.stdout.write(unicode(new_posts) + u' new posts in ' + f.title + u'\n\n')
f.save()
|
{
"content_hash": "e385aac2498a9aaf0c40be7adce3ce75",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 88,
"avg_line_length": 25.696428571428573,
"alnum_prop": 0.6004169562195969,
"repo_name": "wd5/jangr",
"id": "3879367e2293462badb7e568e126c1e11008d355",
"size": "1463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aggregator/management/commands/update_feeds.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
'''Provides an object model for a Singer Catalog.'''
import json
import sys
from . import metadata as metadata_module
from .bookmarks import get_currently_syncing
from .logger import get_logger
from .schema import Schema
LOGGER = get_logger()
def write_catalog(catalog):
# If the catalog has no streams, log a warning
if not catalog.streams:
LOGGER.warning("Catalog being written with no streams.")
json.dump(catalog.to_dict(), sys.stdout, indent=2)
# pylint: disable=too-many-instance-attributes
class CatalogEntry():
def __init__(self, tap_stream_id=None, stream=None,
key_properties=None, schema=None, replication_key=None,
is_view=None, database=None, table=None, row_count=None,
stream_alias=None, metadata=None, replication_method=None):
self.tap_stream_id = tap_stream_id
self.stream = stream
self.key_properties = key_properties
self.schema = schema
self.replication_key = replication_key
self.replication_method = replication_method
self.is_view = is_view
self.database = database
self.table = table
self.row_count = row_count
self.stream_alias = stream_alias
self.metadata = metadata
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def is_selected(self):
mdata = metadata_module.to_map(self.metadata)
# pylint: disable=no-member
return self.schema.selected or metadata_module.get(mdata, (), 'selected')
def to_dict(self):
result = {}
if self.tap_stream_id:
result['tap_stream_id'] = self.tap_stream_id
if self.database:
result['database_name'] = self.database
if self.table:
result['table_name'] = self.table
if self.replication_key is not None:
result['replication_key'] = self.replication_key
if self.replication_method is not None:
result['replication_method'] = self.replication_method
if self.key_properties is not None:
result['key_properties'] = self.key_properties
if self.schema is not None:
schema = self.schema.to_dict() # pylint: disable=no-member
result['schema'] = schema
if self.is_view is not None:
result['is_view'] = self.is_view
if self.stream is not None:
result['stream'] = self.stream
if self.row_count is not None:
result['row_count'] = self.row_count
if self.stream_alias is not None:
result['stream_alias'] = self.stream_alias
if self.metadata is not None:
result['metadata'] = self.metadata
return result
class Catalog():
def __init__(self, streams):
self.streams = streams
def __str__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
@classmethod
def load(cls, filename):
with open(filename) as fp: # pylint: disable=invalid-name
return Catalog.from_dict(json.load(fp))
@classmethod
def from_dict(cls, data):
# TODO: We may want to store streams as a dict where the key is a
# tap_stream_id and the value is a CatalogEntry. This will allow
# faster lookup based on tap_stream_id. This would be a breaking
# change, since callers typically access the streams property
# directly.
streams = []
for stream in data['streams']:
entry = CatalogEntry()
entry.tap_stream_id = stream.get('tap_stream_id')
entry.stream = stream.get('stream')
entry.replication_key = stream.get('replication_key')
entry.key_properties = stream.get('key_properties')
entry.database = stream.get('database_name')
entry.table = stream.get('table_name')
entry.schema = Schema.from_dict(stream.get('schema'))
entry.is_view = stream.get('is_view')
entry.stream_alias = stream.get('stream_alias')
entry.metadata = stream.get('metadata')
entry.replication_method = stream.get('replication_method')
streams.append(entry)
return Catalog(streams)
def to_dict(self):
return {'streams': [stream.to_dict() for stream in self.streams]}
def dump(self):
json.dump(self.to_dict(), sys.stdout, indent=2)
def get_stream(self, tap_stream_id):
for stream in self.streams:
if stream.tap_stream_id == tap_stream_id:
return stream
return None
def _shuffle_streams(self, state):
currently_syncing = get_currently_syncing(state)
if currently_syncing is None:
return self.streams
matching_index = 0
for i, catalog_entry in enumerate(self.streams):
if catalog_entry.tap_stream_id == currently_syncing:
matching_index = i
break
top_half = self.streams[matching_index:]
bottom_half = self.streams[:matching_index]
return top_half + bottom_half
def get_selected_streams(self, state):
for stream in self._shuffle_streams(state):
if not stream.is_selected():
LOGGER.info('Skipping stream: %s', stream.tap_stream_id)
continue
yield stream
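# Illustrative sketch (the names below are hypothetical): resuming a sync run.
# Given state from a previous run, the currently syncing stream is yielded
# first, followed by the remaining selected streams:
#
#   catalog = Catalog.load('catalog.json')
#   state = {'currently_syncing': 'users'}
#   for stream in catalog.get_selected_streams(state):
#       sync(stream)  # 'users' resumes first, if it is selected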
|
{
"content_hash": "6bdd1041e52dda0f9eb8d03d49c74179",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 81,
"avg_line_length": 35.22435897435897,
"alnum_prop": 0.6032757051865332,
"repo_name": "singer-io/singer-python",
"id": "1767ff15c8e5b0c6c551dbcac0a3e4db661397cb",
"size": "5495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "singer/catalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "447"
},
{
"name": "Python",
"bytes": "127839"
}
],
"symlink_target": ""
}
|
from . import util
DOCKER_IMAGE = 'gcr.io/skia-public/cmake-release:3.13.1_v2'
INNER_BUILD_SCRIPT = '/SRC/skia/infra/cmake/build_skia.sh'
def compile_fn(api, checkout_root, _ignore):
out_dir = api.vars.cache_dir.join('docker', 'cmake')
configuration = api.vars.builder_cfg.get('configuration', '')
if configuration != 'Release': # pragma: nocover
# If a debug mode is wanted, update the infra/cmake/build_skia.sh
# to support that also.
    raise Exception('Only Release mode supported for CMake')
# We want to make sure the directories exist and were created by chrome-bot,
# because if that isn't the case, docker will make them and they will be
# owned by root, which causes mysterious failures. To mitigate this risk
  # further, we don't use the same out_dir as everyone else (thus the _ignore
  # param). Instead, we use a "cmake" subdirectory in the "docker" named_cache.
api.file.ensure_directory('mkdirs out_dir', out_dir, mode=0777)
# This uses the cmake docker image and says "run the
# build_skia.sh helper script in there". Additionally, it binds two
# folders: the Skia checkout to /SRC and the output directory to /OUT
# The called helper script will make the compile happen and put the
# output in the right spot. The neat thing is that since the Skia checkout
# (and, by extension, the build script) is not a part of the image, but
# bound in at runtime, we don't have to re-build the image, except when the
# toolchain changes.
cmd = ['docker', 'run', '--rm', '--volume', '%s:/SRC' % checkout_root,
'--volume', '%s:/OUT' % out_dir,
DOCKER_IMAGE, INNER_BUILD_SCRIPT]
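  # For reference, the command assembled above is roughly equivalent to
  # running (paths illustrative):
  #   docker run --rm --volume <checkout>:/SRC --volume <out_dir>:/OUT \
  #       gcr.io/skia-public/cmake-release:3.13.1_v2 /SRC/skia/infra/cmake/build_skia.sh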
  # It's always Release mode (for now?). This is mostly an FYI
# thing, to make sure we don't break CMake users.
# Override DOCKER_CONFIG set by Kitchen.
env = {'DOCKER_CONFIG': '/home/chrome-bot/.docker'}
with api.env(env):
api.run(
api.step,
'Build Skia using CMake in Docker',
cmd=cmd)
def copy_build_products(api, src, dst):
util.copy_listed_files(api, src, dst, util.DEFAULT_BUILD_PRODUCTS)
|
{
"content_hash": "a5def6297e4b26267812883c8cb06569",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 46.044444444444444,
"alnum_prop": 0.6916023166023166,
"repo_name": "endlessm/chromium-browser",
"id": "22de11f0fdf23b1e4e6fdbe0347c93e6a8fb671c",
"size": "2235",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/skia/infra/bots/recipe_modules/build/cmake.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import print_function, division
from sympy.core.compatibility import range
from sympy.core.decorators import wraps
def recurrence_memo(initial):
"""
Memo decorator for sequences defined by recurrence
See usage examples e.g. in the specfun/combinatorial module
"""
cache = initial
def decorator(f):
@wraps(f)
def g(n):
L = len(cache)
if n <= L - 1:
return cache[n]
for i in range(L, n + 1):
cache.append(f(i, cache))
return cache[-1]
return g
return decorator
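# Illustrative use (a hypothetical Fibonacci recurrence; the real call sites
# live in the combinatorial special-function modules):
#
#   @recurrence_memo([0, 1])
#   def fib(n, prev):
#       return prev[-1] + prev[-2]
#
#   fib(10)  # -> 55, with all intermediate values cached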
def assoc_recurrence_memo(base_seq):
"""
    Memo decorator for associated sequences defined by recurrence starting
    from a base sequence
base_seq(n) -- callable to get base sequence elements
XXX works only for Pn0 = base_seq(0) cases
XXX works only for m <= n cases
"""
cache = []
def decorator(f):
@wraps(f)
def g(n, m):
L = len(cache)
if n < L:
return cache[n][m]
for i in range(L, n + 1):
# get base sequence
F_i0 = base_seq(i)
F_i_cache = [F_i0]
cache.append(F_i_cache)
# XXX only works for m <= n cases
# generate assoc sequence
for j in range(1, i + 1):
F_ij = f(i, j, cache)
F_i_cache.append(F_ij)
return cache[n][m]
return g
return decorator
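# Illustrative use (hypothetical): binomial coefficients as an associated
# sequence whose base column is all ones:
#
#   @assoc_recurrence_memo(lambda n: 1)
#   def binomial(n, m, prev):
#       return prev[n - 1][m - 1] + (prev[n - 1][m] if m < n else 0)
#
#   binomial(4, 2)  # -> 6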
|
{
"content_hash": "7e580ffe6a2c16ae55a092241863a0fa",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 84,
"avg_line_length": 24.725806451612904,
"alnum_prop": 0.5146771037181996,
"repo_name": "kaushik94/sympy",
"id": "6d23a15017658901d449164a58225d30eee1eade",
"size": "1533",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sympy/utilities/memoization.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from django import forms
from django.core.validators import RegexValidator
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from .models import Drug, Category
class UserModelChoiceField(forms.ModelChoiceField):
# return the name instead of the object in the select option list
def label_from_instance(self, obj):
return obj.name
class AddDrugsForm(ModelForm):
id = forms.CharField(label=_("Drug ID"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Drug ID',
'readonly': 'readonly'}))
friendly_name = forms.CharField(label=_("Friendly Name"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Friendly name',
'readonly': 'readonly'}))
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', 'Only alphanumeric characters are allowed.')
availability = forms.IntegerField(validators=[alphanumeric], label=_("Availability"), required=True,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Availability'}))
description = forms.CharField(label=_("Description"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Description'}))
price = forms.DecimalField(label=_("Price"), required=True, max_digits=5, decimal_places=2,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Price',
'readonly': 'readonly'}))
category = UserModelChoiceField(queryset=Category.objects.all(), widget=forms.Select({
'class': 'form-control',
'placeholder': 'Category'}))
error_css_class = 'error'
class Meta:
model = Drug
fields = ['id', 'friendly_name', 'availability', 'description', 'price', 'category']
def __init__(self, *args, **kwargs):
categoryChoices = kwargs.pop("categorychoices") # categorychoices is the parameter passed from views.py
category = kwargs.pop("category")
super(AddDrugsForm, self).__init__(*args, **kwargs)
        if category is not None:
            self.category = category
if categoryChoices is not None:
self.fields['category'] = forms.ChoiceField(label="Category", choices=categoryChoices, widget=forms.Select({
'class': 'form-control',
'placeholder': 'Category'}))
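    # Hypothetical instantiation from a view (both extra keyword arguments
    # are required by __init__ above):
    #   AddDrugsForm(request.POST or None,
    #                categorychoices=[(c.id, c.name) for c in Category.objects.all()],
    #                category=None)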
class UpdateDrugsForm(ModelForm):
id = forms.CharField(label=_("Drug ID"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Drug ID',
'readonly': 'readonly'}))
friendly_name = forms.CharField(label=_("Friendly Name"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Friendly name',
'readonly': 'readonly'}))
alphanumeric = RegexValidator(r'^[0-9a-zA-Z]*$', 'Only alphanumeric characters are allowed.')
availability = forms.IntegerField(validators=[alphanumeric], label=_("Availability"), required=True,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Availability'}))
description = forms.CharField(label=_("Description"), required=True, max_length=254,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Description'}))
price = forms.DecimalField(label=_("Price"), required=True, max_digits=5, decimal_places=2,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Price',
'readonly': 'readonly'}))
category = UserModelChoiceField(queryset=Category.objects.all(), widget=forms.Select({
'class': 'form-control',
'placeholder': 'Category'}))
class Meta:
model = Drug
fields = ['id', 'friendly_name', 'availability', 'description', 'price', 'category']
def __init__(self, *args, **kwargs):
super(UpdateDrugsForm, self).__init__(*args, **kwargs)
class AddDrugCategoryForm(ModelForm):
name = forms.CharField(label=_("Category Name"), required=True, max_length=50,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Category Name'}))
description = forms.CharField(label=_("Category Description"), required=True, max_length=500,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Category Description'}))
class Meta:
model = Category
fields = ['name', 'description']
class UpdateDrugCategoryForm(ModelForm):
id = forms.IntegerField(label=_("Category ID"), required=True,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Category ID',
'readonly': 'readonly'}))
name = forms.CharField(label=_("Category Name"), required=True, max_length=50,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Category Name'}))
description = forms.CharField(label=_("Category Description"), required=True, max_length=500,
widget=forms.TextInput({
'class': 'form-control',
'placeholder': 'Category Description'}))
class Meta:
model = Category
fields = ['id', 'name', 'description']
|
{
"content_hash": "186d949fd7a9ae888e4d6106e4f6e62c",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 120,
"avg_line_length": 46.156462585034014,
"alnum_prop": 0.5025792188651437,
"repo_name": "thodoris/djangoPharma",
"id": "6db68b9ac86431a1a6d40e8ea71731dfe4aef6e2",
"size": "6785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangoPharma/drugs/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "90256"
},
{
"name": "HTML",
"bytes": "104080"
},
{
"name": "JavaScript",
"bytes": "7713"
},
{
"name": "Python",
"bytes": "68512"
}
],
"symlink_target": ""
}
|
import os
import sys
import errno
import importlib
import contextlib
from maya import cmds, OpenMaya
import maya.utils
import maya.api.OpenMaya as om
from pyblish import api as pyblish
from . import lib, compat
from ..lib import logger, find_submodule
from .. import api
from ..tools import workfiles
from ..vendor.Qt import QtCore, QtWidgets
from ..pipeline import AVALON_CONTAINER_ID
# Backwards compatibility
load = compat.load
update = compat.update
remove = compat.remove
create = compat.create
self = sys.modules[__name__]
self._menu = "avalonmaya" # Unique name of menu
self._events = dict() # Registered Maya callbacks
self._parent = None # Main Window
self._ignore_lock = False
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
IS_HEADLESS = not hasattr(cmds, "about") or cmds.about(batch=True)
def install():
"""Install Maya-specific functionality of avalon-core.
This function is called automatically on calling `api.install(maya)`.
"""
# Inherit globally set name
self._menu = api.Session["AVALON_LABEL"] + "menu"
_register_callbacks()
_register_events()
_set_project()
# Check if maya version is compatible else fix it, Maya2018 only
# Should be run regardless of batch mode
compat.install()
if not IS_HEADLESS:
_install_menu()
pyblish.register_host("mayabatch")
pyblish.register_host("mayapy")
pyblish.register_host("maya")
def _set_project():
"""Sets the maya project to the current Session's work directory.
Returns:
None
"""
workdir = api.Session["AVALON_WORKDIR"]
try:
os.makedirs(workdir)
except OSError as e:
# An already existing working directory is fine.
if e.errno == errno.EEXIST:
pass
else:
raise
cmds.workspace(workdir, openWorkspace=True)
def get_main_window():
"""Acquire Maya's main window"""
if self._parent is None:
self._parent = {
widget.objectName(): widget
for widget in QtWidgets.QApplication.topLevelWidgets()
}["MayaWindow"]
return self._parent
def uninstall():
"""Uninstall Maya-specific functionality of avalon-core.
This function is called automatically on calling `api.uninstall()`.
"""
_uninstall_menu()
pyblish.deregister_host("mayabatch")
pyblish.deregister_host("mayapy")
pyblish.deregister_host("maya")
def _install_menu():
from ..tools import (
projectmanager,
creator,
loader,
publish,
sceneinventory
)
from . import interactive
_uninstall_menu()
def deferred():
cmds.menu(self._menu,
label=api.Session["AVALON_LABEL"],
tearOff=True,
parent="MayaWindow")
# Create context menu
context_label = "{}, {}".format(
api.Session["AVALON_ASSET"],
api.Session["AVALON_TASK"]
)
cmds.menuItem(
"currentContext",
label=context_label,
parent=self._menu,
enable=False
)
cmds.setParent("..", menu=True)
cmds.menuItem(divider=True)
# Create default items
cmds.menuItem("Create...",
command=lambda *args: creator.show(parent=self._parent))
cmds.menuItem("Load...",
command=lambda *args: loader.show(parent=self._parent,
use_context=True))
cmds.menuItem("Publish...",
command=lambda *args: publish.show(parent=self._parent),
image=publish.ICON)
cmds.menuItem("Manage...",
command=lambda *args: sceneinventory.show(
parent=self._parent))
cmds.menuItem(divider=True)
cmds.menuItem("Work Files", command=launch_workfiles_app)
system = cmds.menuItem("System",
label="System",
tearOff=True,
subMenu=True,
parent=self._menu)
cmds.menuItem("Project Manager",
command=lambda *args: projectmanager.show(
parent=self._parent))
cmds.menuItem("Reinstall Avalon",
label="Reinstall Avalon",
subMenu=True,
parent=system)
cmds.menuItem("Confirm", command=reload_pipeline)
cmds.setParent(self._menu, menu=True)
cmds.menuItem("Reset Frame Range",
command=interactive.reset_frame_range)
cmds.menuItem("Reset Resolution",
command=interactive.reset_resolution)
# Allow time for uninstallation to finish.
# We use Maya's executeDeferred instead of QTimer.singleShot
# so that it only gets called after Maya UI has initialized too.
# This is crucial with Maya 2020+ which initializes without UI
# first as a QCoreApplication
maya.utils.executeDeferred(deferred)
def launch_workfiles_app(*args):
workfiles.show(
os.path.join(
cmds.workspace(query=True, rootDirectory=True),
cmds.workspace(fileRuleEntry="scene")
),
parent=self._parent
)
def reload_pipeline(*args):
"""Attempt to reload pipeline at run-time.
CAUTION: This is primarily for development and debugging purposes.
"""
api.uninstall()
for module in ("avalon.io",
"avalon.lib",
"avalon.pipeline",
"avalon.maya.commands",
"avalon.maya.interactive",
"avalon.maya.pipeline",
"avalon.maya.lib",
"avalon.tools.creator.app",
                   # NOTE(marcus): These have circular dependencies
                   #               that prevent reloadability
# "avalon.tools.loader.delegates",
# "avalon.tools.loader.model",
# "avalon.tools.loader.widgets",
# "avalon.tools.loader.app",
# "avalon.tools.sceneinventory.model",
# "avalon.tools.sceneinventory.proxy",
# "avalon.tools.sceneinventory.app",
# "avalon.tools.projectmanager.dialogs",
# "avalon.tools.projectmanager.lib",
# "avalon.tools.projectmanager.model",
# "avalon.tools.projectmanager.style",
# "avalon.tools.projectmanager.widget",
# "avalon.tools.projectmanager.app",
"avalon.api",
"avalon.tools",
"avalon.maya"):
module = importlib.import_module(module)
reload(module)
get_main_window()
import avalon.maya
api.install(avalon.maya)
def _uninstall_menu():
# In Maya 2020+ don't use the QApplication.instance()
# during startup (userSetup.py) as it will return a
# QtCore.QCoreApplication instance which does not have
# the allWidgets method. As such, we call the staticmethod.
all_widgets = QtWidgets.QApplication.allWidgets()
widgets = dict((w.objectName(), w) for w in all_widgets)
menu = widgets.get(self._menu)
if menu:
menu.deleteLater()
del(menu)
def _update_menu_task_label():
"""Update the task label in Avalon menu to current session"""
if IS_HEADLESS:
return
object_name = "{}|currentContext".format(self._menu)
if not cmds.menuItem(object_name, query=True, exists=True):
logger.warning("Can't find menuItem: {}".format(object_name))
return
label = "{}, {}".format(api.Session["AVALON_ASSET"],
api.Session["AVALON_TASK"])
cmds.menuItem(object_name, edit=True, label=label)
def lock():
"""Lock scene
Add an invisible node to your Maya scene with the name of the
current file, indicating that this file is "locked" and cannot
be modified any further.
"""
if not cmds.objExists("lock"):
with lib.maintained_selection():
cmds.createNode("objectSet", name="lock")
cmds.addAttr("lock", ln="basename", dataType="string")
# Permanently hide from outliner
cmds.setAttr("lock.verticesOnlySet", True)
fname = cmds.file(query=True, sceneName=True)
basename = os.path.basename(fname)
cmds.setAttr("lock.basename", basename, type="string")
def unlock():
"""Permanently unlock a locked scene
Doesn't throw an error if scene is already unlocked.
"""
try:
cmds.delete("lock")
except ValueError:
pass
def is_locked():
"""Query whether current scene is locked"""
fname = cmds.file(query=True, sceneName=True)
basename = os.path.basename(fname)
if self._ignore_lock:
return False
try:
return cmds.getAttr("lock.basename") == basename
except ValueError:
return False
@contextlib.contextmanager
def lock_ignored():
"""Context manager for temporarily ignoring the lock of a scene
The purpose of this function is to enable locking a scene and
saving it with the lock still in place.
Example:
>>> with lock_ignored():
... pass # Do things without lock
"""
self._ignore_lock = True
try:
yield
finally:
self._ignore_lock = False
def containerise(name,
namespace,
nodes,
context,
loader=None,
suffix="CON"):
"""Bundle `nodes` into an assembly and imprint it with metadata
Containerisation enables a tracking of version, author and origin
for loaded assets.
Arguments:
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
nodes (list): Long names of nodes to containerise
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
suffix (str, optional): Suffix of container, defaults to `_CON`.
Returns:
container (str): Name of container assembly
"""
container = cmds.sets(nodes, name="%s_%s_%s" % (namespace, name, suffix))
data = [
("schema", "avalon-core:container-2.0"),
("id", AVALON_CONTAINER_ID),
("name", name),
("namespace", namespace),
("loader", str(loader)),
("representation", context["representation"]["_id"]),
]
for key, value in data:
if not value:
continue
if isinstance(value, (int, float)):
cmds.addAttr(container, longName=key, attributeType="short")
cmds.setAttr(container + "." + key, value)
else:
cmds.addAttr(container, longName=key, dataType="string")
cmds.setAttr(container + "." + key, value, type="string")
main_container = cmds.ls(AVALON_CONTAINERS, type="objectSet")
if not main_container:
main_container = cmds.sets(empty=True, name=AVALON_CONTAINERS)
# Implement #399: Maya 2019+ hide AVALON_CONTAINERS on creation..
if cmds.attributeQuery("hiddenInOutliner",
node=main_container,
exists=True):
cmds.setAttr(main_container + ".hiddenInOutliner", True)
else:
main_container = main_container[0]
cmds.sets(container, addElement=main_container)
# Implement #399: Maya 2019+ hide containers in outliner
if cmds.attributeQuery("hiddenInOutliner",
node=container,
exists=True):
cmds.setAttr(container + ".hiddenInOutliner", True)
return container
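# Illustrative call (all values hypothetical; ``context`` must carry the
# representation document so its id can be imprinted):
#
#   containerise(name="modelDefault",
#                namespace="hero_01",
#                nodes=cmds.ls(selection=True, long=True),
#                context={"representation": {"_id": "507f191e810c19729de860ea"}},
#                loader="ModelLoader")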
def parse_container(container):
"""Return the container node's full container data.
Args:
container (str): A container node name.
Returns:
dict: The container schema data for this container node.
"""
data = lib.read(container)
# Backwards compatibility pre-schemas for containers
data["schema"] = data.get("schema", "avalon-core:container-1.0")
# Append transient data
data["objectName"] = container
return data
def _ls():
"""Yields Avalon container node names.
Used by `ls()` to retrieve the nodes and then query the full container's
data.
Yields:
str: Avalon container node name (objectSet)
"""
def _maya_iterate(iterator):
"""Helper to iterate a maya iterator"""
while not iterator.isDone():
yield iterator.thisNode()
iterator.next()
ids = {AVALON_CONTAINER_ID,
# Backwards compatibility
"pyblish.mindbender.container"}
# Iterate over all 'set' nodes in the scene to detect whether
# they have the avalon container ".id" attribute.
fn_dep = om.MFnDependencyNode()
iterator = om.MItDependencyNodes(om.MFn.kSet)
for mobject in _maya_iterate(iterator):
if mobject.apiTypeStr != "kSet":
# Only match by exact type
continue
fn_dep.setObject(mobject)
if not fn_dep.hasAttribute("id"):
continue
plug = fn_dep.findPlug("id", True)
value = plug.asString()
if value in ids:
yield fn_dep.name()
def ls():
"""Yields containers from active Maya scene
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in Maya; once loaded
they are called 'containers'
Yields:
dict: container
"""
container_names = _ls()
has_metadata_collector = False
config_host = find_submodule(api.registered_config(), "maya")
if hasattr(config_host, "collect_container_metadata"):
has_metadata_collector = True
for container in sorted(container_names):
data = parse_container(container)
# Collect custom data if attribute is present
if has_metadata_collector:
metadata = config_host.collect_container_metadata(container)
data.update(metadata)
yield data
def update_hierarchy(containers):
"""Hierarchical container support
    This function supports the Scene Inventory tool in drawing a
    hierarchical view of containers. Both parent and children are needed to
    visualize the graph.
"""
container_names = set(_ls()) # lookup set
for container in containers:
# Find parent
parent = cmds.listSets(object=container["objectName"]) or []
for node in parent:
if node in container_names:
container["parent"] = node
break
# List children
children = cmds.ls(cmds.sets(container["objectName"], query=True),
type="objectSet")
container["children"] = [child for child in children
if child in container_names]
yield container
class Creator(api.Creator):
def process(self):
nodes = list()
with lib.undo_chunk():
if (self.options or {}).get("useSelection"):
nodes = cmds.ls(selection=True)
instance = cmds.sets(nodes, name=self.name)
lib.imprint(instance, self.data)
return instance
class Loader(api.Loader):
hosts = ["maya"]
def __init__(self, context):
super(Loader, self).__init__(context)
self.fname = self.fname.replace(
api.registered_root(), "$AVALON_PROJECTS"
)
def publish():
"""Shorthand to publish from within host"""
import pyblish.util
return pyblish.util.publish()
def _register_callbacks():
for handler, event in self._events.copy().items():
if event is None:
continue
try:
OpenMaya.MMessage.removeCallback(event)
self._events[handler] = None
except RuntimeError as e:
logger.info(e)
self._events[_on_scene_save] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kBeforeSave, _on_scene_save
)
self._events[_before_scene_save] = OpenMaya.MSceneMessage.addCheckCallback(
OpenMaya.MSceneMessage.kBeforeSaveCheck, _before_scene_save
)
self._events[_on_scene_new] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterNew, _on_scene_new
)
self._events[_on_maya_initialized] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kMayaInitialized, _on_maya_initialized
)
self._events[_on_scene_open] = OpenMaya.MSceneMessage.addCallback(
OpenMaya.MSceneMessage.kAfterOpen, _on_scene_open
)
logger.info("Installed event handler _on_scene_save..")
logger.info("Installed event handler _before_scene_save..")
logger.info("Installed event handler _on_scene_new..")
logger.info("Installed event handler _on_maya_initialized..")
logger.info("Installed event handler _on_scene_open..")
def _register_events():
api.on("taskChanged", _on_task_changed)
logger.info("Installed event callback for 'taskChanged'..")
def _on_maya_initialized(*args):
api.emit("init", args)
if cmds.about(batch=True):
logger.warning("Running batch mode ...")
return
# Keep reference to the main Window, once a main window exists.
get_main_window()
def _on_scene_new(*args):
api.emit("new", args)
def _on_scene_save(*args):
api.emit("save", args)
def _on_scene_open(*args):
api.emit("open", args)
def _before_scene_save(return_code, client_data):
# Default to allowing the action. Registered
# callbacks can optionally set this to False
# in order to block the operation.
OpenMaya.MScriptUtil.setBool(return_code, True)
api.emit("before_save", [return_code, client_data])
def _on_task_changed(*args):
_update_menu_task_label()
workdir = api.Session["AVALON_WORKDIR"]
if os.path.exists(workdir):
logger.info("Updating Maya workspace for task change to %s", workdir)
_set_project()
# Set Maya fileDialog's start-dir to /scenes
frule_scene = cmds.workspace(fileRuleEntry="scene")
cmds.optionVar(stringValue=("browserLocationmayaBinaryscene",
workdir + "/" + frule_scene))
else:
logger.warning("Can't set project for new context because "
"path does not exist: %s", workdir)
|
{
"content_hash": "c270bf19dd380f69ebbdf13e216e0159",
"timestamp": "",
"source": "github",
"line_count": 665,
"max_line_length": 79,
"avg_line_length": 28.007518796992482,
"alnum_prop": 0.6018255033557047,
"repo_name": "mindbender-studio/core",
"id": "ba4005b999ee07f26d3e684bf71114f23bffdf78",
"size": "18625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avalon/maya/pipeline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190241"
},
{
"name": "Shell",
"bytes": "374"
}
],
"symlink_target": ""
}
|
import mapper
import time
from simplecoremidi import MIDISource, MIDIDestination
from mapper import Note, Tap, LongPress, Controller, Program, MIDIMapper, Keystroke, key, Change, Send, Compare, typename
import sys
import os
# TODO: parse from a file
ACTIONS = {
Note(1): { LongPress(): Keystroke(key.K_F16), Tap(): Keystroke(key.K_F17) },
Note(6): { LongPress(): Keystroke(key.K_F18), Tap(): Keystroke(key.K_F19) },
Note(2): { Tap(): Note(2), LongPress(): Note(12) },
Note(7): { Tap(): Note(7, toggle=True), LongPress(): Note(17) },
Note(3): { Tap(): Note(3, toggle=True), LongPress(): Note(13, toggle=True) },
Note(4): { Tap(): Note(4, toggle=True), LongPress(): Note(14, toggle=True) },
Note(5): { Tap(): Note(5, toggle=True), LongPress(): Note(15, toggle=True) },
Note(8): { Tap(): Note(8, toggle=True), LongPress(): Note(18, toggle=True) },
Note(9): { Tap(): Note(9, toggle=True), LongPress(): Note(19, toggle=True) },
Note(10):{ Tap(): Note(10,toggle=True), LongPress(): Note(20, toggle=True) },
#Note(8): { Tap(): Program(8), LongPress(): Program(18)},
#Note(9): { Tap(): Program(9), LongPress(): Program(19)},
#Note(10): { Tap(): Program(10), LongPress(): Program(20)},
Controller(0x1A): {},
Controller(0x1B): { Change(): Send(control=0x0C) },
Controller(12): { Change(): Send(), Compare(lambda m: m.value > 120, duration=1.0): Note(11, toggle=True)},
}
if __name__ == '__main__':
sys.exit(MIDIMapper.main(ACTIONS))
|
{
"content_hash": "11dfc09ee8cbe82602d739abcd8e2cba",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 121,
"avg_line_length": 44.1764705882353,
"alnum_prop": 0.6158455392809588,
"repo_name": "jokester01au/simplecoremidi",
"id": "5e8ec19642f7480da63dbe0977a0db89e4f49c0d",
"size": "1502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplecoremidi/examples/my_midi_mapper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9581"
},
{
"name": "Python",
"bytes": "26008"
}
],
"symlink_target": ""
}
|
"""
sentry.plugins.sentry_mail
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
|
{
"content_hash": "94fbce2b43bbc8854b57b15823be8dcc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 75,
"avg_line_length": 26.285714285714285,
"alnum_prop": 0.6195652173913043,
"repo_name": "chayapan/django-sentry",
"id": "31c6868acf8f88fead44915c0b361d7b3308f6c4",
"size": "184",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/plugins/sentry_mail/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126130"
},
{
"name": "HTML",
"bytes": "174367"
},
{
"name": "JavaScript",
"bytes": "54696"
},
{
"name": "Makefile",
"bytes": "1867"
},
{
"name": "Python",
"bytes": "1330807"
}
],
"symlink_target": ""
}
|
import json
import logging
import math
import re
import sys
import time
from django import forms
from django.contrib import messages
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponse, QueryDict
from django.shortcuts import redirect
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.appmanager import get_apps_dict
from desktop.context_processors import get_app_name
from desktop.lib.paginator import Paginator
from desktop.lib.django_util import JsonResponse
from desktop.lib.django_util import copy_query_dict, format_preserving_redirect, render
from desktop.lib.django_util import login_notrequired, get_desktop_uri_prefix
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from desktop.models import Document
from beeswax.parameterization import find_variables
import beeswax.forms
import beeswax.design
import beeswax.management.commands.beeswax_install_examples
from beeswax import common, data_export, models
from beeswax.models import SavedQuery, QueryHistory
from beeswax.server import dbms
from beeswax.server.dbms import expand_exception, get_query_server_config, QueryServerException
LOG = logging.getLogger(__name__)
def index(request):
return execute_query(request)
"""
Design views
"""
def save_design(request, form, type_, design, explicit_save):
"""
save_design(request, form, type_, design, explicit_save) -> SavedQuery
A helper method to save the design:
* If ``explicit_save``, then we save the data in the current design.
* If the user clicked the submit button, we do NOT overwrite the current
design. Instead, we create a new "auto" design (iff the user modified
the data). This new design is named after the current design, with the
AUTO_DESIGN_SUFFIX to signify that it's different.
Need to return a SavedQuery because we may end up with a different one.
Assumes that form.saveform is the SaveForm, and that it is valid.
"""
authorized_get_design(request, design.id)
assert form.saveform.is_valid()
sub_design_form = form # Beeswax/Impala case
if type_ == models.HQL:
design_cls = beeswax.design.HQLdesign
elif type_ == models.IMPALA:
design_cls = beeswax.design.HQLdesign
elif type_ == models.SPARK:
from spark.design import SparkDesign
design_cls = SparkDesign
sub_design_form = form.query
else:
raise ValueError(_('Invalid design type %(type)s') % {'type': type_})
design_obj = design_cls(sub_design_form, query_type=type_)
name = form.saveform.cleaned_data['name']
desc = form.saveform.cleaned_data['desc']
return _save_design(request.user, design, type_, design_obj, explicit_save, name, desc)
def _save_design(user, design, type_, design_obj, explicit_save, name=None, desc=None):
# Design here means SavedQuery
old_design = design
new_data = design_obj.dumps()
# Auto save if (1) the user didn't click "save", and (2) the data is different.
# Create an history design if the user is executing a shared design.
# Don't generate an auto-saved design if the user didn't change anything.
if explicit_save and (not design.doc.exists() or design.doc.get().can_write_or_exception(user)):
design.name = name
design.desc = desc
design.is_auto = False
elif design_obj != old_design.get_design():
# Auto save iff the data is different
if old_design.id is not None:
# Clone iff the parent design isn't a new unsaved model
design = old_design.clone(new_owner=user)
if not old_design.is_auto:
design.name = old_design.name + models.SavedQuery.AUTO_DESIGN_SUFFIX
else:
design.name = models.SavedQuery.DEFAULT_NEW_DESIGN_NAME
design.is_auto = True
design.name = design.name[:64]
design.type = type_
design.data = new_data
design.save()
LOG.info('Saved %s design "%s" (id %s) for %s' % (explicit_save and '' or 'auto ', design.name, design.id, design.owner))
if design.doc.exists():
design.doc.update(name=design.name, description=design.desc)
else:
Document.objects.link(design, owner=design.owner, extra=design.type, name=design.name, description=design.desc)
if design.is_auto:
design.doc.get().add_to_history()
return design
def delete_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id, owner_only=True)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot delete non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
if request.POST.get('skipTrash', 'false') == 'false':
design.doc.get().send_to_trash()
else:
design.doc.all().delete()
design.delete()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Delete design(s)?')})
def restore_design(request):
if request.method == 'POST':
ids = request.POST.getlist('designs_selection')
designs = dict([(design_id, authorized_get_design(request, design_id)) for design_id in ids])
if None in designs.values():
LOG.error('Cannot restore non-existent design(s) %s' % ','.join([key for key, name in designs.items() if name is None]))
return list_designs(request)
for design in designs.values():
design.doc.get().restore_from_trash()
return redirect(reverse(get_app_name(request) + ':list_designs'))
else:
return render('confirm.mako', request, {'url': request.path, 'title': _('Restore design(s)?')})
def clone_design(request, design_id):
"""Clone a design belonging to any user"""
design = authorized_get_design(request, design_id)
if design is None:
LOG.error('Cannot clone non-existent design %s' % (design_id,))
return list_designs(request)
copy = design.clone(request.user)
copy.save()
copy_doc = design.doc.get().copy(owner=request.user)
copy.doc.all().delete()
copy.doc.add(copy_doc)
messages.info(request, _('Copied design: %(name)s') % {'name': design.name})
return format_preserving_redirect(request, reverse(get_app_name(request) + ':execute_design', kwargs={'design_id': copy.id}))
def list_designs(request):
"""
View function for show all saved queries.
We get here from /beeswax/list_designs?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show design items belonging to a user. Default to all users.
type=<type> - <type> is "hql", for saved query type. Default to show all.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "name", "desc", and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
text=<frag> - Search for fragment "frag" in names and descriptions.
"""
DEFAULT_PAGE_SIZE = 20
app_name = get_app_name(request)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually pin the type filter to this app.
querydict_query[ prefix + 'type' ] = app_name
page, filter_params = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
return render('list_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
def list_trashed_designs(request):
DEFAULT_PAGE_SIZE = 20
app_name= get_app_name(request)
user = request.user
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually pin the type filter to this app.
querydict_query[ prefix + 'type' ] = app_name
page, filter_params = _list_designs(user, querydict_query, DEFAULT_PAGE_SIZE, prefix, is_trashed=True)
return render('list_trashed_designs.mako', request, {
'page': page,
'filter_params': filter_params,
'user': request.user,
'designs_json': json.dumps([query.id for query in page.object_list])
})
def my_queries(request):
"""
View a mix of history and saved queries.
It understands all the GET params in ``list_query_history`` (with a ``h-`` prefix)
and those in ``list_designs`` (with a ``q-`` prefix). The only thing it disallows
is the ``user`` filter, since this view only shows what belongs to the user.
"""
DEFAULT_PAGE_SIZE = 30
app_name= get_app_name(request)
# Extract the history list.
prefix = 'h-'
querydict_history = _copy_prefix(prefix, request.GET)
  # Manually pin the user and type filters.
querydict_history[ prefix + 'user' ] = request.user
querydict_history[ prefix + 'type' ] = app_name
hist_page, hist_filter = _list_query_history(request.user,
querydict_history,
DEFAULT_PAGE_SIZE,
prefix)
# Extract the saved query list.
prefix = 'q-'
querydict_query = _copy_prefix(prefix, request.GET)
  # Manually pin the user and type filters.
querydict_query[ prefix + 'user' ] = request.user
querydict_query[ prefix + 'type' ] = app_name
query_page, query_filter = _list_designs(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
filter_params = hist_filter
filter_params.update(query_filter)
return render('my_queries.mako', request, {
'request': request,
'h_page': hist_page,
'q_page': query_page,
'filter_params': filter_params,
'designs_json': json.dumps([query.id for query in query_page.object_list])
})
def list_query_history(request):
"""
View the history of query (for the current user).
We get here from /beeswax/query_history?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
user=<name> - Show history items from a user. Default to current user only.
Also accepts ':all' to show all history items.
type=<type> - <type> is "beeswax|impala", for design type. Default to show all.
design_id=<id> - Show history for this particular design id.
sort=<key> - Sort by the attribute <key>, which is one of:
"date", "state", "name" (design name), and "type" (design type)
Accepts the form "-date", which sort in descending order.
Default to "-date".
auto_query=<bool> - Show auto generated actions (drop table, read data, etc). Default True
"""
DEFAULT_PAGE_SIZE = 100
prefix = 'q-'
share_queries = request.user.is_superuser
querydict_query = request.GET.copy()
if not share_queries:
querydict_query[prefix + 'user'] = request.user.username
app_name = get_app_name(request)
querydict_query[prefix + 'type'] = app_name
page, filter_params = _list_query_history(request.user, querydict_query, DEFAULT_PAGE_SIZE, prefix)
  filter = request.GET.get(prefix + 'search') or ''
if request.GET.get('format') == 'json':
resp = {
'queries': [massage_query_history_for_json(app_name, query_history) for query_history in page.object_list]
}
return JsonResponse(resp)
return render('list_history.mako', request, {
'request': request,
'page': page,
'filter_params': filter_params,
'share_queries': share_queries,
'prefix': prefix,
'filter': filter,
})
def massage_query_history_for_json(app_name, query_history):
return {
'query': escape(query_history.query),
'timeInMs': time.mktime(query_history.submission_date.timetuple()),
'timeFormatted': query_history.submission_date.strftime("%x %X"),
'designUrl': reverse(app_name + ':execute_design', kwargs={'design_id': query_history.design.id}),
'resultsUrl': not query_history.is_failure() and reverse(app_name + ':watch_query_history', kwargs={'query_history_id': query_history.id}) or ""
}
def download(request, id, format):
try:
query_history = authorized_get_query_history(request, id, must_exist=True)
db = dbms.get(request.user, query_history.get_query_server_config())
LOG.debug('Download results for query %s: [ %s ]' % (query_history.server_id, query_history.query))
return data_export.download(query_history.get_handle(), format, db)
except Exception, e:
if not hasattr(e, 'message') or not e.message:
message = e
else:
message = e.message
raise PopupException(message, detail='')
"""
Queries Views
"""
def execute_query(request, design_id=None, query_history_id=None):
"""
View function for executing an arbitrary query.
"""
action = 'query'
if query_history_id:
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
design = query_history.design
try:
if query_history.server_id and query_history.server_guid:
handle, state = _get_query_handle_and_state(query_history)
if 'on_success_url' in request.GET:
if request.GET.get('on_success_url'):
action = 'watch-redirect'
else:
action = 'watch-results'
else:
action = 'editor-results'
except QueryServerException, e:
if 'Invalid query handle' in e.message or 'Invalid OperationHandle' in e.message:
query_history.save_state(QueryHistory.STATE.expired)
LOG.warn("Invalid query handle", exc_info=sys.exc_info())
action = 'editor-expired-results'
else:
raise e
else:
# Check perms.
authorized_get_design(request, design_id)
app_name = get_app_name(request)
query_type = SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
query_history = None
doc = design and design.id and design.doc.get()
context = {
'design': design,
'query': query_history, # Backward
'query_history': query_history,
'autocomplete_base_url': reverse(get_app_name(request) + ':api_autocomplete_databases', kwargs={}),
'autocomplete_base_url_hive': reverse('beeswax:api_autocomplete_databases', kwargs={}),
'can_edit_name': design and design.id and not design.is_auto,
'doc_id': doc and doc.id or -1,
'can_edit': doc and doc.can_write(request.user),
'action': action,
'on_success_url': request.GET.get('on_success_url'),
'has_metastore': 'metastore' in get_apps_dict(request.user)
}
return render('execute.mako', request, context)
def view_results(request, id, first_row=0):
"""
Returns the view for the results of the QueryHistory with the given id.
The query results MUST be ready.
To display query results, one should always go through the execute_query view.
  If the query handle has has_result_set=False, display an empty result.
  If ``first_row`` is 0, restarts (if necessary) the query read. Otherwise, just
  spits out a warning if first_row doesn't match the server's conception.
Multiple readers will produce a confusing interaction here, and that's known.
It understands the ``context`` GET parameter. (See execute_query().)
"""
first_row = long(first_row)
start_over = (first_row == 0)
results = type('Result', (object,), {
'rows': 0,
'columns': [],
'has_more': False,
'start_row': 0,
})
data = []
fetch_error = False
error_message = ''
log = ''
columns = []
app_name = get_app_name(request)
query_history = authorized_get_query_history(request, id, must_exist=True)
query_server = query_history.get_query_server_config()
db = dbms.get(request.user, query_server)
handle, state = _get_query_handle_and_state(query_history)
context_param = request.GET.get('context', '')
query_context = parse_query_context(context_param)
# Update the status as expired should not be accessible
expired = state == models.QueryHistory.STATE.expired
# Retrieve query results or use empty result if no result set
try:
if query_server['server_name'] == 'impala' and not handle.has_result_set:
downloadable = False
else:
results = db.fetch(handle, start_over, 100)
data = []
# Materialize and HTML escape results
# TODO: use Number + list comprehension
for row in results.rows():
escaped_row = []
for field in row:
if isinstance(field, (int, long, float, complex, bool)):
if math.isnan(field) or math.isinf(field):
escaped_field = json.dumps(field)
else:
escaped_field = field
elif field is None:
escaped_field = 'NULL'
else:
field = smart_unicode(field, errors='replace') # Prevent error when getting back non utf8 like charset=iso-8859-1
escaped_field = escape(field).replace(' ', ' ')
escaped_row.append(escaped_field)
data.append(escaped_row)
# We display the "Download" button only when we know that there are results:
downloadable = first_row > 0 or data
log = db.get_log(handle)
columns = results.data_table.cols()
except Exception, ex:
fetch_error = True
error_message, log = expand_exception(ex, db, handle)
# Handle errors
error = fetch_error or results is None or expired
context = {
'error': error,
'message': error_message,
'query': query_history,
'results': data,
'columns': columns,
'expected_first_row': first_row,
'log': log,
'hadoop_jobs': app_name != 'impala' and _parse_out_hadoop_jobs(log),
'query_context': query_context,
'can_save': False,
'context_param': context_param,
'expired': expired,
'app_name': app_name,
'next_json_set': None,
'is_finished': query_history.is_finished()
}
if not error:
download_urls = {}
if downloadable:
for format in common.DL_FORMATS:
download_urls[format] = reverse(app_name + ':download', kwargs=dict(id=str(id), format=format))
results.start_row = first_row
context.update({
'id': id,
'results': data,
'has_more': results.has_more,
'next_row': results.start_row + len(data),
'start_row': results.start_row,
'expected_first_row': first_row,
'columns': columns,
'download_urls': download_urls,
'can_save': query_history.owner == request.user,
'next_json_set':
reverse(get_app_name(request) + ':view_results', kwargs={
'id': str(id),
'first_row': results.start_row + len(data)
}
)
+ ('?context=' + context_param or '') + '&format=json'
})
context['columns'] = massage_columns_for_json(columns)
if 'save_form' in context:
del context['save_form']
if 'query' in context:
del context['query']
return JsonResponse(context)
def configuration(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
config_values = dbms.get(request.user, query_server).get_default_configuration(
bool(request.REQUEST.get("include_hadoop", False)))
for value in config_values:
if 'password' in value.key.lower():
value.value = "*" * 10
return render("configuration.mako", request, {'config_values': config_values})
"""
Other views
"""
def install_examples(request):
response = {'status': -1, 'message': ''}
if request.method == 'POST':
try:
app_name = get_app_name(request)
beeswax.management.commands.beeswax_install_examples.Command().handle(app_name=app_name, user=request.user)
response['status'] = 0
except Exception, err:
LOG.exception(err)
response['message'] = str(err)
else:
response['message'] = _('A POST request is required.')
return JsonResponse(response)
@login_notrequired
def query_done_cb(request, server_id):
"""
A callback for query completion notification. When the query is done,
BeeswaxServer notifies us by sending a GET request to this view.
"""
message_template = '<html><head></head>%(message)s<body></body></html>'
message = {'message': 'error'}
try:
query_history = QueryHistory.objects.get(server_id=server_id + '\n')
# Update the query status
query_history.set_to_available()
# Find out details about the query
if not query_history.notify:
message['message'] = 'email_notify is false'
return HttpResponse(message_template % message)
design = query_history.design
user = query_history.owner
subject = _("Beeswax query completed.")
if design:
subject += ": %s" % (design.name,)
link = "%s%s" % \
(get_desktop_uri_prefix(),
reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id}))
body = _("%(subject)s. See the results here: %(link)s\n\nQuery:\n%(query)s") % {
'subject': subject, 'link': link, 'query': query_history.query
}
user.email_user(subject, body)
message['message'] = 'sent'
except Exception, ex:
msg = "Failed to send query completion notification via e-mail: %s" % (ex)
LOG.error(msg)
message['message'] = msg
return HttpResponse(message_template % message)
"""
Utils
"""
def massage_columns_for_json(cols):
massaged_cols = []
for column in cols:
massaged_cols.append({
'name': column.name,
'type': column.type,
'comment': column.comment
})
return massaged_cols
def authorized_get_design(request, design_id, owner_only=False, must_exist=False):
if design_id is None and not must_exist:
return None
try:
design = SavedQuery.objects.get(id=design_id)
except SavedQuery.DoesNotExist:
if must_exist:
raise PopupException(_('Design %(id)s does not exist.') % {'id': design_id})
else:
return None
if owner_only:
design.doc.get().can_write_or_exception(request.user)
else:
design.doc.get().can_read_or_exception(request.user)
return design
def authorized_get_query_history(request, query_history_id, owner_only=False, must_exist=False):
if query_history_id is None and not must_exist:
return None
try:
query_history = QueryHistory.get(id=query_history_id)
except QueryHistory.DoesNotExist:
if must_exist:
raise PopupException(_('QueryHistory %(id)s does not exist.') % {'id': query_history_id})
else:
return None
# Some queries don't have a design so are not linked to Document Model permission
if query_history.design is None or not query_history.design.doc.exists():
if not request.user.is_superuser and request.user != query_history.owner:
raise PopupException(_('Permission denied to read QueryHistory %(id)s') % {'id': query_history_id})
else:
query_history.design.doc.get().can_read_or_exception(request.user)
return query_history
def safe_get_design(request, design_type, design_id=None):
"""
Return a new design, if design_id is None,
Return the design with the given id and type. If the design is not found,
display a notification and return a new design.
"""
design = None
if design_id is not None:
design = authorized_get_design(request, design_id)
if design is None:
design = SavedQuery(owner=request.user, type=design_type)
return design
def make_parameterization_form(query_str):
"""
Creates a django form on the fly with arguments from the
query.
"""
variables = find_variables(query_str)
if len(variables) > 0:
class Form(forms.Form):
for name in sorted(variables):
locals()[name] = forms.CharField(required=True)
return Form
else:
return None
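# Illustrative sketch, not from the original file: assuming find_variables()
# picks up "$name" placeholders, a query such as
#   "SELECT * FROM logs WHERE day='$day'"
# yields a Form class with a single required CharField named 'day':
#   FormClass = make_parameterization_form("SELECT * FROM logs WHERE day='$day'")
#   form = FormClass({'day': '2014-01-01'})  # form.is_valid() -> True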
def execute_directly(request, query, query_server=None,
design=None, on_success_url=None, on_success_params=None,
**kwargs):
"""
execute_directly(request, query_msg, tablename, design) -> HTTP response for execution
This method wraps around dbms.execute_query() to take care of the HTTP response
after the execution.
query
The HQL model Query object.
query_server
To which Query Server to submit the query.
Dictionary with keys: ['server_name', 'server_host', 'server_port'].
design
The design associated with the query.
on_success_url
Where to go after the query is done. The URL handler may expect an option "context" GET
param. (See ``watch_query``.) For advanced usage, on_success_url can be a function, in
which case the on complete URL is the return of:
on_success_url(history_obj) -> URL string
Defaults to the view results page.
on_success_params
Optional params to pass to the on_success_url (in additional to "context").
Note that this may throw a Beeswax exception.
"""
if design is not None:
authorized_get_design(request, design.id)
db = dbms.get(request.user, query_server)
database = query.query.get('database', 'default')
db.use(database)
query_history = db.execute_query(query, design)
watch_url = reverse(get_app_name(request) + ':watch_query_history', kwargs={'query_history_id': query_history.id})
# Prepare the GET params for the watch_url
get_dict = QueryDict(None, mutable=True)
# (1) on_success_url
if on_success_url:
if callable(on_success_url):
on_success_url = on_success_url(query_history)
get_dict['on_success_url'] = on_success_url
# (2) misc
if on_success_params:
get_dict.update(on_success_params)
return format_preserving_redirect(request, watch_url, get_dict)
def _list_designs(user, querydict, page_size, prefix="", is_trashed=False):
"""
_list_designs(user, querydict, page_size, prefix, is_trashed) -> (page, filter_param)
A helper to gather the designs page. It understands all the GET params in
``list_designs``, by reading keys from the ``querydict`` with the given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='last_modified',
name='name',
desc='description',
type='extra',
)
# Trash and security
if is_trashed:
db_queryset = Document.objects.trashed_docs(SavedQuery, user)
else:
db_queryset = Document.objects.available_docs(SavedQuery, user)
# Filter by user
filter_username = querydict.get(prefix + 'user')
if filter_username:
try:
db_queryset = db_queryset.filter(owner=User.objects.get(username=filter_username))
except User.DoesNotExist:
# Don't care if a bad filter term is provided
pass
# Design type
d_type = querydict.get(prefix + 'type')
if d_type and d_type in SavedQuery.TYPES_MAPPING.keys():
db_queryset = db_queryset.filter(extra=str(SavedQuery.TYPES_MAPPING[d_type]))
# Text search
frag = querydict.get(prefix + 'text')
if frag:
db_queryset = db_queryset.filter(Q(name__icontains=frag) | Q(description__icontains=frag))
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
else:
sort_dir, sort_attr = '', sort_key
    if sort_attr not in SORT_ATTR_TRANSLATION:
LOG.warn('Bad parameter to list_designs: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr])
  designs = [job.content_object for job in db_queryset.all() if job.content_object and not job.content_object.is_auto]
pagenum = int(querydict.get(prefix + 'page', 1))
paginator = Paginator(designs, page_size)
page = paginator.page(pagenum)
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _get_query_handle_and_state(query_history):
"""
Front-end wrapper to handle exceptions. Expects the query to be submitted.
"""
handle = query_history.get_handle()
if handle is None:
raise PopupException(_("Failed to retrieve query state from the Query Server."))
state = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(handle)
if state is None:
raise PopupException(_("Failed to contact Server to check query status."))
return (handle, state)
def parse_query_context(context):
"""
parse_query_context(context) -> ('table', <table_name>) -or- ('design', <design_obj>)
"""
if not context:
return None
pair = context.split(':', 1)
if len(pair) != 2 or pair[0] not in ('table', 'design'):
LOG.error("Invalid query context data: %s" % (context,))
return None
if pair[0] == 'design': # Translate design id to design obj
pair[1] = models.SavedQuery.get(int(pair[1]))
return pair
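# Illustrative sketch, not from the original file: a table context round-trips
# as plain strings without a database lookup,
#   parse_query_context('table:sample_07')  ->  ['table', 'sample_07']
# while 'design:<id>' resolves the id to a SavedQuery object.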
HADOOP_JOBS_RE = re.compile("Starting Job = ([a-z0-9_]+?),")
def _parse_out_hadoop_jobs(log):
"""
Ideally, Hive would tell us what jobs it has run directly from the Thrift interface.
"""
ret = []
for match in HADOOP_JOBS_RE.finditer(log):
job_id = match.group(1)
if job_id not in ret:
ret.append(job_id)
return ret
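# Illustrative sketch, not from the original file: HADOOP_JOBS_RE keys off
# Hive's "Starting Job = <id>," log lines, and duplicates are dropped:
#   _parse_out_hadoop_jobs("Starting Job = job_201309171733_0001, Tracking URL = ...")
#   ->  ['job_201309171733_0001']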
def _copy_prefix(prefix, base_dict):
"""Copy keys starting with ``prefix``"""
querydict = QueryDict(None, mutable=True)
for key, val in base_dict.iteritems():
if key.startswith(prefix):
querydict[key] = val
return querydict
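# Illustrative sketch, not from the original file: keys keep their prefix and
# everything else is dropped, e.g. with base_dict {'q-user': 'bob', 'page': '2'}
#   _copy_prefix('q-', base_dict)  ->  QueryDict with only 'q-user': 'bob'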
def _list_query_history(user, querydict, page_size, prefix=""):
"""
_list_query_history(user, querydict, page_size, prefix) -> (page, filter_param)
A helper to gather the history page. It understands all the GET params in
``list_query_history``, by reading keys from the ``querydict`` with the
given ``prefix``.
"""
DEFAULT_SORT = ('-', 'date') # Descending date
SORT_ATTR_TRANSLATION = dict(
date='submission_date',
state='last_state',
name='design__name',
type='design__type',
)
db_queryset = models.QueryHistory.objects.select_related()
# Filtering
#
# Queries without designs are the ones we submitted on behalf of the user,
# (e.g. view table data). Exclude those when returning query history.
if querydict.get(prefix + 'auto_query', 'on') != 'on':
db_queryset = db_queryset.exclude(design__isnull=False, design__is_auto=True)
user_filter = querydict.get(prefix + 'user', user.username)
if user_filter != ':all':
db_queryset = db_queryset.filter(owner__username=user_filter)
# Design id
design_id = querydict.get(prefix + 'design_id')
if design_id:
db_queryset = db_queryset.filter(design__id=int(design_id))
# Search
search_filter = querydict.get(prefix + 'search')
if search_filter:
db_queryset = db_queryset.filter(Q(design__name__icontains=search_filter) | Q(query__icontains=search_filter) | Q(owner__username__icontains=search_filter))
# Design type
d_type = querydict.get(prefix + 'type')
if d_type:
if d_type not in SavedQuery.TYPES_MAPPING.keys():
LOG.warn('Bad parameter to list_query_history: type=%s' % (d_type,))
else:
db_queryset = db_queryset.filter(design__type=SavedQuery.TYPES_MAPPING[d_type])
# Ordering
sort_key = querydict.get(prefix + 'sort')
if sort_key:
sort_dir, sort_attr = '', sort_key
if sort_key[0] == '-':
sort_dir, sort_attr = '-', sort_key[1:]
    if sort_attr not in SORT_ATTR_TRANSLATION:
LOG.warn('Bad parameter to list_query_history: sort=%s' % (sort_key,))
sort_dir, sort_attr = DEFAULT_SORT
else:
sort_dir, sort_attr = DEFAULT_SORT
db_queryset = db_queryset.order_by(sort_dir + SORT_ATTR_TRANSLATION[sort_attr], '-id')
# Get the total return count before slicing
total_count = db_queryset.count()
# Slicing (must be the last filter applied)
pagenum = int(querydict.get(prefix + 'page', 1))
if pagenum < 1:
pagenum = 1
db_queryset = db_queryset[ page_size * (pagenum - 1) : page_size * pagenum ]
paginator = Paginator(db_queryset, page_size, total=total_count)
page = paginator.page(pagenum)
# We do slicing ourselves, rather than letting the Paginator handle it, in order to
# update the last_state on the running queries
for history in page.object_list:
_update_query_state(history.get_full_object())
# We need to pass the parameters back to the template to generate links
keys_to_copy = [ prefix + key for key in ('user', 'type', 'sort', 'design_id', 'auto_query', 'search') ]
filter_params = copy_query_dict(querydict, keys_to_copy)
return page, filter_params
def _update_query_state(query_history):
"""
Update the last_state for a QueryHistory object. Returns success as True/False.
This only occurs iff the current last_state is submitted or running, since the other
states are stable, more-or-less.
Note that there is a transition from available/failed to expired. That occurs lazily
when the user attempts to view results that have expired.
"""
if query_history.last_state <= models.QueryHistory.STATE.running.index:
try:
state_enum = dbms.get(query_history.owner, query_history.get_query_server_config()).get_state(query_history.get_handle())
if state_enum is None:
# Error was logged at the source
return False
except Exception, e:
LOG.error(e)
state_enum = models.QueryHistory.STATE.failed
query_history.save_state(state_enum)
return True
def get_db_choices(request):
app_name = get_app_name(request)
query_server = get_query_server_config(app_name)
db = dbms.get(request.user, query_server)
dbs = db.get_databases()
return [(db, db) for db in dbs]
WHITESPACE = re.compile(r"\s+", re.MULTILINE)
def collapse_whitespace(s):
return WHITESPACE.sub(" ", s).strip()
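# Illustrative sketch, not from the original file: runs of whitespace,
# newlines included, collapse to single spaces and the ends are trimmed:
#   collapse_whitespace("  SELECT *\n   FROM tbl  ")  ->  "SELECT * FROM tbl"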
|
{
"content_hash": "8dc62b31c06a27abc62aaa30018ae727",
"timestamp": "",
"source": "github",
"line_count": 998,
"max_line_length": 160,
"avg_line_length": 34.13226452905812,
"alnum_prop": 0.6680072804133396,
"repo_name": "dulems/hue",
"id": "bb73229284277532c3db8f8b56f946e7209cc6e6",
"size": "34856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/beeswax/src/beeswax/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10724"
},
{
"name": "C",
"bytes": "1717475"
},
{
"name": "C++",
"bytes": "173822"
},
{
"name": "CSS",
"bytes": "415919"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21211447"
},
{
"name": "Java",
"bytes": "133829"
},
{
"name": "JavaScript",
"bytes": "2548634"
},
{
"name": "Makefile",
"bytes": "91979"
},
{
"name": "Mako",
"bytes": "2031792"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "138710"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "29796239"
},
{
"name": "Scala",
"bytes": "67440"
},
{
"name": "Shell",
"bytes": "28255"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
}
|
from blinker import Namespace
_signals = Namespace()
get_cloners = _signals.signal('get_cloners', """
Expected to return one or more ``EventCloner`` subclasses implementing
a cloning operation for something within an event.
""")
management_url = _signals.signal('management-url', """
Expected to return a URL for the event management page of the plugin.
This is used when someone who does not have event management access wants
to go to the event management area. They are then redirected to one of the URLs
returned by plugins, i.e. it is not guaranteed that the user ends up on a
specific plugin's management page. The signal should return None if the current
user (available via ``session.user``) cannot access the management area.
The *sender* is the event object.
""")
image_created = _signals.signal('image-uploaded', """
Called when a new image is created. The *sender* object is the new ``ImageFile``.
The user who uploaded the image is passed in the ``user`` kwarg.
""")
image_deleted = _signals.signal('image-deleted', """
Called when an image is deleted. The *sender* object is the ``ImageFile`` that is
about to be deleted. The user who uploaded the image is passed in the ``user``
kwarg.
""")
|
{
"content_hash": "3b60698522f5e1dcd4db308fd2c7810d",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 39.12903225806452,
"alnum_prop": 0.7436108821104699,
"repo_name": "mvidalgarcia/indico",
"id": "19a3c881c9f38580e8675faa3a4984c61ad1122c",
"size": "1427",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/core/signals/event_management.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "538590"
},
{
"name": "HTML",
"bytes": "1345380"
},
{
"name": "JavaScript",
"bytes": "1781971"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4381847"
},
{
"name": "Shell",
"bytes": "3568"
},
{
"name": "TeX",
"bytes": "22182"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mturk', '0006_auto_20160129_2154'),
]
operations = [
migrations.AlterField(
model_name='mturkhit',
name='status',
field=models.IntegerField(choices=[(1, 'In Progress'), (2, 'Completed'), (3, 'Done on Daemo'), (4, 'Expired')], default=1),
),
]
|
{
"content_hash": "387949f77a20f22380066e1ff3b27528",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 135,
"avg_line_length": 25.72222222222222,
"alnum_prop": 0.5853131749460043,
"repo_name": "aginzberg/crowdsource-platform",
"id": "0128c729c1b5e2fe5ef3cbe96e99f28a0f4faff2",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop2",
"path": "mturk/migrations/0007_auto_20160129_2321.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "328687"
},
{
"name": "HTML",
"bytes": "178994"
},
{
"name": "JavaScript",
"bytes": "168588"
},
{
"name": "Python",
"bytes": "339941"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
}
|
import scrapy
import re
from locations.items import GeojsonPointItem
class SurLaTableSpider(scrapy.Spider):
name = "surlatable"
allowed_domains = ['surlatable.com']
start_urls = (
'https://www.surlatable.com/storeHome.xml',
)
def parse(self, response):
response.selector.remove_namespaces()
city_urls = response.xpath('//url/loc/text()').extract()
for path in city_urls:
yield scrapy.Request(
path.strip(),
callback=self.parse_store,
)
def parse_store(self, response):
if response.xpath('//fieldset[@class="storehours left"]/dl').extract_first():
storeHoursHTML = str(response.xpath('//fieldset[@class="storehours left"]/dl').extract_first())
p = re.compile(r'<.*?>')
storeHours = p.sub(' ', storeHoursHTML)
storeHours = storeHours.strip()
else:
storeHours = response.xpath('//fieldset[@class="storehours left"]/dl').extract_first()
properties = {
'name': response.xpath('//h2[@class="name"]/text()').extract_first(),
'website': response.request.url,
'ref': response.xpath('//h2[@class="name"]/text()').extract_first(),
'addr_full': response.xpath('//fieldset[@class]/dl/dd/text()').extract_first().strip(),
'city': response.xpath('//fieldset[@class]/dl/dd/text()').extract()[1].strip().split(',')[0],
'state': response.xpath('//fieldset[@class]/dl/dd/text()').extract()[1].split(',')[1].strip().replace('\xa0',' ').split()[0],
'postcode': response.xpath('//fieldset[@class]/dl/dd/text()').extract()[1].split(',')[1].strip().replace('\xa0',' ').split()[1],
'opening_hours': storeHours,
}
yield GeojsonPointItem(**properties)
|
{
"content_hash": "406e5c4cba0d17fc2f783b72b4531fc5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 140,
"avg_line_length": 40.82222222222222,
"alnum_prop": 0.5737615677735438,
"repo_name": "iandees/all-the-places",
"id": "c09f460849bfae86d67430d251fb5f8cea96e837",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "locations/spiders/surlatable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2134"
},
{
"name": "Python",
"bytes": "116132"
},
{
"name": "Shell",
"bytes": "4477"
}
],
"symlink_target": ""
}
|
class Graph:
"""
Data structure to store graphs (based on adjacency lists)
"""
def __init__(self):
self.num_vertices = 0
self.num_edges = 0
self.adjacency = {}
def add_vertex(self, vertex):
"""
Adds a vertex to the graph
"""
if vertex not in self.adjacency:
self.adjacency[vertex] = {}
self.num_vertices += 1
def add_edge(self, head, tail, weight):
"""
Adds an edge to the graph
"""
self.add_vertex(head)
self.add_vertex(tail)
if head == tail:
return
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def distinct_weight(self):
"""
        For Boruvka's algorithm the weights should be distinct.
        Converts the weights to be distinct.
"""
edges = self.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for i in range(len(edges)):
edges[i] = list(edges[i])
edges.sort(key=lambda e: e[2])
for i in range(len(edges) - 1):
if edges[i][2] >= edges[i + 1][2]:
edges[i + 1][2] = edges[i][2] + 1
for edge in edges:
head, tail, weight = edge
self.adjacency[head][tail] = weight
self.adjacency[tail][head] = weight
def __str__(self):
"""
Returns string representation of the graph
"""
string = ""
for tail in self.adjacency:
for head in self.adjacency[tail]:
weight = self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip("\n")
def get_edges(self):
"""
        Returns all edges in the graph
"""
output = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]))
return output
def get_vertices(self):
"""
Returns all vertices in the graph
"""
return self.adjacency.keys()
@staticmethod
def build(vertices=None, edges=None):
"""
Builds a graph from the given set of vertices and edges
"""
g = Graph()
if vertices is None:
vertices = []
if edges is None:
            edges = []
for vertex in vertices:
g.add_vertex(vertex)
for edge in edges:
g.add_edge(*edge)
return g
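# Illustrative sketch, not part of the original file: build a weighted
# triangle; get_edges() reports each undirected edge once per direction:
#   g = Graph.build(vertices=[0, 1, 2], edges=[[0, 1, 1], [1, 2, 2], [0, 2, 3]])
#   g.get_edges()  # -> [(0, 1, 1), (0, 2, 3), (1, 0, 1), (1, 2, 2), ...]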
class UnionFind:
"""
Disjoint set Union and Find for Boruvka's algorithm
"""
def __init__(self):
self.parent = {}
self.rank = {}
def __len__(self):
return len(self.parent)
def make_set(self, item):
if item in self.parent:
return self.find(item)
self.parent[item] = item
self.rank[item] = 0
return item
def find(self, item):
if item not in self.parent:
return self.make_set(item)
if item != self.parent[item]:
self.parent[item] = self.find(self.parent[item])
return self.parent[item]
def union(self, item1, item2):
root1 = self.find(item1)
root2 = self.find(item2)
if root1 == root2:
return root1
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
return root1
if self.rank[root1] < self.rank[root2]:
self.parent[root1] = root2
return root2
if self.rank[root1] == self.rank[root2]:
self.rank[root1] += 1
self.parent[root2] = root1
return root1
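    # Illustrative sketch, not part of the original file: after make_set and
    # union, both items share one representative thanks to union by rank plus
    # path compression in find():
    #   uf = UnionFind()
    #   uf.make_set(1); uf.make_set(2); uf.union(1, 2)
    #   uf.find(1) == uf.find(2)  # -> True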
@staticmethod
def boruvka_mst(graph):
"""
Implementation of Boruvka's algorithm
>>> g = Graph()
>>> g = Graph.build([0, 1, 2, 3], [[0, 1, 1], [0, 2, 1],[2, 3, 1]])
>>> g.distinct_weight()
        >>> bg = UnionFind.boruvka_mst(g)
>>> print(bg)
1 -> 0 == 1
2 -> 0 == 2
0 -> 1 == 1
0 -> 2 == 2
3 -> 2 == 3
2 -> 3 == 3
"""
num_components = graph.num_vertices
        union_find = UnionFind()
mst_edges = []
while num_components > 1:
cheap_edge = {}
for vertex in graph.get_vertices():
cheap_edge[vertex] = -1
edges = graph.get_edges()
for edge in edges:
head, tail, weight = edge
edges.remove((tail, head, weight))
for edge in edges:
head, tail, weight = edge
set1 = union_find.find(head)
set2 = union_find.find(tail)
if set1 != set2:
if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
cheap_edge[set1] = [head, tail, weight]
if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
cheap_edge[set2] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
head, tail, weight = cheap_edge[vertex]
if union_find.find(head) != union_find.find(tail):
union_find.union(head, tail)
mst_edges.append(cheap_edge[vertex])
num_components = num_components - 1
mst = Graph.build(edges=mst_edges)
return mst
|
{
"content_hash": "02e3b44e60a2fa7b333b1a2084d7aa80",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 78,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.4614862572107228,
"repo_name": "TheAlgorithms/Python",
"id": "6c72615cc729d7e00320f5b49c48cac74d256f3f",
"size": "5894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphs/minimum_spanning_tree_boruvka.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from django.db.models import Count
from junction.devices.models import Device
from junction.schedule.models import ScheduleItem, ScheduleItemType
from .models import (
ChoiceFeedbackQuestion,
ChoiceFeedbackQuestionValue,
ScheduleItemChoiceFeedback,
ScheduleItemTextFeedback,
TextFeedbackQuestion,
)
COLORS = ["#46BFBD", "#FDB45C", "#F7464A"]
def get_feedback_questions(conference_id):
"""Get all feedback questions for the conference.
    {'talk': {'Text': [{'id': 1, 'title': 'How was the speaker ?',
                        'is_required': True}],
              'Workshop': [{'id': 1, 'title': 'How was the content ?',
                            'is_required': True,
                            'allowed_values': [{'title': 'Awesome', 'id': 2},
                                               {'title': 'Bad', 'id': 3},
                                               {'title': 'Ok', 'id': 4}]}]}}
"""
text_questions = get_text_feedback_questions(conference_id=conference_id)
choice_questions = get_choice_feedback_questions(conference_id=conference_id)
return _merge_questions(
text_questions=text_questions, choice_questions=choice_questions
)
def get_text_feedback_questions(conference_id):
"""Get all text questions for the conference organized by
schedule item type.
Return dict contain all questions with schedule item type in dict.
"""
qs = TextFeedbackQuestion.objects.filter(conference_id=conference_id)
    return _get_questions_organized_by_type(qs)
def get_choice_feedback_questions(conference_id):
"""Get all choice based questions for the conference organized by
schedule item type.
"""
qs = ChoiceFeedbackQuestion.objects.filter(
conference_id=conference_id
).prefetch_related("allowed_values")
    return _get_questions_organized_by_type(qs)
def has_submitted(feedback, device_uuid):
"""
"""
device = Device.objects.get(uuid=device_uuid)
text_feedback = ScheduleItemTextFeedback.objects.filter(
schedule_item_id=feedback.validated_data["schedule_item_id"], device=device
)
if text_feedback:
return True
choice_feedback = ScheduleItemChoiceFeedback.objects.filter(
schedule_item_id=feedback.validated_data["schedule_item_id"], device=device
)
    return choice_feedback.exists()
def _has_required_ids(master, submitted):
for item in master:
if item not in submitted:
return False
return True
def has_required_fields_data(feedback):
try:
data = feedback.validated_data
sch = ScheduleItem.objects.get(pk=data["schedule_item_id"])
sch_type = ScheduleItemType.objects.get(title=sch.type)
t_ids = TextFeedbackQuestion.objects.filter(
schedule_item_type=sch_type, conference=sch.conference, is_required=True
).values_list("id", flat=True)
if not data.get("text"):
if t_ids:
return False, "Text Feedback is missing"
else:
submitted_t_ids = {d["id"] for d in data.get("text")}
if not _has_required_ids(master=t_ids, submitted=submitted_t_ids):
return False, "Required text questions are missing"
c_ids = ChoiceFeedbackQuestion.objects.filter(
schedule_item_type=sch_type, conference=sch.conference, is_required=True
).values_list("id", flat=True)
if not data.get("choices"):
if c_ids:
return False, "Choice feedback is missing"
else:
submitted_c_ids = {d["id"] for d in data.get("choices")}
if not _has_required_ids(master=c_ids, submitted=submitted_c_ids):
return False, "Choice feedback is missing"
return True, ""
    except ObjectDoesNotExist as e:
        print(e)  # Replace with log
        return False, str(e)
def create(feedback, device_uuid):
device = Device.objects.get(uuid=device_uuid)
schedule_item_id = feedback.validated_data["schedule_item_id"]
try:
with transaction.atomic():
text, choices = [], []
if feedback.validated_data.get("text"):
text = create_text_feedback(
schedule_item_id=schedule_item_id,
feedbacks=feedback.validated_data.get("text"),
device=device,
)
if feedback.validated_data.get("choices"):
choices = create_choice_feedback(
schedule_item_id=schedule_item_id,
feedbacks=feedback.validated_data.get("choices"),
device=device,
)
return {"text": text, "choices": choices}
except (IntegrityError, ObjectDoesNotExist) as e:
print(e) # Replace with log
return False
def create_text_feedback(schedule_item_id, feedbacks, device):
text = []
for feedback in feedbacks:
obj = ScheduleItemTextFeedback.objects.create(
schedule_item_id=schedule_item_id,
question_id=feedback["id"],
text=feedback["text"],
device=device,
)
d = {
"id": obj.id,
"text": obj.text,
"question_id": feedback["id"],
"schedule_item_id": schedule_item_id,
}
text.append(d)
return text
def create_choice_feedback(schedule_item_id, feedbacks, device):
choices = []
for feedback in feedbacks:
value = ChoiceFeedbackQuestionValue.objects.get(
question_id=feedback["id"], id=feedback["value_id"]
)
obj = ScheduleItemChoiceFeedback.objects.create(
schedule_item_id=schedule_item_id,
device=device,
question_id=feedback["id"],
value=value.value,
)
d = {
"id": obj.id,
"value_id": value.id,
"question_id": feedback["id"],
"schedule_item_id": schedule_item_id,
}
choices.append(d)
return choices
def get_feedback(schedule_item):
feedback = {
"text": _get_text_feedback(schedule_item=schedule_item),
"choices": _get_choice_feedback(schedule_item=schedule_item),
}
return feedback
def _get_text_feedback(schedule_item):
questions = TextFeedbackQuestion.objects.filter(
schedule_item_type__title=schedule_item.type
)
text = [
{
"question": question,
"values": ScheduleItemTextFeedback.objects.filter(
question=question, schedule_item=schedule_item
),
}
for question in questions
]
return text
def _get_choice_feedback(schedule_item):
questions = ChoiceFeedbackQuestion.objects.filter(
schedule_item_type__title=schedule_item.type
    ).prefetch_related("allowed_values")
choices = []
for question in questions:
values = (
ScheduleItemChoiceFeedback.objects.filter(
schedule_item=schedule_item, question=question
)
.values("value")
.annotate(Count("value"))
)
d = {
"question": question,
"values": _get_choice_value_for_chart(question=question, values=values),
}
choices.append(d)
return choices
def _get_choice_value_for_chart(question, values):
data = []
for index, value in enumerate(values):
d = {"label": str(question.allowed_values.get(value=value["value"]).title)}
d["value"] = value["value__count"]
d["color"] = COLORS[index]
data.append(d)
return data
def _get_questions_organized_by_type(qs):
questions = defaultdict(list)
for question in qs:
questions[question.schedule_item_type.title].append(question.to_response())
return questions
def _merge_questions(text_questions, choice_questions):
"""Merge the choice and text based questions into schedule type
{'Talk': {'text': [..], 'choice': [...]},}
"""
types = set(text_questions.keys())
    types = types.union(choice_questions.keys())
questions = {}
for item in types:
questions[item] = {
"text": text_questions.get(item),
"choice": choice_questions.get(item),
}
return questions
|
{
"content_hash": "f203a0ad3eef3468989f9368a1e58137",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 84,
"avg_line_length": 32.2734375,
"alnum_prop": 0.6146211571048172,
"repo_name": "pythonindia/junction",
"id": "0d06d8af41067439cceed5fbab3890256142f453",
"size": "8287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "junction/feedback/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190844"
},
{
"name": "HTML",
"bytes": "161794"
},
{
"name": "JavaScript",
"bytes": "49000"
},
{
"name": "Python",
"bytes": "379163"
},
{
"name": "Shell",
"bytes": "595"
}
],
"symlink_target": ""
}
|
"""An interactive Python prompt in the Porcupine process, accessible from the "Run" menu.
Unlike a normal ">>>" prompt, the one here lets you run commands that affect
the current Porcupine instance. You can e.g. access the opened tabs.
For example, this sets the color of the last tab:
>>> get_tab_manager().tabs()[-1].textwidget['bg'] = 'green'
This plugin is somewhat buggy and annoying to use, but it's still occasionally
useful when developing Porcupine.
"""
from __future__ import annotations
import contextlib
import io
import tkinter
import traceback
from tkinter import ttk
from typing import Any
from porcupine import get_tab_manager, menubar, tabs, textutils
# In "Run" menu, get the important stuff first
setup_after = ["run"]
class PromptTab(tabs.Tab):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.title_choices = ["Porcupine Debug Prompt"]
self.namespace: dict[str, Any] = {}
exec("from porcupine import *", self.namespace)
self.textwidget = tkinter.Text(self, width=1, height=1)
self.textwidget.pack(side="left", fill="both", expand=True)
self.textwidget.mark_set("output_end", "end")
self.textwidget.mark_gravity("output_end", "left")
self.show(">>> from porcupine import *\n>>> ")
textutils.use_pygments_tags(self.textwidget)
self.scrollbar = ttk.Scrollbar(self)
self.scrollbar.pack(side="left", fill="y")
self.textwidget.config(yscrollcommand=self.scrollbar.set)
self.scrollbar.config(command=self.textwidget.yview)
self.bind("<<TabSelected>>", (lambda event: self.textwidget.focus()), add=True)
self.textwidget.bind("<Return>", self.on_enter_key, add=True)
def show(self, string: str) -> None:
self.textwidget.insert("end - 1 char", string)
self.textwidget.mark_set("output_end", "end - 1 char")
self.textwidget.mark_set("insert", "end - 1 char")
self.textwidget.see("insert")
def on_focus(self) -> None:
self.textwidget.focus_set()
def on_enter_key(self, event: object = None) -> str:
code_string = self.textwidget.get("output_end", "end - 1 char")
out = io.StringIO()
with contextlib.redirect_stdout(out), contextlib.redirect_stderr(out):
try:
code = compile(code_string, "<prompt>", "single")
exec(code, self.namespace)
except Exception:
traceback.print_exc()
self.show(f"\n{out.getvalue()}>>> ")
return "break"
def start_prompt() -> None:
for tab in get_tab_manager().tabs():
if isinstance(tab, PromptTab):
get_tab_manager().select(tab)
return
get_tab_manager().add_tab(PromptTab(get_tab_manager()))
def setup() -> None:
menubar.get_menu("Run").add_separator()
menubar.get_menu("Run").add_command(label="Porcupine debug prompt", command=start_prompt)
|
{
"content_hash": "9941c747c6688778037e55da93d32bbb",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 93,
"avg_line_length": 36.451219512195124,
"alnum_prop": 0.6423553027768485,
"repo_name": "Akuli/porcupine",
"id": "95fc08f09d0963c6de3460b18433de468d29203f",
"size": "2989",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "porcupine/plugins/porcupine_debug_prompt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1559"
},
{
"name": "NSIS",
"bytes": "5872"
},
{
"name": "Python",
"bytes": "715408"
},
{
"name": "Shell",
"bytes": "748"
},
{
"name": "Tcl",
"bytes": "7994"
}
],
"symlink_target": ""
}
|
from yambopy import *
from itertools import product
def calculate_distances(kpoints):
"""
take a list of k-points and calculate the distances between all of them
"""
kpoints = np.array(kpoints)
distances = [0]
distance = 0
for nk in range(1,len(kpoints)):
distance += np.linalg.norm(kpoints[nk-1]-kpoints[nk])
distances.append(distance)
return distances
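# Hedged usage sketch (added example, not part of the original module):
# cumulative path length along two unit segments is [0, 1, 2].
def _example_calculate_distances():
    kpoints = [[0, 0, 0], [1, 0, 0], [1, 1, 0]]
    assert np.allclose(calculate_distances(kpoints), [0.0, 1.0, 2.0])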
def expand_kpts(kpts,syms):
"""
Take a list of qpoints and symmetry operations and return the full brillouin zone
with the corresponding index in the irreducible brillouin zone
"""
full_kpts = []
print("nkpoints:", len(kpts))
for nk,k in enumerate(kpts):
for sym in syms:
full_kpts.append((nk,np.dot(sym,k)))
return full_kpts
def vec_in_list(veca,vec_list,atol=1e-6):
"""
Check if a vector exists in a list of vectors
"""
return np.array([ np.allclose(veca,vecb,rtol=atol,atol=atol) for vecb in vec_list ]).any()
def isbetween(a,b,c,eps=1e-5):
""" Check if c is between a and b
"""
return np.isclose(np.linalg.norm(a-c)+np.linalg.norm(b-c)-np.linalg.norm(a-b),0,atol=eps)
def red_car(red,lat):
"""
Convert reduced coordinates to cartesian
"""
return np.array([coord[0]*lat[0]+coord[1]*lat[1]+coord[2]*lat[2] for coord in red])
def car_red(car,lat):
"""
Convert cartesian coordinates to reduced
"""
return np.array([np.linalg.solve(np.array(lat).T,coord) for coord in car])
def vol_lat(lat):
"""
Calculate the volume of a lattice
"""
a1,a2,a3 = np.array(lat)
return np.dot(a1,np.cross(a2,a3))
def rec_lat(lat):
"""
Calculate the reciprocal lattice vectors
"""
v = vol_lat(lat)
a1,a2,a3 = np.array(lat)
b1 = np.cross(a2,a3)/v
b2 = np.cross(a3,a1)/v
b3 = np.cross(a1,a2)/v
return np.array([b1,b2,b3])
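# Hedged check (added example, not part of the original module): with this
# convention (no 2*pi factor) the direct and reciprocal lattice vectors
# satisfy a_i . b_j = delta_ij, shown here for an orthorhombic cell.
def _example_rec_lat():
    lat = [[2.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 4.0]]
    assert np.allclose(np.dot(np.array(lat), np.array(rec_lat(lat)).T), np.eye(3))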
def get_path(kmesh,path,debug=False):
"""
    get the indices of the kpoints in the kmesh
that fall along the path
"""
kmesh = np.array(kmesh)
path = np.array(path)
#find the points along the high symmetry lines
bands_indexes = []
#for all the paths
for k in range(len(path)-1):
        # store here all the points in the path as
        # [index of the kpoint, distance to the start point, kpoint coordinates]
        kpoints_in_path = []
start_kpt = path[k] #start point of the path
end_kpt = path[k+1] #end point of the path
#iterate over all the kpoints
for index, kpt in enumerate(kmesh):
#if the point is collinear we add it
if isbetween(start_kpt,end_kpt,kpt):
value = [ index, np.linalg.norm(start_kpt-kpt), kpt ]
kpoints_in_path.append( value )
        #sort the points according to distance to the start of the path
kpoints_in_path = sorted(kpoints_in_path,key=lambda i: i[1])
#for all the kpoints in the path
for index, disp, kpt in kpoints_in_path:
bands_indexes.append( index )
            if debug: print(("%12.8lf "*3) % tuple(kpt), index)
return np.array(bands_indexes)
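# Hedged usage sketch (added example, not part of the original module):
# select the mesh points on the segment from Gamma to (0.5, 0, 0); the
# off-path point at (0, 0.5, 0) is skipped.
def _example_get_path():
    kmesh = [[0.0, 0.0, 0.0], [0.25, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 0.5, 0.0]]
    path = [[0.0, 0.0, 0.0], [0.5, 0.0, 0.0]]
    assert list(get_path(kmesh, path)) == [0, 1, 2]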
def replicate_red_kmesh(kmesh,repx=list(range(1)),repy=list(range(1)),repz=list(range(1))):
"""
    copy a kmesh in the three directions
the kmesh has to be in reduced coordinates
"""
kmesh = np.array(kmesh)
kmesh_nkpoints = len(kmesh)
kmesh_full = []
kmesh_idx = []
for x,y,z in product(repx,repy,repz):
kmesh_shift = kmesh + np.array([x,y,z])
kmesh_full.append(kmesh_shift)
kmesh_idx.append(list(range(kmesh_nkpoints)))
return np.vstack(kmesh_full), np.hstack(kmesh_idx)
def point_matching(a,b,double_check=True,debug=False,eps=1e-8):
"""
Matches the points of list a to the points of list b
using a nearest neighbour finding algorithm
Arguments:
double_check: after the nearest neighbours are assigned check further
if the distance between points is within the precision eps
eps: precision for the double check (default: 1e-8)
"""
#karma
from scipy.spatial import cKDTree
from time import time
a = np.array(a)
b = np.array(b)
start_time = time()
    #initialize the kdtree
kdtree = cKDTree(a, leafsize=10)
map_b_to_a = []
for xb in b:
current_dist,index = kdtree.query(xb, k=1, distance_upper_bound=6)
map_b_to_a.append(index)
map_b_to_a = np.array(map_b_to_a)
if debug:
print("took %4.2lfs"%(time()-start_time))
if double_check:
for ib,ia in enumerate(map_b_to_a):
dist = np.linalg.norm(a[ia]-b[ib])
if dist > eps:
raise ValueError('point a %d: %s is far away from points b %d: %s dist: %lf'%(ia,str(a[ia]),ib,str(b[ib]),dist))
return map_b_to_a
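# Hedged usage sketch (added example, not part of the original module):
# recover a known permutation of a small point set; each point of b is
# mapped back to its twin in a.
def _example_point_matching():
    a = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    b = a[[2, 0, 1]]
    assert list(point_matching(a, b)) == [2, 0, 1]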
|
{
"content_hash": "824074f1242ccab7613726a7673de4df",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 129,
"avg_line_length": 29.407185628742514,
"alnum_prop": 0.6131134188556302,
"repo_name": "alexmoratalla/yambopy",
"id": "9b69fc1da191b3c07913dc813e1bfc049850fc38",
"size": "5020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "yambopy/lattice.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "887890"
},
{
"name": "Shell",
"bytes": "1296"
}
],
"symlink_target": ""
}
|
__all__ = ['imread']
from numpy import array
def imread(fname, flatten=False):
"""
Load an image from file.
Parameters
----------
fname : str
Image file name, e.g. ``test.jpg``.
flatten : bool, optional
If true, convert the output to grey-scale. Default is False.
Returns
-------
img_array : ndarray
The different colour bands/channels are stored in the
third dimension, such that a grey-image is MxN, an
RGB-image MxNx3 and an RGBA-image MxNx4.
Raises
------
ImportError
If the Python Imaging Library (PIL) can not be imported.
"""
try:
from PIL import Image
except ImportError:
raise ImportError("Could not import the Python Imaging Library (PIL)"
" required to load image files. Please refer to"
" http://pypi.python.org/pypi/PIL/ for installation"
" instructions.")
fp = open(fname, "rb")
im = Image.open(fp)
if flatten:
im = im.convert('F')
result = array(im)
fp.close()
return result
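# Hedged usage sketch (added example, not part of the original module; it
# assumes PIL is installed and that a temporary file may be written): save
# a small RGB image and read it back as an MxNx3 array.
def _example_imread():
    import os
    import tempfile
    from PIL import Image
    handle, path = tempfile.mkstemp(suffix='.png')
    os.close(handle)
    Image.new('RGB', (4, 3), (255, 0, 0)).save(path)
    img = imread(path)
    assert img.shape == (3, 4, 3)
    os.remove(path)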
|
{
"content_hash": "2f7f83c4956af98fea87f187d44b621f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 26.325581395348838,
"alnum_prop": 0.5653710247349824,
"repo_name": "lesserwhirls/scipy-cwt",
"id": "20ab6e4471d0315c2509ca46c1b19f9a97dfe469",
"size": "1132",
"binary": false,
"copies": "8",
"ref": "refs/heads/cwt",
"path": "scipy/ndimage/io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8532454"
},
{
"name": "C++",
"bytes": "6602032"
},
{
"name": "FORTRAN",
"bytes": "5895476"
},
{
"name": "Objective-C",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "4776663"
},
{
"name": "Shell",
"bytes": "1742"
}
],
"symlink_target": ""
}
|
import logging
import optparse
import subprocess
import sys
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.webkit_finder import WebKitFinder
_log = logging.getLogger(__name__)
class Bucket(object):
def __init__(self, tests):
self.tests = tests
def size(self):
return len(self.tests)
class Bisector(object):
def __init__(self, tests, is_debug):
self.executive = Executive()
self.tests = tests
self.expected_failure = tests[-1]
self.is_debug = is_debug
self.webkit_finder = WebKitFinder(FileSystem())
def bisect(self):
if self.test_fails_in_isolation():
self.buckets = [Bucket([self.expected_failure])]
print '%s fails when run in isolation.' % self.expected_failure
self.print_result()
return 0
if not self.test_fails(self.tests):
_log.error('%s does not fail' % self.expected_failure)
return 1
# Split the list of test into buckets. Each bucket has at least one test required to cause
# the expected failure at the end. Split buckets in half until there are only buckets left
# with one item in them.
self.buckets = [Bucket(self.tests[:-1]), Bucket([self.expected_failure])]
while not self.is_done():
self.print_progress()
self.split_largest_bucket()
self.print_result()
self.verify_non_flaky()
return 0
def test_fails_in_isolation(self):
return self.test_bucket_list_fails([Bucket([self.expected_failure])])
def verify_non_flaky(self):
print 'Verifying the failure is not flaky by running 10 times.'
count_failures = 0
for i in range(0, 10):
if self.test_bucket_list_fails(self.buckets):
count_failures += 1
print 'Failed %d/10 times' % count_failures
def print_progress(self):
count = 0
for bucket in self.buckets:
count += len(bucket.tests)
print '%d tests left, %d buckets' % (count, len(self.buckets))
def print_result(self):
tests = []
for bucket in self.buckets:
tests += bucket.tests
extra_args = ' --debug' if self.is_debug else ''
print 'run-webkit-tests%s --child-processes=1 --order=none %s' % (extra_args, " ".join(tests))
def is_done(self):
for bucket in self.buckets:
if bucket.size() > 1:
return False
return True
def split_largest_bucket(self):
index = 0
largest_index = 0
largest_size = 0
for bucket in self.buckets:
if bucket.size() > largest_size:
largest_index = index
largest_size = bucket.size()
index += 1
bucket_to_split = self.buckets[largest_index]
halfway_point = int(largest_size / 2)
first_half = Bucket(bucket_to_split.tests[:halfway_point])
second_half = Bucket(bucket_to_split.tests[halfway_point:])
buckets_before = self.buckets[:largest_index]
buckets_after = self.buckets[largest_index + 1:]
# Do the second half first because it tends to be faster because the http tests are front-loaded and slow.
new_buckets = buckets_before + [second_half] + buckets_after
if self.test_bucket_list_fails(new_buckets):
self.buckets = new_buckets
return
new_buckets = buckets_before + [first_half] + buckets_after
if self.test_bucket_list_fails(new_buckets):
self.buckets = new_buckets
return
self.buckets = buckets_before + [first_half, second_half] + buckets_after
def test_bucket_list_fails(self, buckets):
tests = []
for bucket in buckets:
tests += bucket.tests
return self.test_fails(tests)
def test_fails(self, tests):
extra_args = ['--debug'] if self.is_debug else []
path_to_run_webkit_tests = self.webkit_finder.path_from_webkit_base('Tools', 'Scripts', 'run-webkit-tests')
output = self.executive.popen([path_to_run_webkit_tests, '--child-processes', '1', '--order', 'none', '--no-retry',
'--no-show-results', '--verbose'] + extra_args + tests, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
failure_string = self.expected_failure + ' failed'
if failure_string in output.stderr.read():
return True
return False
def main(argv):
logging.basicConfig()
option_parser = optparse.OptionParser()
option_parser.add_option('--test-list', action='store',
                             help='file that lists the tests to bisect. The last test in the list is the expected failure.', metavar='FILE'),
option_parser.add_option('--debug', action='store_true', default=False, help='whether to use a debug build'),
options, args = option_parser.parse_args(argv)
tests = open(options.test_list).read().strip().split('\n')
bisector = Bisector(tests, is_debug=options.debug)
return bisector.bisect()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
{
"content_hash": "08937d02843b1abb18415e0502e3ac29",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 142,
"avg_line_length": 36.72027972027972,
"alnum_prop": 0.6084555322795658,
"repo_name": "axinging/chromium-crosswalk",
"id": "f4fc025607cf1052a0956811207228baa79eb7c2",
"size": "6781",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/bisect_test_ordering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "8242"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23945"
},
{
"name": "C",
"bytes": "4103204"
},
{
"name": "C++",
"bytes": "225022948"
},
{
"name": "CSS",
"bytes": "949808"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Go",
"bytes": "18155"
},
{
"name": "HTML",
"bytes": "28206993"
},
{
"name": "Java",
"bytes": "7651204"
},
{
"name": "JavaScript",
"bytes": "18831169"
},
{
"name": "Makefile",
"bytes": "96270"
},
{
"name": "Objective-C",
"bytes": "1228122"
},
{
"name": "Objective-C++",
"bytes": "7563676"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "418221"
},
{
"name": "Python",
"bytes": "7855597"
},
{
"name": "Shell",
"bytes": "472586"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
"""
Statespace Tools
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
from scipy.linalg import solve_sylvester
import pandas as pd
from statsmodels.compat.pandas import Appender
from statsmodels.tools.data import _is_using_pandas
from scipy.linalg.blas import find_best_blas_type
from . import (_initialization, _representation, _kalman_filter,
_kalman_smoother, _simulation_smoother,
_cfa_simulation_smoother, _tools)
compatibility_mode = False
has_trmm = True
prefix_dtype_map = {
's': np.float32, 'd': np.float64, 'c': np.complex64, 'z': np.complex128
}
prefix_initialization_map = {
's': _initialization.sInitialization,
'd': _initialization.dInitialization,
'c': _initialization.cInitialization,
'z': _initialization.zInitialization
}
prefix_statespace_map = {
's': _representation.sStatespace, 'd': _representation.dStatespace,
'c': _representation.cStatespace, 'z': _representation.zStatespace
}
prefix_kalman_filter_map = {
's': _kalman_filter.sKalmanFilter,
'd': _kalman_filter.dKalmanFilter,
'c': _kalman_filter.cKalmanFilter,
'z': _kalman_filter.zKalmanFilter
}
prefix_kalman_smoother_map = {
's': _kalman_smoother.sKalmanSmoother,
'd': _kalman_smoother.dKalmanSmoother,
'c': _kalman_smoother.cKalmanSmoother,
'z': _kalman_smoother.zKalmanSmoother
}
prefix_simulation_smoother_map = {
's': _simulation_smoother.sSimulationSmoother,
'd': _simulation_smoother.dSimulationSmoother,
'c': _simulation_smoother.cSimulationSmoother,
'z': _simulation_smoother.zSimulationSmoother
}
prefix_cfa_simulation_smoother_map = {
's': _cfa_simulation_smoother.sCFASimulationSmoother,
'd': _cfa_simulation_smoother.dCFASimulationSmoother,
'c': _cfa_simulation_smoother.cCFASimulationSmoother,
'z': _cfa_simulation_smoother.zCFASimulationSmoother
}
prefix_pacf_map = {
's': _tools._scompute_coefficients_from_multivariate_pacf,
'd': _tools._dcompute_coefficients_from_multivariate_pacf,
'c': _tools._ccompute_coefficients_from_multivariate_pacf,
'z': _tools._zcompute_coefficients_from_multivariate_pacf
}
prefix_sv_map = {
's': _tools._sconstrain_sv_less_than_one,
'd': _tools._dconstrain_sv_less_than_one,
'c': _tools._cconstrain_sv_less_than_one,
'z': _tools._zconstrain_sv_less_than_one
}
prefix_reorder_missing_matrix_map = {
's': _tools.sreorder_missing_matrix,
'd': _tools.dreorder_missing_matrix,
'c': _tools.creorder_missing_matrix,
'z': _tools.zreorder_missing_matrix
}
prefix_reorder_missing_vector_map = {
's': _tools.sreorder_missing_vector,
'd': _tools.dreorder_missing_vector,
'c': _tools.creorder_missing_vector,
'z': _tools.zreorder_missing_vector
}
prefix_copy_missing_matrix_map = {
's': _tools.scopy_missing_matrix,
'd': _tools.dcopy_missing_matrix,
'c': _tools.ccopy_missing_matrix,
'z': _tools.zcopy_missing_matrix
}
prefix_copy_missing_vector_map = {
's': _tools.scopy_missing_vector,
'd': _tools.dcopy_missing_vector,
'c': _tools.ccopy_missing_vector,
'z': _tools.zcopy_missing_vector
}
prefix_copy_index_matrix_map = {
's': _tools.scopy_index_matrix,
'd': _tools.dcopy_index_matrix,
'c': _tools.ccopy_index_matrix,
'z': _tools.zcopy_index_matrix
}
prefix_copy_index_vector_map = {
's': _tools.scopy_index_vector,
'd': _tools.dcopy_index_vector,
'c': _tools.ccopy_index_vector,
'z': _tools.zcopy_index_vector
}
def set_mode(compatibility=None):
if compatibility:
raise NotImplementedError('Compatibility mode is only available in'
' statsmodels <= 0.9')
def companion_matrix(polynomial):
r"""
Create a companion matrix
Parameters
----------
polynomial : array_like or list
If an iterable, interpreted as the coefficients of the polynomial from
which to form the companion matrix. Polynomial coefficients are in
order of increasing degree, and may be either scalars (as in an AR(p)
model) or coefficient matrices (as in a VAR(p) model). If an integer,
it is interpreted as the size of a companion matrix of a scalar
polynomial, where the polynomial coefficients are initialized to zeros.
If a matrix polynomial is passed, :math:`C_0` may be set to the scalar
value 1 to indicate an identity matrix (doing so will improve the speed
of the companion matrix creation).
Returns
-------
companion_matrix : ndarray
Notes
-----
Given coefficients of a lag polynomial of the form:
.. math::
c(L) = c_0 + c_1 L + \dots + c_p L^p
returns a matrix of the form
.. math::
\begin{bmatrix}
\phi_1 & 1 & 0 & \cdots & 0 \\
\phi_2 & 0 & 1 & & 0 \\
\vdots & & & \ddots & 0 \\
& & & & 1 \\
\phi_n & 0 & 0 & \cdots & 0 \\
\end{bmatrix}
where some or all of the :math:`\phi_i` may be non-zero (if `polynomial` is
None, then all are equal to zero).
If the coefficients provided are scalars :math:`(c_0, c_1, \dots, c_p)`,
then the companion matrix is an :math:`n \times n` matrix formed with the
elements in the first column defined as
:math:`\phi_i = -\frac{c_i}{c_0}, i \in 1, \dots, p`.
If the coefficients provided are matrices :math:`(C_0, C_1, \dots, C_p)`,
each of shape :math:`(m, m)`, then the companion matrix is an
:math:`nm \times nm` matrix formed with the elements in the first column
defined as :math:`\phi_i = -C_0^{-1} C_i', i \in 1, \dots, p`.
It is important to understand the expected signs of the coefficients. A
typical AR(p) model is written as:
.. math::
y_t = a_1 y_{t-1} + \dots + a_p y_{t-p} + \varepsilon_t
This can be rewritten as:
.. math::
(1 - a_1 L - \dots - a_p L^p )y_t = \varepsilon_t \\
(1 + c_1 L + \dots + c_p L^p )y_t = \varepsilon_t \\
c(L) y_t = \varepsilon_t
The coefficients from this form are defined to be :math:`c_i = - a_i`, and
it is the :math:`c_i` coefficients that this function expects to be
provided.
"""
identity_matrix = False
if isinstance(polynomial, (int, np.integer)):
# GH 5570, allow numpy integer types, but coerce to python int
n = int(polynomial)
m = 1
polynomial = None
else:
n = len(polynomial) - 1
if n < 1:
raise ValueError("Companion matrix polynomials must include at"
" least two terms.")
if isinstance(polynomial, list) or isinstance(polynomial, tuple):
try:
# Note: cannot use polynomial[0] because of the special
# behavior associated with matrix polynomials and the constant
# 1, see below.
m = len(polynomial[1])
except TypeError:
m = 1
# Check if we just have a scalar polynomial
if m == 1:
polynomial = np.asanyarray(polynomial)
# Check if 1 was passed as the first argument (indicating an
# identity matrix)
elif polynomial[0] == 1:
polynomial[0] = np.eye(m)
identity_matrix = True
else:
m = 1
polynomial = np.asanyarray(polynomial)
matrix = np.zeros((n * m, n * m), dtype=np.asanyarray(polynomial).dtype)
idx = np.diag_indices((n - 1) * m)
idx = (idx[0], idx[1] + m)
matrix[idx] = 1
if polynomial is not None and n > 0:
if m == 1:
matrix[:, 0] = -polynomial[1:] / polynomial[0]
elif identity_matrix:
for i in range(n):
matrix[i * m:(i + 1) * m, :m] = -polynomial[i+1].T
else:
inv = np.linalg.inv(polynomial[0])
for i in range(n):
matrix[i * m:(i + 1) * m, :m] = -np.dot(inv, polynomial[i+1]).T
return matrix
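# Hedged usage sketch (added example, not part of the original module): for
# the scalar lag polynomial 1 - 0.5 L - 0.2 L^2, i.e. coefficients
# [1, -0.5, -0.2], the first column holds phi_i = -c_i / c_0 and the
# superdiagonal holds ones.
def _example_companion_matrix():
    C = companion_matrix([1, -0.5, -0.2])
    assert np.allclose(C, [[0.5, 1.0], [0.2, 0.0]])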
def diff(series, k_diff=1, k_seasonal_diff=None, seasonal_periods=1):
r"""
Difference a series simply and/or seasonally along the zero-th axis.
Given a series (denoted :math:`y_t`), performs the differencing operation
.. math::
\Delta^d \Delta_s^D y_t
    where :math:`d =` `k_diff`, :math:`s =` `seasonal_periods`,
    :math:`D =` `k\_seasonal\_diff`, and :math:`\Delta` is the difference
operator.
Parameters
----------
series : array_like
The series to be differenced.
    k_diff : int, optional
        The number of simple differences to perform. Default is 1.
    k_seasonal_diff : int or None, optional
        The number of seasonal differences to perform. Default is no seasonal
        differencing.
seasonal_periods : int, optional
The seasonal lag. Default is 1. Unused if there is no seasonal
differencing.
Returns
-------
differenced : ndarray
The differenced array.
"""
pandas = _is_using_pandas(series, None)
differenced = np.asanyarray(series) if not pandas else series
# Seasonal differencing
if k_seasonal_diff is not None:
while k_seasonal_diff > 0:
if not pandas:
differenced = (differenced[seasonal_periods:] -
differenced[:-seasonal_periods])
else:
sdiffed = differenced.diff(seasonal_periods)
differenced = sdiffed[seasonal_periods:]
k_seasonal_diff -= 1
# Simple differencing
if not pandas:
differenced = np.diff(differenced, k_diff, axis=0)
else:
while k_diff > 0:
differenced = differenced.diff()[1:]
k_diff -= 1
return differenced
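# Hedged usage sketch (added example, not part of the original module): one
# seasonal difference at lag 4 followed by one simple difference removes a
# linear trend entirely.
def _example_diff():
    series = np.arange(8.)
    out = diff(series, k_diff=1, k_seasonal_diff=1, seasonal_periods=4)
    assert np.allclose(out, [0.0, 0.0, 0.0])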
def concat(series, axis=0, allow_mix=False):
"""
Concatenate a set of series.
Parameters
----------
series : iterable
An iterable of series to be concatenated
axis : int, optional
        The axis along which to concatenate. Default is 0 (rows).
allow_mix : bool
Whether or not to allow a mix of pandas and non-pandas objects. Default
is False. If true, the returned object is an ndarray, and additional
pandas metadata (e.g. column names, indices, etc) is lost.
Returns
-------
concatenated : array or pd.DataFrame
The concatenated array. Will be a DataFrame if series are pandas
objects.
"""
is_pandas = np.r_[[_is_using_pandas(s, None) for s in series]]
ndim = np.r_[[np.ndim(s) for s in series]]
max_ndim = np.max(ndim)
if max_ndim > 2:
raise ValueError('`tools.concat` does not support arrays with 3 or'
' more dimensions.')
# Make sure the iterable is mutable
if isinstance(series, tuple):
series = list(series)
# Standardize ndim
for i in range(len(series)):
if ndim[i] == 0 and max_ndim == 1:
series[i] = np.atleast_1d(series[i])
elif ndim[i] == 0 and max_ndim == 2:
series[i] = np.atleast_2d(series[i])
elif ndim[i] == 1 and max_ndim == 2 and is_pandas[i]:
name = series[i].name
series[i] = series[i].to_frame()
series[i].columns = [name]
elif ndim[i] == 1 and max_ndim == 2 and not is_pandas[i]:
series[i] = np.atleast_2d(series[i]).T
if np.all(is_pandas):
if isinstance(series[0], pd.DataFrame):
base_columns = series[0].columns
else:
base_columns = pd.Index([series[0].name])
for i in range(1, len(series)):
s = series[i]
if isinstance(s, pd.DataFrame):
# Handle case where we were passed a dataframe and a series
# to concatenate, and the series did not have a name.
if s.columns.equals(pd.Index([None])):
s.columns = base_columns[:1]
s_columns = s.columns
else:
s_columns = pd.Index([s.name])
if axis == 0 and not base_columns.equals(s_columns):
raise ValueError('Columns must match to concatenate along'
' rows.')
elif axis == 1 and not series[0].index.equals(s.index):
raise ValueError('Index must match to concatenate along'
' columns.')
concatenated = pd.concat(series, axis=axis)
elif np.all(~is_pandas) or allow_mix:
concatenated = np.concatenate(series, axis=axis)
else:
raise ValueError('Attempted to concatenate Pandas objects with'
' non-Pandas objects with `allow_mix=False`.')
return concatenated
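# Hedged usage sketch (added example, not part of the original module): two
# 1-dimensional ndarrays are concatenated along the rows; all-pandas inputs
# would instead return a pandas object.
def _example_concat():
    out = concat([np.zeros(2), np.ones(3)], axis=0)
    assert out.shape == (5,)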
def is_invertible(polynomial, threshold=1 - 1e-10):
r"""
Determine if a polynomial is invertible.
Requires all roots of the polynomial lie inside the unit circle.
Parameters
----------
polynomial : array_like or tuple, list
Coefficients of a polynomial, in order of increasing degree.
For example, `polynomial=[1, -0.5]` corresponds to the polynomial
:math:`1 - 0.5x` which has root :math:`2`. If it is a matrix
polynomial (in which case the coefficients are coefficient matrices),
a tuple or list of matrices should be passed.
threshold : number
        Allowed threshold for `is_invertible` to return True. Default is
        1 - 1e-10.
See Also
--------
companion_matrix
Notes
-----
If the coefficients provided are scalars :math:`(c_0, c_1, \dots, c_n)`,
then the corresponding polynomial is :math:`c_0 + c_1 L + \dots + c_n L^n`.
If the coefficients provided are matrices :math:`(C_0, C_1, \dots, C_n)`,
then the corresponding polynomial is :math:`C_0 + C_1 L + \dots + C_n L^n`.
There are three equivalent methods of determining if the polynomial
represented by the coefficients is invertible:
The first method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
& = constant (1 - \lambda_1 L)
(1 - \lambda_2 L) \dots (1 - \lambda_n L)
In order for :math:`C(L)` to be invertible, it must be that each factor
:math:`(1 - \lambda_i L)` is invertible; the condition is then that
:math:`|\lambda_i| < 1`, where :math:`\lambda_i` is a root of the
polynomial.
The second method factorizes the polynomial into:
.. math::
C(L) & = c_0 + c_1 L + \dots + c_n L^n \\
             & = constant (L - \zeta_1) (L - \zeta_2) \dots (L - \zeta_n)
The condition is now :math:`|\zeta_i| > 1`, where :math:`\zeta_i` is a root
of the polynomial with reversed coefficients and
:math:`\lambda_i = \frac{1}{\zeta_i}`.
Finally, a companion matrix can be formed using the coefficients of the
polynomial. Then the eigenvalues of that matrix give the roots of the
polynomial. This last method is the one actually used.
"""
# First method:
# np.all(np.abs(np.roots(np.r_[1, params])) < 1)
# Second method:
# np.all(np.abs(np.roots(np.r_[1, params][::-1])) > 1)
# Final method:
eigvals = np.linalg.eigvals(companion_matrix(polynomial))
return np.all(np.abs(eigvals) < threshold)
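# Hedged usage sketch (added example, not part of the original module):
# 1 - 0.5 L has inverse root 0.5 and is invertible; 1 - 2 L has inverse
# root 2 and is not.
def _example_is_invertible():
    assert is_invertible([1, -0.5])
    assert not is_invertible([1, -2.0])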
def solve_discrete_lyapunov(a, q, complex_step=False):
r"""
Solves the discrete Lyapunov equation using a bilinear transformation.
Notes
-----
This is a modification of the version in Scipy (see
https://github.com/scipy/scipy/blob/master/scipy/linalg/_solvers.py)
which allows passing through the complex numbers in the matrix a
(usually the transition matrix) in order to allow complex step
differentiation.
"""
eye = np.eye(a.shape[0], dtype=a.dtype)
if not complex_step:
aH = a.conj().transpose()
aHI_inv = np.linalg.inv(aH + eye)
b = np.dot(aH - eye, aHI_inv)
c = 2*np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
return solve_sylvester(b.conj().transpose(), b, -c)
else:
aH = a.transpose()
aHI_inv = np.linalg.inv(aH + eye)
b = np.dot(aH - eye, aHI_inv)
c = 2*np.dot(np.dot(np.linalg.inv(a + eye), q), aHI_inv)
return solve_sylvester(b.transpose(), b, -c)
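# Hedged check (added example, not part of the original module): the
# solution satisfies the discrete Lyapunov equation a x a' - x + q = 0 for
# a stable transition matrix.
def _example_solve_discrete_lyapunov():
    a = np.array([[0.5, 0.1], [0.0, 0.3]])
    q = np.eye(2)
    x = solve_discrete_lyapunov(a, q)
    assert np.allclose(np.dot(np.dot(a, x), a.T) - x + q, 0)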
def constrain_stationary_univariate(unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
Parameters
----------
unconstrained : ndarray
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
Returns
-------
constrained : ndarray
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
References
----------
.. [*] Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = unconstrained.shape[0]
y = np.zeros((n, n), dtype=unconstrained.dtype)
r = unconstrained/((1 + unconstrained**2)**0.5)
for k in range(n):
for i in range(k):
y[k, i] = y[k - 1, i] + r[k] * y[k - 1, k - i - 1]
y[k, k] = r[k]
return -y[n - 1, :]
def unconstrain_stationary_univariate(constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : ndarray
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer.
Returns
-------
unconstrained : ndarray
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component.
References
----------
.. [*] Monahan, John F. 1984.
"A Note on Enforcing Stationarity in
Autoregressive-moving Average Models."
Biometrika 71 (2) (August 1): 403-404.
"""
n = constrained.shape[0]
y = np.zeros((n, n), dtype=constrained.dtype)
y[n-1:] = -constrained
for k in range(n-1, 0, -1):
for i in range(k):
y[k-1, i] = (y[k, i] - y[k, k]*y[k, k-i-1]) / (1 - y[k, k]**2)
r = y.diagonal()
x = r / ((1 - r**2)**0.5)
return x
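# Hedged round-trip check (added example, not part of the original module):
# the Monahan (1984) transform and its inverse recover the original
# unconstrained parameters.
def _example_stationary_univariate_roundtrip():
    unconstrained = np.array([1.5, -0.3])
    constrained = constrain_stationary_univariate(unconstrained)
    assert np.allclose(
        unconstrain_stationary_univariate(constrained), unconstrained)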
def _constrain_sv_less_than_one_python(unconstrained, order=None,
k_endog=None):
"""
Transform arbitrary matrices to matrices with singular values less than
one.
Parameters
----------
unconstrained : list
Arbitrary matrices. Should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`.
order : int, optional
The order of the autoregression.
k_endog : int, optional
The dimension of the data vector.
Returns
-------
constrained : list
Partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
See Also
--------
constrain_stationary_multivariate
Notes
-----
Corresponds to Lemma 2.2 in Ansley and Kohn (1986). See
`constrain_stationary_multivariate` for more details.
"""
from scipy import linalg
constrained = [] # P_s, s = 1, ..., p
if order is None:
order = len(unconstrained)
if k_endog is None:
k_endog = unconstrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
A = unconstrained[i]
B, lower = linalg.cho_factor(eye + np.dot(A, A.T), lower=True)
constrained.append(linalg.solve_triangular(B, A, lower=lower))
return constrained
def _compute_coefficients_from_multivariate_pacf_python(
partial_autocorrelations, error_variance, transform_variance=False,
order=None, k_endog=None):
"""
Transform matrices with singular values less than one to matrices
corresponding to a stationary (or invertible) process.
Parameters
----------
partial_autocorrelations : list
Partial autocorrelation matrices. Should be a list of length `order`,
where each element is an array sized `k_endog` x `k_endog`.
error_variance : ndarray
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
        it is not transformed by it (when `transform_variance` is False). The
error term variance is required input when transformation is used
either to force an autoregressive component to be stationary or to
force a moving average component to be invertible.
transform_variance : bool, optional
Whether or not to transform the error variance term. This option is
not typically used, and the default is False.
order : int, optional
The order of the autoregression.
k_endog : int, optional
The dimension of the data vector.
Returns
-------
coefficient_matrices : list
Transformed coefficient matrices leading to a stationary VAR
representation.
See Also
--------
constrain_stationary_multivariate
Notes
-----
Corresponds to Lemma 2.1 in Ansley and Kohn (1986). See
`constrain_stationary_multivariate` for more details.
"""
from scipy import linalg
if order is None:
order = len(partial_autocorrelations)
if k_endog is None:
k_endog = partial_autocorrelations[0].shape[0]
# If we want to keep the provided variance but with the constrained
# coefficient matrices, we need to make a copy here, and then after the
# main loop we will transform the coefficients to match the passed variance
if not transform_variance:
initial_variance = error_variance
# Need to make the input variance large enough that the recursions
        # do not lead to zero-matrices due to roundoff error, which would cause
# exceptions from the Cholesky decompositions.
# Note that this will still not always ensure positive definiteness,
# and for k_endog, order large enough an exception may still be raised
error_variance = np.eye(k_endog) * (order + k_endog)**10
forward_variances = [error_variance] # \Sigma_s
backward_variances = [error_variance] # \Sigma_s^*, s = 0, ..., p
autocovariances = [error_variance] # \Gamma_s
# \phi_{s,k}, s = 1, ..., p
# k = 1, ..., s+1
forwards = []
# \phi_{s,k}^*
backwards = []
error_variance_factor = linalg.cholesky(error_variance, lower=True)
forward_factors = [error_variance_factor]
backward_factors = [error_variance_factor]
# We fill in the entries as follows:
# [1,1]
# [2,2], [2,1]
# [3,3], [3,1], [3,2]
# ...
# [p,p], [p,1], ..., [p,p-1]
# the last row, correctly ordered, is then used as the coefficients
for s in range(order): # s = 0, ..., p-1
prev_forwards = forwards
prev_backwards = backwards
forwards = []
backwards = []
# Create the "last" (k = s+1) matrix
# Note: this is for k = s+1. However, below we then have to fill
# in for k = 1, ..., s in order.
# P L*^{-1} = x
# x L* = P
# L*' x' = P'
forwards.append(
linalg.solve_triangular(
backward_factors[s], partial_autocorrelations[s].T,
lower=True, trans='T'))
forwards[0] = np.dot(forward_factors[s], forwards[0].T)
# P' L^{-1} = x
# x L = P'
# L' x' = P
backwards.append(
linalg.solve_triangular(
forward_factors[s], partial_autocorrelations[s],
lower=True, trans='T'))
backwards[0] = np.dot(backward_factors[s], backwards[0].T)
# Update the variance
# Note: if s >= 1, this will be further updated in the for loop
# below
# Also, this calculation will be re-used in the forward variance
tmp = np.dot(forwards[0], backward_variances[s])
autocovariances.append(tmp.copy().T)
# Create the remaining k = 1, ..., s matrices,
# only has an effect if s >= 1
for k in range(s):
forwards.insert(k, prev_forwards[k] - np.dot(
forwards[-1], prev_backwards[s-(k+1)]))
backwards.insert(k, prev_backwards[k] - np.dot(
backwards[-1], prev_forwards[s-(k+1)]))
autocovariances[s+1] += np.dot(autocovariances[k+1],
prev_forwards[s-(k+1)].T)
# Create forward and backwards variances
forward_variances.append(
forward_variances[s] - np.dot(tmp, forwards[s].T)
)
backward_variances.append(
backward_variances[s] -
np.dot(
np.dot(backwards[s], forward_variances[s]),
backwards[s].T
)
)
# Cholesky factors
forward_factors.append(
linalg.cholesky(forward_variances[s+1], lower=True)
)
backward_factors.append(
linalg.cholesky(backward_variances[s+1], lower=True)
)
# If we do not want to use the transformed variance, we need to
# adjust the constrained matrices, as presented in Lemma 2.3, see above
variance = forward_variances[-1]
if not transform_variance:
# Here, we need to construct T such that:
# variance = T * initial_variance * T'
# To do that, consider the Cholesky of variance (L) and
# input_variance (M) to get:
# L L' = T M M' T' = (TM) (TM)'
# => L = T M
# => L M^{-1} = T
initial_variance_factor = np.linalg.cholesky(initial_variance)
transformed_variance_factor = np.linalg.cholesky(variance)
transform = np.dot(initial_variance_factor,
np.linalg.inv(transformed_variance_factor))
inv_transform = np.linalg.inv(transform)
for i in range(order):
forwards[i] = (
np.dot(np.dot(transform, forwards[i]), inv_transform)
)
return forwards, variance
def constrain_stationary_multivariate_python(unconstrained, error_variance,
transform_variance=False,
prefix=None):
r"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation for a vector autoregression.
Parameters
----------
unconstrained : array or list
Arbitrary matrices to be transformed to stationary coefficient matrices
of the VAR. If a list, should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`. If an array, should be
the matrices horizontally concatenated and sized
`k_endog` x `k_endog * order`.
error_variance : ndarray
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
        it is not transformed by it (when `transform_variance` is False). The
error term variance is required input when transformation is used
either to force an autoregressive component to be stationary or to
force a moving average component to be invertible.
transform_variance : bool, optional
Whether or not to transform the error variance term. This option is
not typically used, and the default is False.
prefix : {'s','d','c','z'}, optional
The appropriate BLAS prefix to use for the passed datatypes. Only
use if absolutely sure that the prefix is correct or an error will
result.
Returns
-------
constrained : array or list
Transformed coefficient matrices leading to a stationary VAR
representation. Will match the type of the passed `unconstrained`
variable (so if a list was passed, a list will be returned).
Notes
-----
In the notation of [1]_, the arguments `(variance, unconstrained)` are
written as :math:`(\Sigma, A_1, \dots, A_p)`, where :math:`p` is the order
of the vector autoregression, and is here determined by the length of
the `unconstrained` argument.
There are two steps in the constraining algorithm.
First, :math:`(A_1, \dots, A_p)` are transformed into
:math:`(P_1, \dots, P_p)` via Lemma 2.2 of [1]_.
Second, :math:`(\Sigma, P_1, \dots, P_p)` are transformed into
:math:`(\Sigma, \phi_1, \dots, \phi_p)` via Lemmas 2.1 and 2.3 of [1]_.
If `transform_variance=True`, then only Lemma 2.1 is applied in the second
step.
While this function can be used even in the univariate case, it is much
slower, so in that case `constrain_stationary_univariate` is preferred.
References
----------
.. [1] Ansley, Craig F., and Robert Kohn. 1986.
"A Note on Reparameterizing a Vector Autoregressive Moving Average Model
to Enforce Stationarity."
Journal of Statistical Computation and Simulation 24 (2): 99-106.
.. [*] Ansley, Craig F, and Paul Newbold. 1979.
"Multivariate Partial Autocorrelations."
In Proceedings of the Business and Economic Statistics Section, 349-53.
American Statistical Association
"""
use_list = type(unconstrained) == list
if not use_list:
k_endog, order = unconstrained.shape
order //= k_endog
unconstrained = [
unconstrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
order = len(unconstrained)
k_endog = unconstrained[0].shape[0]
# Step 1: convert from arbitrary matrices to those with singular values
# less than one.
sv_constrained = _constrain_sv_less_than_one_python(
unconstrained, order, k_endog)
# Step 2: convert matrices from our "partial autocorrelation matrix" space
# (matrices with singular values less than one) to the space of stationary
# coefficient matrices
constrained, var = _compute_coefficients_from_multivariate_pacf_python(
sv_constrained, error_variance, transform_variance, order, k_endog)
if not use_list:
constrained = np.concatenate(constrained, axis=1).reshape(
k_endog, k_endog * order)
return constrained, var
@Appender(constrain_stationary_multivariate_python.__doc__)
def constrain_stationary_multivariate(unconstrained, variance,
transform_variance=False,
prefix=None):
use_list = type(unconstrained) == list
if use_list:
unconstrained = np.concatenate(unconstrained, axis=1)
k_endog, order = unconstrained.shape
order //= k_endog
if order < 1:
raise ValueError('Must have order at least 1')
if k_endog < 1:
raise ValueError('Must have at least 1 endogenous variable')
if prefix is None:
prefix, dtype, _ = find_best_blas_type(
[unconstrained, variance])
dtype = prefix_dtype_map[prefix]
unconstrained = np.asfortranarray(unconstrained, dtype=dtype)
variance = np.asfortranarray(variance, dtype=dtype)
# Step 1: convert from arbitrary matrices to those with singular values
# less than one.
# sv_constrained = _constrain_sv_less_than_one(unconstrained, order,
# k_endog, prefix)
sv_constrained = prefix_sv_map[prefix](unconstrained, order, k_endog)
# Step 2: convert matrices from our "partial autocorrelation matrix"
# space (matrices with singular values less than one) to the space of
# stationary coefficient matrices
constrained, variance = prefix_pacf_map[prefix](
sv_constrained, variance, transform_variance, order, k_endog)
constrained = np.array(constrained, dtype=dtype)
variance = np.array(variance, dtype=dtype)
if use_list:
constrained = [
constrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
return constrained, variance
def _unconstrain_sv_less_than_one(constrained, order=None, k_endog=None):
"""
Transform matrices with singular values less than one to arbitrary
matrices.
Parameters
----------
constrained : list
The partial autocorrelation matrices. Should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`.
order : int, optional
The order of the autoregression.
k_endog : int, optional
The dimension of the data vector.
Returns
-------
unconstrained : list
Unconstrained matrices. A list of length `order`, where each element is
an array sized `k_endog` x `k_endog`.
See Also
--------
unconstrain_stationary_multivariate
Notes
-----
Corresponds to the inverse of Lemma 2.2 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
"""
from scipy import linalg
unconstrained = [] # A_s, s = 1, ..., p
if order is None:
order = len(constrained)
if k_endog is None:
k_endog = constrained[0].shape[0]
eye = np.eye(k_endog)
for i in range(order):
P = constrained[i]
# B^{-1} B^{-1}' = I - P P'
B_inv, lower = linalg.cho_factor(eye - np.dot(P, P.T), lower=True)
# A = BP
# B^{-1} A = P
unconstrained.append(linalg.solve_triangular(B_inv, P, lower=lower))
return unconstrained
def _compute_multivariate_sample_acovf(endog, maxlag):
r"""
    Compute multivariate sample autocovariances
Parameters
----------
endog : array_like
Sample data on which to compute sample autocovariances. Shaped
`nobs` x `k_endog`.
maxlag : int
Maximum lag to use when computing the sample autocovariances.
Returns
-------
sample_autocovariances : list
A list of the first `maxlag` sample autocovariance matrices. Each
matrix is shaped `k_endog` x `k_endog`.
Notes
-----
This function computes the forward sample autocovariances:
.. math::
\hat \Gamma(s) = \frac{1}{n} \sum_{t=1}^{n-s}
(Z_t - \bar Z) (Z_{t+s} - \bar Z)'
See page 353 of Wei (1990). This function is primarily implemented for
checking the partial autocorrelation functions below, and so is quite slow.
References
----------
.. [*] Wei, William. 1990.
Time Series Analysis : Univariate and Multivariate Methods. Boston:
Pearson.
"""
# Get the (demeaned) data as an array
endog = np.array(endog)
if endog.ndim == 1:
endog = endog[:, np.newaxis]
endog -= np.mean(endog, axis=0)
# Dimensions
nobs, k_endog = endog.shape
sample_autocovariances = []
for s in range(maxlag + 1):
sample_autocovariances.append(np.zeros((k_endog, k_endog)))
for t in range(nobs - s):
sample_autocovariances[s] += np.outer(endog[t], endog[t+s])
sample_autocovariances[s] /= nobs
return sample_autocovariances
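# Hedged usage sketch (added example, not part of the original module):
# with 100 draws of a 2-dimensional series and maxlag=2, the result is a
# list of three 2 x 2 matrices (lags 0, 1 and 2).
def _example_sample_acovf():
    endog = np.random.normal(size=(100, 2))
    acovf = _compute_multivariate_sample_acovf(endog, maxlag=2)
    assert len(acovf) == 3 and acovf[0].shape == (2, 2)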
def _compute_multivariate_acovf_from_coefficients(
coefficients, error_variance, maxlag=None,
forward_autocovariances=False):
r"""
Compute multivariate autocovariances from vector autoregression coefficient
matrices
Parameters
----------
coefficients : array or list
The coefficients matrices. If a list, should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`. If
an array, should be the coefficient matrices horizontally concatenated
and sized `k_endog` x `k_endog * order`.
error_variance : ndarray
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`.
maxlag : int, optional
The maximum autocovariance to compute. Default is `order`-1. Can be
zero, in which case it returns the variance.
forward_autocovariances : bool, optional
Whether or not to compute forward autocovariances
:math:`E(y_t y_{t+j}')`. Default is False, so that backward
autocovariances :math:`E(y_t y_{t-j}')` are returned.
Returns
-------
autocovariances : list
A list of the first `maxlag` autocovariance matrices. Each matrix is
shaped `k_endog` x `k_endog`.
Notes
-----
Computes
.. math::
\Gamma(j) = E(y_t y_{t-j}')
for j = 1, ..., `maxlag`, unless `forward_autocovariances` is specified,
in which case it computes:
.. math::
E(y_t y_{t+j}') = \Gamma(j)'
Coefficients are assumed to be provided from the VAR model:
.. math::
y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t
Autocovariances are calculated by solving the associated discrete Lyapunov
equation of the state space representation of the VAR process.
"""
from scipy import linalg
# Convert coefficients to a list of matrices, for use in
# `companion_matrix`; get dimensions
if type(coefficients) == list:
order = len(coefficients)
k_endog = coefficients[0].shape[0]
else:
k_endog, order = coefficients.shape
order //= k_endog
coefficients = [
coefficients[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
if maxlag is None:
maxlag = order-1
# Start with VAR(p): w_{t+1} = phi_1 w_t + ... + phi_p w_{t-p+1} + u_{t+1}
# Then stack the VAR(p) into a VAR(1) in companion matrix form:
# z_{t+1} = F z_t + v_t
companion = companion_matrix(
[1] + [-np.squeeze(coefficients[i]) for i in range(order)]
).T
# Compute the error variance matrix for the stacked form: E v_t v_t'
selected_variance = np.zeros(companion.shape)
selected_variance[:k_endog, :k_endog] = error_variance
# Compute the unconditional variance of z_t: E z_t z_t'
stacked_cov = linalg.solve_discrete_lyapunov(companion, selected_variance)
# The first (block) row of the variance of z_t gives the first p-1
# autocovariances of w_t: \Gamma_i = E w_t w_t+i with \Gamma_0 = Var(w_t)
# Note: these are okay, checked against ArmaProcess
autocovariances = [
stacked_cov[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(min(order, maxlag+1))
]
for i in range(maxlag - (order-1)):
stacked_cov = np.dot(companion, stacked_cov)
autocovariances += [
stacked_cov[:k_endog, -k_endog:]
]
if forward_autocovariances:
for i in range(len(autocovariances)):
autocovariances[i] = autocovariances[i].T
return autocovariances
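# Hedged check (added example, not part of the original module): for a
# scalar AR(1) with coefficient 0.5 and unit error variance, the lag-0
# autocovariance is 1 / (1 - 0.5**2) = 4/3.
def _example_acovf_from_coefficients():
    acovf = _compute_multivariate_acovf_from_coefficients(
        [np.array([[0.5]])], np.array([[1.0]]))
    assert np.allclose(acovf[0], 4.0 / 3.0)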
def _compute_multivariate_sample_pacf(endog, maxlag):
"""
    Compute multivariate sample partial autocorrelations
Parameters
----------
endog : array_like
Sample data on which to compute sample autocovariances. Shaped
`nobs` x `k_endog`.
maxlag : int
Maximum lag for which to calculate sample partial autocorrelations.
Returns
-------
sample_pacf : list
A list of the first `maxlag` sample partial autocorrelation matrices.
Each matrix is shaped `k_endog` x `k_endog`.
"""
sample_autocovariances = _compute_multivariate_sample_acovf(endog, maxlag)
return _compute_multivariate_pacf_from_autocovariances(
sample_autocovariances)
def _compute_multivariate_pacf_from_autocovariances(autocovariances,
order=None, k_endog=None):
"""
Compute multivariate partial autocorrelations from autocovariances.
Parameters
----------
autocovariances : list
Autocorrelations matrices. Should be a list of length `order` + 1,
where each element is an array sized `k_endog` x `k_endog`.
order : int, optional
The order of the autoregression.
k_endog : int, optional
The dimension of the data vector.
Returns
-------
pacf : list
List of first `order` multivariate partial autocorrelations.
See Also
--------
unconstrain_stationary_multivariate
Notes
-----
Note that this computes multivariate partial autocorrelations.
Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
Computes sample partial autocorrelations if sample autocovariances are
given.
"""
from scipy import linalg
if order is None:
order = len(autocovariances)-1
if k_endog is None:
k_endog = autocovariances[0].shape[0]
# Now apply the Ansley and Kohn (1986) algorithm, except that instead of
# calculating phi_{s+1, s+1} = L_s P_{s+1} {L_s^*}^{-1} (which requires
# the partial autocorrelation P_{s+1} which is what we're trying to
# calculate here), we calculate it as in Ansley and Newbold (1979), using
# the autocovariances \Gamma_s and the forwards and backwards residual
# variances \Sigma_s, \Sigma_s^*:
# phi_{s+1, s+1} = [ \Gamma_{s+1}' - \phi_{s,1} \Gamma_s' - ... -
# \phi_{s,s} \Gamma_1' ] {\Sigma_s^*}^{-1}
# Forward and backward variances
forward_variances = [] # \Sigma_s
backward_variances = [] # \Sigma_s^*, s = 0, ..., p
# \phi_{s,k}, s = 1, ..., p
# k = 1, ..., s+1
forwards = []
# \phi_{s,k}^*
backwards = []
forward_factors = [] # L_s
backward_factors = [] # L_s^*, s = 0, ..., p
# Ultimately we want to construct the partial autocorrelation matrices
# Note that this is "1-indexed" in the sense that it stores P_1, ... P_p
# rather than starting with P_0.
partial_autocorrelations = []
# We fill in the entries of phi_{s,k} as follows:
# [1,1]
# [2,2], [2,1]
# [3,3], [3,1], [3,2]
# ...
# [p,p], [p,1], ..., [p,p-1]
# the last row, correctly ordered, should be the same as the coefficient
# matrices provided in the argument `constrained`
for s in range(order): # s = 0, ..., p-1
prev_forwards = list(forwards)
prev_backwards = list(backwards)
forwards = []
backwards = []
# Create forward and backwards variances Sigma_s, Sigma*_s
forward_variance = autocovariances[0].copy()
backward_variance = autocovariances[0].T.copy()
for k in range(s):
forward_variance -= np.dot(prev_forwards[k],
autocovariances[k+1])
backward_variance -= np.dot(prev_backwards[k],
autocovariances[k+1].T)
forward_variances.append(forward_variance)
backward_variances.append(backward_variance)
# Cholesky factors
forward_factors.append(
linalg.cholesky(forward_variances[s], lower=True)
)
backward_factors.append(
linalg.cholesky(backward_variances[s], lower=True)
)
# Create the intermediate sum term
if s == 0:
# phi_11 = \Gamma_1' \Gamma_0^{-1}
# phi_11 \Gamma_0 = \Gamma_1'
# \Gamma_0 phi_11' = \Gamma_1
forwards.append(linalg.cho_solve(
(forward_factors[0], True), autocovariances[1]).T)
# backwards.append(forwards[-1])
# phi_11_star = \Gamma_1 \Gamma_0^{-1}
# phi_11_star \Gamma_0 = \Gamma_1
# \Gamma_0 phi_11_star' = \Gamma_1'
backwards.append(linalg.cho_solve(
(backward_factors[0], True), autocovariances[1].T).T)
else:
# G := \Gamma_{s+1}' -
# \phi_{s,1} \Gamma_s' - .. - \phi_{s,s} \Gamma_1'
tmp_sum = autocovariances[s+1].T.copy()
for k in range(s):
tmp_sum -= np.dot(prev_forwards[k], autocovariances[s-k].T)
# Create the "last" (k = s+1) matrix
# Note: this is for k = s+1. However, below we then have to
# fill in for k = 1, ..., s in order.
# phi = G Sigma*^{-1}
# phi Sigma* = G
# Sigma*' phi' = G'
# Sigma* phi' = G'
# (because Sigma* is symmetric)
forwards.append(linalg.cho_solve(
(backward_factors[s], True), tmp_sum.T).T)
# phi = G' Sigma^{-1}
# phi Sigma = G'
# Sigma' phi' = G
# Sigma phi' = G
# (because Sigma is symmetric)
backwards.append(linalg.cho_solve(
(forward_factors[s], True), tmp_sum).T)
# Create the remaining k = 1, ..., s matrices,
# only has an effect if s >= 1
for k in range(s):
forwards.insert(k, prev_forwards[k] - np.dot(
forwards[-1], prev_backwards[s-(k+1)]))
backwards.insert(k, prev_backwards[k] - np.dot(
backwards[-1], prev_forwards[s-(k+1)]))
# Partial autocorrelation matrix: P_{s+1}
# P = L^{-1} phi L*
# L P = (phi L*)
partial_autocorrelations.append(linalg.solve_triangular(
forward_factors[s], np.dot(forwards[s], backward_factors[s]),
lower=True))
return partial_autocorrelations
def _compute_multivariate_pacf_from_coefficients(constrained, error_variance,
order=None, k_endog=None):
r"""
Transform matrices corresponding to a stationary (or invertible) process
to matrices with singular values less than one.
Parameters
----------
constrained : array or list
The coefficients matrices. If a list, should be a list of length
`order`, where each element is an array sized `k_endog` x `k_endog`. If
an array, should be the coefficient matrices horizontally concatenated
and sized `k_endog` x `k_endog * order`.
error_variance : ndarray
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`.
order : int, optional
The order of the autoregression.
k_endog : int, optional
The dimension of the data vector.
Returns
-------
pacf : list
List of first `order` multivariate partial autocorrelations.
See Also
--------
unconstrain_stationary_multivariate
Notes
-----
Note that this computes multivariate partial autocorrelations.
Corresponds to the inverse of Lemma 2.1 in Ansley and Kohn (1986). See
`unconstrain_stationary_multivariate` for more details.
Notes
-----
Coefficients are assumed to be provided from the VAR model:
.. math::
y_t = A_1 y_{t-1} + \dots + A_p y_{t-p} + \varepsilon_t
"""
if type(constrained) == list:
order = len(constrained)
k_endog = constrained[0].shape[0]
else:
k_endog, order = constrained.shape
order //= k_endog
# Get autocovariances for the process; these are defined to be
# E z_t z_{t-j}'
# However, we want E z_t z_{t+j}' = (E z_t z_{t-j}')'
_acovf = _compute_multivariate_acovf_from_coefficients
autocovariances = [
autocovariance.T for autocovariance in
_acovf(constrained, error_variance, maxlag=order)]
return _compute_multivariate_pacf_from_autocovariances(autocovariances)
def unconstrain_stationary_multivariate(constrained, error_variance):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Parameters
----------
constrained : array or list
Constrained parameters of, e.g., an autoregressive or moving average
component, to be transformed to arbitrary parameters used by the
optimizer. If a list, should be a list of length `order`, where each
element is an array sized `k_endog` x `k_endog`. If an array, should be
the coefficient matrices horizontally concatenated and sized
`k_endog` x `k_endog * order`.
error_variance : ndarray
The variance / covariance matrix of the error term. Should be sized
`k_endog` x `k_endog`. This is used as input in the algorithm even if
        it is not transformed by it (when `transform_variance` is False).
Returns
-------
unconstrained : ndarray
Unconstrained parameters used by the optimizer, to be transformed to
stationary coefficients of, e.g., an autoregressive or moving average
component. Will match the type of the passed `constrained`
variable (so if a list was passed, a list will be returned).
Notes
-----
Uses the list representation internally, even if an array is passed.
References
----------
.. [*] Ansley, Craig F., and Robert Kohn. 1986.
"A Note on Reparameterizing a Vector Autoregressive Moving Average Model
to Enforce Stationarity."
Journal of Statistical Computation and Simulation 24 (2): 99-106.
"""
use_list = type(constrained) == list
if not use_list:
k_endog, order = constrained.shape
order //= k_endog
constrained = [
constrained[:k_endog, i*k_endog:(i+1)*k_endog]
for i in range(order)
]
else:
order = len(constrained)
k_endog = constrained[0].shape[0]
# Step 1: convert matrices from the space of stationary
# coefficient matrices to our "partial autocorrelation matrix" space
# (matrices with singular values less than one)
partial_autocorrelations = _compute_multivariate_pacf_from_coefficients(
constrained, error_variance, order, k_endog)
# Step 2: convert from arbitrary matrices to those with singular values
# less than one.
unconstrained = _unconstrain_sv_less_than_one(
partial_autocorrelations, order, k_endog)
if not use_list:
unconstrained = np.concatenate(unconstrained, axis=1)
return unconstrained, error_variance
def validate_matrix_shape(name, shape, nrows, ncols, nobs):
"""
Validate the shape of a possibly time-varying matrix, or raise an exception
Parameters
----------
name : str
The name of the matrix being validated (used in exception messages)
shape : array_like
The shape of the matrix to be validated. May be of size 2 or (if
the matrix is time-varying) 3.
nrows : int
The expected number of rows.
ncols : int
The expected number of columns.
nobs : int
The number of observations (used to validate the last dimension of a
time-varying matrix)
Raises
------
ValueError
If the matrix is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [2, 3]:
raise ValueError('Invalid value for %s matrix. Requires a'
' 2- or 3-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the matrix
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
if not shape[1] == ncols:
raise ValueError('Invalid dimensions for %s matrix: requires %d'
' columns, got %d' % (name, ncols, shape[1]))
# If we do not yet know `nobs`, do not allow time-varying arrays
if nobs is None and not (ndim == 2 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s matrix: time-varying'
' matrices cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
if ndim == 3 and nobs is not None and not shape[-1] in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' matrix. Requires shape (*,*,%d), got %s' %
(name, nobs, str(shape)))
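# Hedged usage sketch (added example, not part of the original module): a
# correctly shaped time-varying design matrix passes silently, while a
# wrong row count raises ValueError.
def _example_validate_matrix_shape():
    validate_matrix_shape('design', (2, 3, 10), nrows=2, ncols=3, nobs=10)
    try:
        validate_matrix_shape('design', (1, 3, 10), nrows=2, ncols=3, nobs=10)
    except ValueError:
        pass
    else:
        raise AssertionError('expected a ValueError')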
def validate_vector_shape(name, shape, nrows, nobs):
"""
Validate the shape of a possibly time-varying vector, or raise an exception
Parameters
----------
name : str
The name of the vector being validated (used in exception messages)
shape : array_like
The shape of the vector to be validated. May be of size 1 or (if
the vector is time-varying) 2.
nrows : int
The expected number of rows (elements of the vector).
nobs : int
The number of observations (used to validate the last dimension of a
time-varying vector)
Raises
------
ValueError
If the vector is not of the desired shape.
"""
ndim = len(shape)
# Enforce dimension
if ndim not in [1, 2]:
raise ValueError('Invalid value for %s vector. Requires a'
' 1- or 2-dimensional array, got %d dimensions' %
(name, ndim))
# Enforce the shape of the vector
if not shape[0] == nrows:
raise ValueError('Invalid dimensions for %s vector: requires %d'
' rows, got %d' % (name, nrows, shape[0]))
# If we do not yet know `nobs`, do not allow time-varying arrays
if nobs is None and not (ndim == 1 or shape[-1] == 1):
raise ValueError('Invalid dimensions for %s vector: time-varying'
' vectors cannot be given unless `nobs` is specified'
' (implicitly when a dataset is bound or else set'
                         ' explicitly)' % name)
# Enforce time-varying array size
    if ndim == 2 and shape[1] not in [1, nobs]:
raise ValueError('Invalid dimensions for time-varying %s'
' vector. Requires shape (*,%d), got %s' %
(name, nobs, str(shape)))
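# Companion sketch for the vector case (shapes hypothetical):
#
#     >>> validate_vector_shape('obs_intercept', (2, 10), nrows=2, nobs=10)
#     >>> validate_vector_shape('obs_intercept', (2, 5), nrows=2, nobs=10)
#     Traceback (most recent call last):
#     ...
#     ValueError: Invalid dimensions for time-varying obs_intercept vector. ...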
def reorder_missing_matrix(matrix, missing, reorder_rows=False,
reorder_cols=False, is_diagonal=False,
inplace=False, prefix=None):
"""
Reorder the rows or columns of a time-varying matrix where all non-missing
values are in the upper left corner of the matrix.
Parameters
----------
matrix : array_like
The matrix to be reordered. Must have shape (n, m, nobs).
missing : array_like of bool
The vector of missing indices. Must have shape (k, nobs) where `k = n`
if `reorder_rows is True` and `k = m` if `reorder_cols is True`.
reorder_rows : bool, optional
Whether or not the rows of the matrix should be re-ordered. Default
is False.
reorder_cols : bool, optional
Whether or not the columns of the matrix should be re-ordered. Default
is False.
is_diagonal : bool, optional
Whether or not the matrix is diagonal. If this is True, must also have
`n = m`. Default is False.
inplace : bool, optional
Whether or not to reorder the matrix in-place.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
reordered_matrix : array_like
The reordered matrix.
"""
if prefix is None:
prefix = find_best_blas_type((matrix,))[0]
reorder = prefix_reorder_missing_matrix_map[prefix]
if not inplace:
matrix = np.copy(matrix, order='F')
reorder(matrix, np.asfortranarray(missing), reorder_rows, reorder_cols,
is_diagonal)
return matrix
def reorder_missing_vector(vector, missing, inplace=False, prefix=None):
"""
Reorder the elements of a time-varying vector where all non-missing
values are in the first elements of the vector.
Parameters
----------
vector : array_like
The vector to be reordered. Must have shape (n, nobs).
missing : array_like of bool
The vector of missing indices. Must have shape (n, nobs).
inplace : bool, optional
Whether or not to reorder the matrix in-place. Default is False.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
reordered_vector : array_like
The reordered vector.
"""
if prefix is None:
prefix = find_best_blas_type((vector,))[0]
reorder = prefix_reorder_missing_vector_map[prefix]
if not inplace:
vector = np.copy(vector, order='F')
reorder(vector, np.asfortranarray(missing))
return vector
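# Illustrative sketch (values hypothetical; the reordering itself is done by
# the compiled {prefix}reorder_missing_vector routines, so `vector` must have
# a BLAS-compatible dtype):
#
#     vector = np.array([[1.], [2.], [3.]], order='F')    # shape (n, nobs) = (3, 1)
#     missing = np.array([[1], [0], [0]], dtype=np.int32) # element 0 is missing
#     out = reorder_missing_vector(vector, missing)
#     # expected: the non-missing values (2., 3.) are moved to the front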
def copy_missing_matrix(A, B, missing, missing_rows=False, missing_cols=False,
is_diagonal=False, inplace=False, prefix=None):
"""
Copy the rows or columns of a time-varying matrix where all non-missing
values are in the upper left corner of the matrix.
Parameters
----------
A : array_like
The matrix from which to copy. Must have shape (n, m, nobs) or
(n, m, 1).
B : array_like
The matrix to copy to. Must have shape (n, m, nobs).
missing : array_like of bool
The vector of missing indices. Must have shape (k, nobs) where `k = n`
        if `missing_rows is True` and `k = m` if `missing_cols is True`.
missing_rows : bool, optional
Whether or not the rows of the matrix are a missing dimension. Default
is False.
missing_cols : bool, optional
Whether or not the columns of the matrix are a missing dimension.
Default is False.
is_diagonal : bool, optional
Whether or not the matrix is diagonal. If this is True, must also have
`n = m`. Default is False.
inplace : bool, optional
Whether or not to copy to B in-place. Default is False.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
copied_matrix : array_like
The matrix B with the non-missing submatrix of A copied onto it.
"""
if prefix is None:
prefix = find_best_blas_type((A, B))[0]
copy = prefix_copy_missing_matrix_map[prefix]
if not inplace:
B = np.copy(B, order='F')
# We may have been given an F-contiguous memoryview; in that case, we do
# not want to alter it or convert it to a numpy array
try:
if not A.is_f_contig():
raise ValueError()
except (AttributeError, ValueError):
A = np.asfortranarray(A)
copy(A, B, np.asfortranarray(missing), missing_rows, missing_cols,
is_diagonal)
return B
def copy_missing_vector(a, b, missing, inplace=False, prefix=None):
"""
    Copy the elements of a time-varying vector where all non-missing
values are in the first elements of the vector.
Parameters
----------
a : array_like
The vector from which to copy. Must have shape (n, nobs) or (n, 1).
b : array_like
The vector to copy to. Must have shape (n, nobs).
missing : array_like of bool
The vector of missing indices. Must have shape (n, nobs).
inplace : bool, optional
Whether or not to copy to b in-place. Default is False.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
copied_vector : array_like
        The vector b with the non-missing subvector of a copied onto it.
"""
if prefix is None:
prefix = find_best_blas_type((a, b))[0]
copy = prefix_copy_missing_vector_map[prefix]
if not inplace:
b = np.copy(b, order='F')
# We may have been given an F-contiguous memoryview; in that case, we do
# not want to alter it or convert it to a numpy array
try:
if not a.is_f_contig():
raise ValueError()
except (AttributeError, ValueError):
a = np.asfortranarray(a)
copy(a, b, np.asfortranarray(missing))
return b
def copy_index_matrix(A, B, index, index_rows=False, index_cols=False,
is_diagonal=False, inplace=False, prefix=None):
"""
Copy the rows or columns of a time-varying matrix where all non-index
values are in the upper left corner of the matrix.
Parameters
----------
A : array_like
The matrix from which to copy. Must have shape (n, m, nobs) or
(n, m, 1).
B : array_like
The matrix to copy to. Must have shape (n, m, nobs).
index : array_like of bool
The vector of index indices. Must have shape (k, nobs) where `k = n`
        if `index_rows is True` and `k = m` if `index_cols is True`.
index_rows : bool, optional
Whether or not the rows of the matrix are a index dimension. Default
is False.
index_cols : bool, optional
Whether or not the columns of the matrix are a index dimension.
Default is False.
is_diagonal : bool, optional
Whether or not the matrix is diagonal. If this is True, must also have
`n = m`. Default is False.
inplace : bool, optional
Whether or not to copy to B in-place. Default is False.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
copied_matrix : array_like
The matrix B with the non-index submatrix of A copied onto it.
"""
if prefix is None:
prefix = find_best_blas_type((A, B))[0]
copy = prefix_copy_index_matrix_map[prefix]
if not inplace:
B = np.copy(B, order='F')
# We may have been given an F-contiguous memoryview; in that case, we do
# not want to alter it or convert it to a numpy array
try:
if not A.is_f_contig():
raise ValueError()
except (AttributeError, ValueError):
A = np.asfortranarray(A)
copy(A, B, np.asfortranarray(index), index_rows, index_cols,
is_diagonal)
return B
def copy_index_vector(a, b, index, inplace=False, prefix=None):
"""
    Copy the elements of a time-varying vector where all non-index
values are in the first elements of the vector.
Parameters
----------
a : array_like
The vector from which to copy. Must have shape (n, nobs) or (n, 1).
b : array_like
The vector to copy to. Must have shape (n, nobs).
index : array_like of bool
The vector of index indices. Must have shape (n, nobs).
inplace : bool, optional
Whether or not to copy to b in-place. Default is False.
prefix : {'s', 'd', 'c', 'z'}, optional
The Fortran prefix of the vector. Default is to automatically detect
the dtype. This parameter should only be used with caution.
Returns
-------
copied_vector : array_like
        The vector b with the non-index subvector of a copied onto it.
"""
if prefix is None:
prefix = find_best_blas_type((a, b))[0]
copy = prefix_copy_index_vector_map[prefix]
if not inplace:
b = np.copy(b, order='F')
# We may have been given an F-contiguous memoryview; in that case, we do
# not want to alter it or convert it to a numpy array
try:
if not a.is_f_contig():
raise ValueError()
except (AttributeError, ValueError):
a = np.asfortranarray(a)
copy(a, b, np.asfortranarray(index))
return b
def prepare_exog(exog):
k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
k_exog = exog.shape[1]
return (k_exog, exog)
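# A minimal sketch of the two common cases (assuming numpy is available as np):
#
#     >>> prepare_exog(None)
#     (0, None)
#     >>> k_exog, exog = prepare_exog(np.arange(5.))
#     >>> k_exog, exog.shape
#     (1, (5, 1))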
def prepare_trend_spec(trend):
# Trend
if trend is None or trend == 'n':
polynomial_trend = np.ones(0)
elif trend == 'c':
polynomial_trend = np.r_[1]
elif trend == 't':
polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
polynomial_trend = np.r_[1, 1]
elif trend == 'ctt':
# TODO deprecate ctt?
polynomial_trend = np.r_[1, 1, 1]
else:
trend = np.array(trend)
if trend.ndim > 0:
polynomial_trend = (trend > 0).astype(int)
else:
raise ValueError('Invalid trend method.')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
k_trend = int(np.sum(polynomial_trend))
return polynomial_trend, k_trend
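# Sketch of the mapping from trend strings to polynomials:
#
#     >>> prepare_trend_spec('ct')
#     (array([1, 1]), 2)
#     >>> prepare_trend_spec('n')
#     (array([], dtype=float64), 0)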
def prepare_trend_data(polynomial_trend, k_trend, nobs, offset=1):
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(offset, nobs + offset)
trend_data = np.zeros((nobs, k_trend))
i = 0
for k in polynomial_trend.nonzero()[0]:
if k == 0:
trend_data[:, i] = np.ones(nobs,)
else:
trend_data[:, i] = time_trend**k
i += 1
return trend_data
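# Sketch: a constant-plus-linear trend over three observations, with the
# default offset of 1 (array repr may vary by numpy version):
#
#     >>> prepare_trend_data(np.r_[1, 1], k_trend=2, nobs=3)
#     array([[ 1.,  1.],
#            [ 1.,  2.],
#            [ 1.,  3.]])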
|
{
"content_hash": "6cc09dcc5fa1e79b236c8f2d2f6fccd8",
"timestamp": "",
"source": "github",
"line_count": 1883,
"max_line_length": 79,
"avg_line_length": 35.059479553903344,
"alnum_prop": 0.6060105730342185,
"repo_name": "jseabold/statsmodels",
"id": "cfd452532ae86dfeb13dca07aaaa2d6653248cb8",
"size": "66017",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/tsa/statespace/tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
}
|
from time import sleep
from pytest import raises, fixture
from threading import Event
from promise import (
Promise,
is_thenable,
promisify,
promise_for_dict as free_promise_for_dict,
)
from concurrent.futures import Future
from threading import Thread
from .utils import assert_exception
class DelayedFulfill(Thread):
def __init__(self, d, p, v):
self.delay = d
self.promise = p
self.value = v
Thread.__init__(self)
def run(self):
sleep(self.delay)
self.promise.do_resolve(self.value)
class DelayedRejection(Thread):
def __init__(self, d, p, r):
self.delay = d
self.promise = p
self.reason = r
Thread.__init__(self)
def run(self):
sleep(self.delay)
self.promise.do_reject(self.reason)
class FakeThenPromise:
def __init__(self, raises=True):
self.raises = raises
def then(self, s=None, f=None):
if self.raises:
raise Exception("FakeThenPromise raises in 'then'")
def df(value, dtime):
p = Promise()
t = DelayedFulfill(dtime, p, value)
t.start()
return p
def dr(reason, dtime):
p = Promise()
t = DelayedRejection(dtime, p, reason)
t.start()
return p
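# Illustrative use of the delayed helpers above (timings hypothetical):
#
#     p = df(42, 0.01)                    # fulfills with 42 after ~10 ms
#     assert p.get() == 42
#     p = dr(Exception("boom"), 0.01)     # rejects after ~10 ms
#     with raises(Exception):
#         p.get()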
# Static methods
def test_fulfilled():
p = Promise.fulfilled(4)
assert p.is_fulfilled
assert p.get() == 4
def test_rejected():
p = Promise.rejected(Exception("Static rejected"))
assert p.is_rejected
with raises(Exception) as exc_info:
p.get()
assert str(exc_info.value) == "Static rejected"
# Fulfill
def test_fulfill_self():
p = Promise()
with raises(TypeError) as excinfo:
p.do_resolve(p)
p.get()
# Exceptions
def test_exceptions():
def throws(v):
assert False
p1 = Promise()
p1.then(throws)
p1.do_resolve(5)
p2 = Promise()
p2.catch(throws)
p2.do_reject(Exception())
with raises(Exception) as excinfo:
p2.get()
def test_thrown_exceptions_have_stacktrace():
def throws(v):
assert False
p3 = Promise.resolve("a").then(throws)
with raises(AssertionError) as assert_exc:
p3.get()
assert assert_exc.traceback[-1].path.strpath == __file__
def test_thrown_exceptions_preserve_stacktrace():
def throws(v):
assert False
def after_throws(v):
pass
p3 = Promise.resolve("a").then(throws).then(after_throws)
with raises(AssertionError) as assert_exc:
p3.get()
assert assert_exc.traceback[-1].path.strpath == __file__
# WAIT
# def test_wait_when():
# p1 = df(5, 0.01)
# assert p1.is_pending
# p1._wait()
# assert p1.is_fulfilled
def test_wait_if():
p1 = Promise()
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
# def test_wait_timeout():
# p1 = df(5, 0.1)
# assert p1.is_pending
# with raises(Exception) as exc_info:
# p1._wait(timeout=0.05)
# assert str(exc_info.value) == "Timeout"
# assert p1.is_pending
# p1._wait()
# assert p1.is_fulfilled
# # GET
# def test_get_when():
# p1 = df(5, 0.01)
# assert p1.is_pending
# v = p1.get()
# assert p1.is_fulfilled
# assert 5 == v
def test_get_if():
p1 = Promise()
p1.do_resolve(5)
v = p1.get()
assert p1.is_fulfilled
assert 5 == v
# def test_get_timeout():
# p1 = df(5, 0.1)
# assert p1.is_pending
# with raises(Exception) as exc_info:
# p1._wait(timeout=0.05)
# assert str(exc_info.value) == "Timeout"
# assert p1.is_pending
# v = p1.get()
# assert p1.is_fulfilled
# assert 5 == v
# Promise.all
def test_promise_all_when():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, p2])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.do_resolve(10)
p2._wait()
pl._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pl.get()[0]
assert 10 == pl.get()[1]
def test_promise_all_when_mixed_promises():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, 32, p2, False, True])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.do_resolve(10)
p2._wait()
pl._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert pl.get() == [5, 32, 10, False, True]
def test_promise_all_when_if_no_promises():
pl = Promise.all([10, 32, False, True])
assert pl.get() == [10, 32, False, True]
def test_promise_all_if():
p1 = Promise()
p2 = Promise()
pd1 = Promise.all([p1, p2])
pd2 = Promise.all([p1])
pd3 = Promise.all([])
pd3._wait()
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
assert pd3.is_fulfilled
p1.do_resolve(5)
p1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.do_resolve(10)
p2._wait()
pd1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pd1.get()[0]
assert 5 == pd2.get()[0]
assert 10 == pd1.get()[1]
assert [] == pd3.get()
# promise_for_dict
@fixture(params=[Promise.for_dict, free_promise_for_dict])
def promise_for_dict(request):
return request.param
def test_dict_promise_when(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd1 = promise_for_dict(d)
pd2 = promise_for_dict({"a": p1})
pd3 = promise_for_dict({})
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
pd3._wait()
assert pd3.is_fulfilled
p1.do_resolve(5)
p1._wait()
pd2._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.do_resolve(10)
p2._wait()
pd1._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.get()
assert 10 == p2.get()
assert 5 == pd1.get()["a"]
assert 5 == pd2.get()["a"]
assert 10 == pd1.get()["b"]
assert {} == pd3.get()
def test_dict_promise_if(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd = promise_for_dict(d)
assert p1.is_pending
assert p2.is_pending
assert pd.is_pending
p1.do_resolve(5)
p1._wait()
assert p1.is_fulfilled
assert p2.is_pending
assert pd.is_pending
p2.do_resolve(10)
p2._wait()
assert p1.is_fulfilled
assert p2.is_fulfilled
# pd._wait()
# assert pd.is_fulfilled
# assert 5 == p1.get()
# assert 10 == p2.get()
# assert 5 == pd.get()["a"]
# assert 10 == pd.get()["b"]
def test_done():
counter = [0]
r = Promise()
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
def end(_):
r.do_resolve(None)
p = Promise()
p.done(inc, dec)
p.done(inc, dec)
p.done(end)
p.do_resolve(4)
Promise.wait(r)
assert counter[0] == 2
r = Promise()
counter = [0]
p = Promise()
p.done(inc, dec)
p.done(inc, dec)
p.done(None, end)
p.do_reject(Exception())
Promise.wait(r)
assert counter[0] == -2
def test_done_all():
counter = [0]
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
p = Promise()
r = Promise()
p.done_all()
p.done_all([(inc, dec)])
p.done_all(
[
(inc, dec),
(inc, dec),
{"success": inc, "failure": dec},
lambda _: r.do_resolve(None),
]
)
p.do_resolve(4)
Promise.wait(r)
assert counter[0] == 4
p = Promise()
r = Promise()
p.done_all()
p.done_all([inc])
p.done_all([(inc, dec)])
p.done_all(
[
(inc, dec),
{"success": inc, "failure": dec},
(None, lambda _: r.do_resolve(None)),
]
)
p.do_reject(Exception("Uh oh!"))
Promise.wait(r)
assert counter[0] == 1
def test_then_all():
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{"success": (lambda x: x + x), "failure": (lambda r: 2)},
]
results = (
p.then_all()
+ p.then_all([lambda x: x])
+ p.then_all([(lambda x: x * x, lambda r: 1)])
+ p.then_all(handlers)
)
p.do_resolve(4)
assert [r.get() for r in results] == [4, 16, 16, 8]
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{"success": (lambda x: x + x), "failure": (lambda r: 2)},
]
results = (
p.then_all()
+ p.then_all([(lambda x: x * x, lambda r: 1)])
+ p.then_all(handlers)
)
p.do_reject(Exception())
assert [r.get() for r in results] == [1, 1, 2]
def test_do_resolve():
p1 = Promise(lambda resolve, reject: resolve(0))
assert p1.get() == 0
assert p1.is_fulfilled
def test_do_resolve_fail_on_call():
def raises(resolve, reject):
raise Exception("Fails")
p1 = Promise(raises)
assert not p1.is_fulfilled
assert str(p1.reason) == "Fails"
def test_catch():
p1 = Promise(lambda resolve, reject: resolve(0))
p2 = p1.then(lambda value: 1 / value).catch(lambda e: e).then(lambda e: type(e))
assert p2.get() == ZeroDivisionError
assert p2.is_fulfilled
def test_is_thenable_promise():
promise = Promise()
assert is_thenable(promise)
def test_is_thenable_then_object():
promise = FakeThenPromise()
assert not is_thenable(promise)
def test_is_thenable_future():
promise = Future()
assert is_thenable(promise)
def test_is_thenable_simple_object():
assert not is_thenable(object())
@fixture(params=[Promise.resolve])
def resolve(request):
return request.param
def test_resolve_promise(resolve):
promise = Promise()
assert resolve(promise) == promise
def test_resolve_then_object(resolve):
promise = FakeThenPromise(raises=False)
p = resolve(promise)
assert isinstance(p, Promise)
def test_resolve_future(resolve):
future = Future()
promise = resolve(future)
assert promise.is_pending
future.set_result(1)
assert promise.get() == 1
assert promise.is_fulfilled
def test_resolve_future_rejected(resolve):
future = Future()
promise = resolve(future)
assert promise.is_pending
future.set_exception(Exception("Future rejected"))
assert promise.is_rejected
assert_exception(promise.reason, Exception, "Future rejected")
def test_resolve_object(resolve):
val = object()
promised = resolve(val)
assert isinstance(promised, Promise)
assert promised.get() == val
def test_resolve_promise_subclass():
class MyPromise(Promise):
pass
p = Promise()
p.do_resolve(10)
m_p = MyPromise.resolve(p)
assert isinstance(m_p, MyPromise)
assert m_p.get() == p.get()
def test_promise_repr_pending():
promise = Promise()
assert repr(promise) == "<Promise at {} pending>".format(hex(id(promise)))
def test_promise_repr_fulfilled():
val = {1: 2}
promise = Promise.fulfilled(val)
promise._wait()
assert repr(promise) == "<Promise at {} fulfilled with {}>".format(
hex(id(promise)), repr(val)
)
def test_promise_repr_rejected():
err = Exception("Error!")
promise = Promise.rejected(err)
promise._wait()
assert repr(promise) == "<Promise at {} rejected with {}>".format(
hex(id(promise)), repr(err)
)
def test_promise_loop():
def by_two(result):
return result * 2
def executor(resolve, reject):
resolve(Promise.resolve(1).then(lambda v: Promise.resolve(v).then(by_two)))
p = Promise(executor)
assert p.get(.1) == 2
def test_resolve_future_like(resolve):
class CustomThenable(object):
def add_done_callback(self, f):
f(True)
def done(self):
return True
def exception(self):
pass
def result(self):
return True
instance = CustomThenable()
promise = resolve(instance)
    assert promise.get() is True
def sum_function(a, b):
return a + b
def test_promisify_function_resolved(resolve):
promisified_func = promisify(sum_function)
result = promisified_func(1, 2)
assert isinstance(result, Promise)
assert result.get() == 3
def test_promisify_function_rejected(resolve):
promisified_func = promisify(sum_function)
result = promisified_func(None, None)
assert isinstance(result, Promise)
with raises(Exception) as exc_info_promise:
result.get()
with raises(Exception) as exc_info:
sum_function(None, None)
assert str(exc_info_promise.value) == str(exc_info.value)
def test_promises_with_only_then():
context = {"success": False}
error = RuntimeError("Ooops!")
promise1 = Promise(
lambda resolve, reject: context.update({"promise1_reject": reject})
)
promise2 = promise1.then(lambda x: None)
promise3 = promise1.then(lambda x: None)
context["promise1_reject"](error)
promise2._wait()
promise3._wait()
assert promise2.reason == error
assert promise3.reason == error
def test_promises_promisify_still_works_but_deprecated_for_non_callables():
x = promisify(1)
assert isinstance(x, Promise)
assert x.get() == 1
# def test_promise_loop():
# values = Promise.resolve([1, None, 2])
# def on_error(error):
# error
# def executor(resolve, reject):
# resolve(Promise.resolve(values).then(lambda values: Promise.all([Promise.resolve(values[0])]).catch(on_error)))
# p = Promise(executor)
# assert p.get(.1) == 2
|
{
"content_hash": "ef9ae5b5e6618b5d3875648f3593cf20",
"timestamp": "",
"source": "github",
"line_count": 668,
"max_line_length": 121,
"avg_line_length": 21.69610778443114,
"alnum_prop": 0.5925619264472504,
"repo_name": "syrusakbary/pypromise",
"id": "4a083718d134ede122d5a761c592318a9fa5748e",
"size": "14574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_extra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37318"
}
],
"symlink_target": ""
}
|
"""
GTK+ Matplotlib interface using cairo (not GDK) drawing operations.
Author: Steve Chaplin
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import gtk
if gtk.pygtk_version < (2, 7, 0):
import cairo.gtk
from matplotlib import cbook
from matplotlib.backends import backend_cairo
from matplotlib.backends.backend_gtk import *
from matplotlib.backends.backend_gtk import _BackendGTK
backend_version = ('PyGTK(%d.%d.%d) ' % gtk.pygtk_version
+ 'Pycairo(%s)' % backend_cairo.backend_version)
class RendererGTKCairo(backend_cairo.RendererCairo):
    if gtk.pygtk_version >= (2, 7, 0):
        def set_pixmap(self, pixmap):
            self.gc.ctx = pixmap.cairo_create()
    else:
        def set_pixmap(self, pixmap):
            self.gc.ctx = cairo.gtk.gdk_cairo_create(pixmap)
class FigureCanvasGTKCairo(backend_cairo.FigureCanvasCairo, FigureCanvasGTK):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(backend_cairo.FigureCanvasCairo.filetypes)
def _renderer_init(self):
"""Override to use cairo (rather than GDK) renderer"""
self._renderer = RendererGTKCairo(self.figure.dpi)
# This class has been unused for a while at least.
@cbook.deprecated("2.1")
class FigureManagerGTKCairo(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKCairo (canvas, self.window)
else:
toolbar = None
return toolbar
# This class has been unused for a while at least.
@cbook.deprecated("2.1")
class NavigationToolbar2Cairo(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKCairo(fig)
@_BackendGTK.export
class _BackendGTKCairo(_BackendGTK):
FigureCanvas = FigureCanvasGTKCairo
FigureManager = FigureManagerGTK
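# A minimal usage sketch (assumes a PyGTK-era matplotlib in which the
# 'GTKCairo' backend name resolves to this module; details vary by version):
#
#     import matplotlib
#     matplotlib.use('GTKCairo')   # must run before importing pyplot
#     import matplotlib.pyplot as plt
#     plt.plot([0, 1], [0, 1])
#     plt.show()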
|
{
"content_hash": "845f1a2cd84a8d3273c2fcc04bab29c7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 77,
"avg_line_length": 31.40625,
"alnum_prop": 0.6945273631840796,
"repo_name": "louisLouL/pair_trading",
"id": "a8cdf076a93f2d422c9dcb1c4f91ea5f9a92d022",
"size": "2010",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_gtkcairo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "148513"
},
{
"name": "C++",
"bytes": "172384"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "8281"
},
{
"name": "HTML",
"bytes": "568460"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Python",
"bytes": "30357437"
},
{
"name": "Shell",
"bytes": "3260"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
import numpy
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga import GingaPlugin
class Info(GingaPlugin.GlobalPlugin):
def __init__(self, fv):
# superclass defines some variables for us, like logger
super(Info, self).__init__(fv)
self.active = None
self.info = None
# truncate names after this size
self.maxstr = 60
#self.w = Bunch.Bunch()
fv.add_callback('add-channel', self.add_channel)
fv.add_callback('delete-channel', self.delete_channel)
fv.add_callback('field-info', self.field_info)
fv.add_callback('channel-change', self.focus_cb)
def build_gui(self, container):
nb = Widgets.StackWidget()
self.nb = nb
container.add_widget(self.nb, stretch=1)
def _create_info_window(self):
sw = Widgets.ScrollArea()
vbox = Widgets.VBox()
sw.set_widget(vbox)
captions = (('Name:', 'label', 'Name', 'llabel'),
('Object:', 'label', 'Object', 'llabel'),
('X:', 'label', 'X', 'llabel'),
('Y:', 'label', 'Y', 'llabel'),
('Value:', 'label', 'Value', 'llabel'),
('RA:', 'label', 'RA', 'llabel'),
('DEC:', 'label', 'DEC', 'llabel'),
('Equinox:', 'label', 'Equinox', 'llabel'),
('Dimensions:', 'label', 'Dimensions', 'llabel'),
('Min:', 'label', 'Min', 'llabel'),
('Max:', 'label', 'Max', 'llabel'),
)
w, b = Widgets.build_info(captions)
col = Widgets.VBox()
row = Widgets.HBox()
row.set_spacing(0)
row.set_border_width(0)
row.add_widget(w, stretch=0)
row.add_widget(Widgets.Label(''), stretch=1)
col.add_widget(row, stretch=0)
#col.add_widget(Widgets.Label(''), stretch=1)
sw2 = Widgets.ScrollArea()
# hack for Qt to expand this widget properly
sw2.cfg_expand(0x7, 0x4)
sw2.set_widget(col)
vbox.add_widget(sw2, stretch=2)
# stretcher
vbox.add_widget(Widgets.Label(''), stretch=1)
captions = (('Zoom:', 'label', 'Zoom', 'llabel'),
('Cut Low:', 'label', 'Cut Low Value', 'llabel',
'Cut Low', 'entry'),
('Cut High:', 'label', 'Cut High Value', 'llabel',
'Cut High', 'entry'),
('Auto Levels', 'button', 'spacer1', 'spacer',
'Cut Levels', 'button'),
('Cut New:', 'label', 'Cut New', 'llabel'),
('Zoom New:', 'label', 'Zoom New', 'llabel'),
('Center New:', 'label', 'Center New', 'llabel'),
)
w, b2 = Widgets.build_info(captions)
b.update(b2)
# TODO: need a more general solution to gtk labels resizing their
# parent window
#b.object.set_length(12)
b.cut_levels.set_tooltip("Set cut levels manually")
b.auto_levels.set_tooltip("Set cut levels by algorithm")
b.cut_low.set_tooltip("Set low cut level (press Enter)")
b.cut_low_value.set_text('')
b.cut_high.set_tooltip("Set high cut level (press Enter)")
b.cut_high_value.set_text('')
b.cut_new.set_text('')
b.zoom_new.set_text('')
b.center_new.set_text('')
row = Widgets.HBox()
row.set_spacing(0)
row.set_border_width(0)
row.add_widget(w, stretch=0)
## row.add_widget(Widgets.Label(''), stretch=1)
vbox.add_widget(row, stretch=0)
return sw, b
def add_channel(self, viewer, channel):
sw, winfo = self._create_info_window()
chname = channel.name
self.nb.add_widget(sw, title=chname)
index = self.nb.index_of(sw)
info = Bunch.Bunch(widget=sw, winfo=winfo,
mode_w=None,
chinfo=channel)
channel.extdata._info_info = info
winfo.cut_low.add_callback('activated', self.cut_levels,
channel.fitsimage, info)
winfo.cut_high.add_callback('activated', self.cut_levels,
channel.fitsimage, info)
winfo.cut_levels.add_callback('activated', self.cut_levels,
channel.fitsimage, info)
winfo.auto_levels.add_callback('activated', self.auto_levels,
channel.fitsimage, info)
fitsimage = channel.fitsimage
fitssettings = fitsimage.get_settings()
for name in ['cuts']:
fitssettings.getSetting(name).add_callback('set',
self.cutset_cb, fitsimage, info)
for name in ['scale']:
fitssettings.getSetting(name).add_callback('set',
self.zoomset_cb, fitsimage, info)
fitssettings.getSetting('autocuts').add_callback('set',
self.autocuts_cb, fitsimage, info)
fitssettings.getSetting('autozoom').add_callback('set',
self.autozoom_cb, fitsimage, info)
fitssettings.getSetting('autocenter').add_callback('set',
self.autocenter_cb, fitsimage, info)
def delete_channel(self, viewer, channel):
chname = channel.name
self.logger.debug("deleting channel %s" % (chname))
info = channel.extdata._info_info
widget = info.widget
self.nb.remove(widget, delete=True)
self.active = None
self.info = None
# CALLBACKS
def redo(self, channel, image):
fitsimage = channel.fitsimage
info = channel.extdata._info_info
# add cb to image so that if it is modified we can update info
image.add_callback('modified', self.image_update_cb, fitsimage, info)
self.set_info(info, fitsimage)
return True
def image_update_cb(self, image, fitsimage, info):
cur_img = fitsimage.get_image()
if cur_img == image:
self.fv.gui_do(self.set_info, info, fitsimage)
return True
def focus_cb(self, viewer, channel):
chname = channel.name
if self.active != chname:
            if '_info_info' not in channel.extdata:
self.add_channel(viewer, channel)
info = channel.extdata._info_info
widget = info.widget
index = self.nb.index_of(widget)
self.nb.set_index(index)
self.active = chname
self.info = info
self.set_info(self.info, channel.fitsimage)
def zoomset_cb(self, setting, value, fitsimage, info):
"""This callback is called when the main window is zoomed.
"""
#scale_x, scale_y = fitsimage.get_scale_xy()
scale_x, scale_y = value
# Set text showing zoom factor (1X, 2X, etc.)
if scale_x == scale_y:
text = self.fv.scale2text(scale_x)
else:
textx = self.fv.scale2text(scale_x)
texty = self.fv.scale2text(scale_y)
text = "X: %s Y: %s" % (textx, texty)
info.winfo.zoom.set_text(text)
def cutset_cb(self, setting, value, fitsimage, info):
loval, hival = value
#info.winfo.cut_low.set_text('%.4g' % (loval))
info.winfo.cut_low_value.set_text('%.4g' % (loval))
#info.winfo.cut_high.set_text('%.4g' % (hival))
info.winfo.cut_high_value.set_text('%.4g' % (hival))
def autocuts_cb(self, setting, option, fitsimage, info):
info.winfo.cut_new.set_text(option)
def autozoom_cb(self, setting, option, fitsimage, info):
info.winfo.zoom_new.set_text(option)
def autocenter_cb(self, setting, option, fitsimage, info):
# Hack to convert old values that used to be T/F
if isinstance(option, bool):
choice = { True: 'on', False: 'off' }
option = choice[option]
info.winfo.center_new.set_text(option)
# LOGIC
def trunc(self, s):
if len(s) > self.maxstr:
return s[:self.maxstr-3] + '...'
else:
return s
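    # Sketch: with the default maxstr of 60, long names are shortened to the
    # first 57 characters plus an ellipsis (instance name hypothetical):
    #
    #     >>> info_plugin.trunc('a' * 70) == 'a' * 57 + '...'
    #     True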
def set_info(self, info, fitsimage):
image = fitsimage.get_image()
if image is None:
return
header = image.get_header()
# Update info panel
name = self.trunc(image.get('name', 'Noname'))
info.winfo.name.set_text(name)
objtext = self.trunc(header.get('OBJECT', 'UNKNOWN'))
info.winfo.object.set_text(objtext)
equinox = header.get('EQUINOX', '')
info.winfo.equinox.set_text(str(equinox))
# Show min, max values
width, height = fitsimage.get_data_size()
minval, maxval = image.get_minmax(noinf=False)
info.winfo.max.set_text(str(maxval))
info.winfo.min.set_text(str(minval))
# Show cut levels
loval, hival = fitsimage.get_cut_levels()
#info.winfo.cut_low.set_text('%.4g' % (loval))
info.winfo.cut_low_value.set_text('%.4g' % (loval))
#info.winfo.cut_high.set_text('%.4g' % (hival))
info.winfo.cut_high_value.set_text('%.4g' % (hival))
# Show dimensions
dim_txt = "%dx%d" % (width, height)
info.winfo.dimensions.set_text(dim_txt)
# update zoom indicator
scalefactor = fitsimage.get_scale()
text = self.fv.scale2text(scalefactor)
info.winfo.zoom.set_text(text)
# update cut new/zoom new indicators
t_ = fitsimage.get_settings()
info.winfo.cut_new.set_text(t_['autocuts'])
info.winfo.zoom_new.set_text(t_['autozoom'])
option = t_['autocenter']
# Hack to convert old values that used to be T/F
if isinstance(option, bool):
choice = { True: 'on', False: 'off' }
option = choice[option]
info.winfo.center_new.set_text(option)
def field_info(self, viewer, channel, info):
chname = channel.name
        if '_info_info' not in channel.extdata:
return
obj = channel.extdata._info_info
obj.winfo.x.set_text("%.3f" % info.x)
obj.winfo.y.set_text("%.3f" % info.y)
obj.winfo.value.set_text(str(info.value))
if 'ra_txt' in info:
obj.winfo.ra.set_text(info.ra_txt)
obj.winfo.dec.set_text(info.dec_txt)
if 'ra_lbl' in info:
obj.winfo.lbl_ra.set_text(info.ra_lbl+':')
obj.winfo.lbl_dec.set_text(info.dec_lbl+':')
def cut_levels(self, w, fitsimage, info):
loval, hival = fitsimage.get_cut_levels()
try:
lostr = info.winfo.cut_low.get_text().strip()
if lostr != '':
loval = float(lostr)
histr = info.winfo.cut_high.get_text().strip()
if histr != '':
hival = float(histr)
self.logger.debug("locut=%f hicut=%f" % (loval, hival))
return fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.show_error("Error cutting levels: %s" % (str(e)))
return True
def auto_levels(self, w, fitsimage, info):
fitsimage.auto_levels()
def __str__(self):
return 'info'
#END
|
{
"content_hash": "c28c2b51deca9ed0df514b8a082c2f85",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 77,
"avg_line_length": 36.5048231511254,
"alnum_prop": 0.5432044393552365,
"repo_name": "stscieisenhamer/ginga",
"id": "09079eacc46abf58446b3874c95d3a592baa7430",
"size": "11523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/rv/plugins/Info.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2151"
},
{
"name": "JavaScript",
"bytes": "82354"
},
{
"name": "Python",
"bytes": "2763201"
}
],
"symlink_target": ""
}
|
import time
from neutronclient.common import exceptions as neutron_client_exc
from oslo.config import cfg
from nova.compute import flavors
from nova.compute import utils as compute_utils
from nova import conductor
from nova import exception
from nova.network import base_api
from nova.network import model as network_model
from nova.network import neutronv2
from nova.network.neutronv2 import constants
from nova.network.security_group import openstack_driver
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import uuidutils
neutron_opts = [
cfg.StrOpt('neutron_url',
default='http://127.0.0.1:9696',
help='URL for connecting to neutron'),
cfg.IntOpt('neutron_url_timeout',
default=30,
help='Timeout value for connecting to neutron in seconds'),
cfg.StrOpt('neutron_admin_username',
help='Username for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_password',
help='Password for connecting to neutron in admin context',
secret=True),
cfg.StrOpt('neutron_admin_tenant_id',
help='Tenant id for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_tenant_name',
help='Tenant name for connecting to neutron in admin context. '
'This option is mutually exclusive with '
'neutron_admin_tenant_id. Note that with Keystone V3 '
'tenant names are only unique within a domain.'),
cfg.StrOpt('neutron_region_name',
help='Region name for connecting to neutron in admin context'),
cfg.StrOpt('neutron_admin_auth_url',
default='http://localhost:5000/v2.0',
help='Authorization URL for connecting to neutron in admin '
'context'),
cfg.BoolOpt('neutron_api_insecure',
default=False,
help='If set, ignore any SSL validation issues'),
cfg.StrOpt('neutron_auth_strategy',
default='keystone',
help='Authorization strategy for connecting to '
'neutron in admin context'),
# TODO(berrange) temporary hack until Neutron can pass over the
# name of the OVS bridge it is configured with
cfg.StrOpt('neutron_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.IntOpt('neutron_extension_sync_interval',
default=600,
help='Number of seconds before querying neutron for'
' extensions'),
cfg.StrOpt('neutron_ca_certificates_file',
help='Location of CA certificates file to use for '
'neutron client requests.'),
]
CONF = cfg.CONF
CONF.register_opts(neutron_opts)
CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
CONF.import_opt('flat_injected', 'nova.network.manager')
LOG = logging.getLogger(__name__)
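# Illustrative nova.conf snippet for the options registered above (host names
# and credentials hypothetical):
#
#     [DEFAULT]
#     neutron_url = http://controller:9696
#     neutron_admin_username = neutron
#     neutron_admin_password = secret
#     neutron_admin_tenant_name = service
#     neutron_admin_auth_url = http://controller:5000/v2.0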
class API(base_api.NetworkAPI):
"""API for interacting with the neutron 2.x API."""
def __init__(self):
super(API, self).__init__()
self.last_neutron_extension_sync = None
self.extensions = {}
self.conductor_api = conductor.API()
self.security_group_api = (
openstack_driver.get_openstack_security_group_driver())
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures."""
def _get_available_networks(self, context, project_id,
net_ids=None, neutron=None):
"""Return a network list available for the tenant.
The list contains networks owned by the tenant and public networks.
        If net_ids is specified, it searches only networks with those IDs.
"""
if not neutron:
neutron = neutronv2.get_client(context)
if net_ids:
# If user has specified to attach instance only to specific
# networks then only add these to **search_opts. This search will
# also include 'shared' networks.
search_opts = {'id': net_ids}
nets = neutron.list_networks(**search_opts).get('networks', [])
else:
# (1) Retrieve non-public network list owned by the tenant.
search_opts = {'tenant_id': project_id, 'shared': False}
nets = neutron.list_networks(**search_opts).get('networks', [])
# (2) Retrieve public network list.
search_opts = {'shared': True}
nets += neutron.list_networks(**search_opts).get('networks', [])
_ensure_requested_network_ordering(
lambda x: x['id'],
nets,
net_ids)
if not context.is_admin:
for net in nets:
# Perform this check here rather than in validate_networks to
                # ensure the check is performed every time
                # allocate_for_instance is invoked
if net.get('router:external'):
raise exception.ExternalNetworkAttachForbidden(
network_uuid=net['id'])
return nets
def _create_port(self, port_client, instance, network_id, port_req_body,
fixed_ip=None, security_group_ids=None,
available_macs=None, dhcp_opts=None):
"""Attempts to create a port for the instance on the given network.
:param port_client: The client to use to create the port.
:param instance: Create the port for the given instance.
:param network_id: Create the port on the given network.
:param port_req_body: Pre-populated port request. Should have the
device_id, device_owner, and any required neutron extension values.
:param fixed_ip: Optional fixed IP to use from the given network.
:param security_group_ids: Optional list of security group IDs to
apply to the port.
:param available_macs: Optional set of available MAC addresses to use.
:param dhcp_opts: Optional DHCP options.
:returns: ID of the created port.
:raises PortLimitExceeded: If neutron fails with an OverQuota error.
"""
try:
if fixed_ip:
port_req_body['port']['fixed_ips'] = [{'ip_address': fixed_ip}]
port_req_body['port']['network_id'] = network_id
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = instance['project_id']
if security_group_ids:
port_req_body['port']['security_groups'] = security_group_ids
if available_macs is not None:
if not available_macs:
raise exception.PortNotFree(
instance=instance['display_name'])
mac_address = available_macs.pop()
port_req_body['port']['mac_address'] = mac_address
if dhcp_opts is not None:
port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
port_id = port_client.create_port(port_req_body)['port']['id']
LOG.debug('Successfully created port: %s', port_id,
instance=instance)
return port_id
except neutron_client_exc.NeutronClientException as e:
# NOTE(mriedem): OverQuota in neutron is a 409
if e.status_code == 409:
LOG.warning(_('Neutron error: quota exceeded'))
raise exception.PortLimitExceeded()
with excutils.save_and_reraise_exception():
LOG.exception(_('Neutron error creating port on network %s'),
network_id, instance=instance)
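    # Illustrative call shape for _create_port (identifiers hypothetical; the
    # docstring above lists the fields that must be pre-populated):
    #
    #     port_req_body = {'port': {'device_id': instance['uuid'],
    #                               'device_owner': 'compute:nova'}}
    #     port_id = self._create_port(neutron, instance, network['id'],
    #                                 port_req_body, fixed_ip='10.0.0.5',
    #                                 security_group_ids=[sg_id])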
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocate network resources for the instance.
:param requested_networks: optional value containing
network_id, fixed_ip, and port_id
:param security_groups: security groups to allocate for instance
:param macs: None or a set of MAC addresses that the instance
should use. macs is supplied by the hypervisor driver (contrast
with requested_networks which is user supplied).
NB: NeutronV2 currently assigns hypervisor supplied MAC addresses
to arbitrary networks, which requires openflow switches to
function correctly if more than one network is being used with
the bare metal hypervisor (which is the only one known to limit
MAC addresses).
:param dhcp_options: None or a set of key/value pairs that should
determine the DHCP BOOTP response, eg. for PXE booting an instance
configured with the baremetal hypervisor. It is expected that these
are already formatted for the neutron v2 api.
See nova/virt/driver.py:dhcp_options_for_instance for an example.
"""
hypervisor_macs = kwargs.get('macs', None)
available_macs = None
if hypervisor_macs is not None:
# Make a copy we can mutate: records macs that have not been used
# to create a port on a network. If we find a mac with a
# pre-allocated port we also remove it from this set.
available_macs = set(hypervisor_macs)
neutron = neutronv2.get_client(context)
LOG.debug('allocate_for_instance() for %s',
instance['display_name'])
if not instance['project_id']:
msg = _('empty project id for instance %s')
raise exception.InvalidInput(
reason=msg % instance['display_name'])
requested_networks = kwargs.get('requested_networks')
dhcp_opts = kwargs.get('dhcp_options', None)
ports = {}
fixed_ips = {}
net_ids = []
if requested_networks:
for network_id, fixed_ip, port_id in requested_networks:
if port_id:
port = neutron.show_port(port_id)['port']
if port.get('device_id'):
raise exception.PortInUse(port_id=port_id)
if hypervisor_macs is not None:
if port['mac_address'] not in hypervisor_macs:
raise exception.PortNotUsable(port_id=port_id,
instance=instance['display_name'])
else:
# Don't try to use this MAC if we need to create a
# port on the fly later. Identical MACs may be
# configured by users into multiple ports so we
# discard rather than popping.
available_macs.discard(port['mac_address'])
network_id = port['network_id']
ports[network_id] = port
elif fixed_ip and network_id:
fixed_ips[network_id] = fixed_ip
if network_id:
net_ids.append(network_id)
nets = self._get_available_networks(context, instance['project_id'],
net_ids)
if not nets:
LOG.warn(_("No network configured!"), instance=instance)
return network_model.NetworkInfo([])
security_groups = kwargs.get('security_groups', [])
security_group_ids = []
# TODO(arosen) Should optimize more to do direct query for security
# group if len(security_groups) == 1
if len(security_groups):
search_opts = {'tenant_id': instance['project_id']}
user_security_groups = neutron.list_security_groups(
**search_opts).get('security_groups')
for security_group in security_groups:
name_match = None
uuid_match = None
for user_security_group in user_security_groups:
if user_security_group['name'] == security_group:
if name_match:
raise exception.NoUniqueMatch(
_("Multiple security groups found matching"
" '%s'. Use an ID to be more specific.") %
security_group)
name_match = user_security_group['id']
if user_security_group['id'] == security_group:
uuid_match = user_security_group['id']
# If a user names the security group the same as
# another's security groups uuid, the name takes priority.
if not name_match and not uuid_match:
raise exception.SecurityGroupNotFound(
security_group_id=security_group)
elif name_match:
security_group_ids.append(name_match)
elif uuid_match:
security_group_ids.append(uuid_match)
touched_port_ids = []
created_port_ids = []
ports_in_requested_order = []
for network in nets:
# If security groups are requested on an instance then the
            # network must have a subnet associated with it. Some plugins
# implement the port-security extension which requires
# 'port_security_enabled' to be True for security groups.
# That is why True is returned if 'port_security_enabled'
# is not found.
if (security_groups and not (
network['subnets']
and network.get('port_security_enabled', True))):
raise exception.SecurityGroupCannotBeApplied()
network_id = network['id']
zone = 'compute:%s' % instance['availability_zone']
port_req_body = {'port': {'device_id': instance['uuid'],
'device_owner': zone}}
try:
port = ports.get(network_id)
self._populate_neutron_extension_values(context, instance,
port_req_body)
# Requires admin creds to set port bindings
port_client = (neutron if not
self._has_port_binding_extension(context) else
neutronv2.get_client(context, admin=True))
if port:
port_client.update_port(port['id'], port_req_body)
touched_port_ids.append(port['id'])
ports_in_requested_order.append(port['id'])
else:
created_port = self._create_port(
port_client, instance, network_id,
port_req_body, fixed_ips.get(network_id),
security_group_ids, available_macs, dhcp_opts)
created_port_ids.append(created_port)
ports_in_requested_order.append(created_port)
except Exception:
with excutils.save_and_reraise_exception():
for port_id in touched_port_ids:
try:
port_req_body = {'port': {'device_id': ''}}
# Requires admin creds to set port bindings
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = None
port_client = neutronv2.get_client(
context, admin=True)
else:
port_client = neutron
port_client.update_port(port_id, port_req_body)
except Exception:
msg = _("Failed to update port %s")
LOG.exception(msg, port_id)
for port_id in created_port_ids:
try:
neutron.delete_port(port_id)
except Exception:
msg = _("Failed to delete port %s")
LOG.exception(msg, port_id)
nw_info = self.get_instance_nw_info(context, instance, networks=nets,
port_ids=ports_in_requested_order)
# NOTE(danms): Only return info about ports we created in this run.
# In the initial allocation case, this will be everything we created,
# and in later runs will only be what was created that time. Thus,
# this only affects the attach case, not the original use for this
# method.
return network_model.NetworkInfo([port for port in nw_info
if port['id'] in created_port_ids +
touched_port_ids])
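    # Illustrative caller sketch (identifiers hypothetical): requested_networks
    # is a list of (network_id, fixed_ip, port_id) triples, with None in the
    # unused slots:
    #
    #     nw_info = network_api.allocate_for_instance(
    #         ctxt, instance,
    #         requested_networks=[(net_id, '10.0.0.5', None),
    #                             (None, None, precreated_port_id)],
    #         security_groups=['default'])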
def _refresh_neutron_extensions_cache(self, context):
"""Refresh the neutron extensions cache when necessary."""
if (not self.last_neutron_extension_sync or
((time.time() - self.last_neutron_extension_sync)
>= CONF.neutron_extension_sync_interval)):
neutron = neutronv2.get_client(context)
extensions_list = neutron.list_extensions()['extensions']
self.last_neutron_extension_sync = time.time()
self.extensions.clear()
self.extensions = dict((ext['name'], ext)
for ext in extensions_list)
def _has_port_binding_extension(self, context, refresh_cache=False):
if refresh_cache:
self._refresh_neutron_extensions_cache(context)
return constants.PORTBINDING_EXT in self.extensions
def _populate_neutron_extension_values(self, context, instance,
port_req_body):
"""Populate neutron extension values for the instance.
If the extension contains nvp-qos then get the rxtx_factor.
"""
self._refresh_neutron_extensions_cache(context)
if 'nvp-qos' in self.extensions:
flavor = flavors.extract_flavor(instance)
rxtx_factor = flavor.get('rxtx_factor')
port_req_body['port']['rxtx_factor'] = rxtx_factor
if self._has_port_binding_extension(context):
port_req_body['port']['binding:host_id'] = instance.get('host')
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocate all network resources related to the instance."""
LOG.debug('deallocate_for_instance() for %s',
instance['display_name'])
search_opts = {'device_id': instance['uuid']}
neutron = neutronv2.get_client(context)
data = neutron.list_ports(**search_opts)
ports = [port['id'] for port in data.get('ports', [])]
requested_networks = kwargs.get('requested_networks') or {}
ports_to_skip = [port_id for nets, fips, port_id in requested_networks]
ports = set(ports) - set(ports_to_skip)
for port in ports:
try:
neutron.delete_port(port)
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
LOG.warning(_("Port %s does not exist"), port)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to delete neutron port %s"),
port)
# NOTE(arosen): This clears out the network_cache only if the instance
# hasn't already been deleted. This is needed when an instance fails to
# launch and is rescheduled onto another compute node. If the instance
# has already been deleted this call does nothing.
base_api.update_instance_cache_with_nw_info(self, context, instance,
network_model.NetworkInfo([]))
def allocate_port_for_instance(self, context, instance, port_id,
network_id=None, requested_ip=None):
"""Allocate a port for the instance."""
return self.allocate_for_instance(context, instance,
requested_networks=[(network_id, requested_ip, port_id)])
def deallocate_port_for_instance(self, context, instance, port_id):
"""Remove a specified port from the instance.
Return network information for the instance
"""
try:
neutronv2.get_client(context).delete_port(port_id)
except Exception:
LOG.exception(_("Failed to delete neutron port %s") %
port_id)
return self.get_instance_nw_info(context, instance)
def list_ports(self, context, **search_opts):
"""List ports for the client based on search options."""
return neutronv2.get_client(context).list_ports(**search_opts)
def show_port(self, context, port_id):
"""Return the port for the client given the port id."""
return neutronv2.get_client(context).show_port(port_id)
def get_instance_nw_info(self, context, instance, networks=None,
port_ids=None, use_slave=False):
"""Return network information for specified instance
and update cache.
"""
# NOTE(geekinutah): It would be nice if use_slave had us call
# special APIs that pummeled slaves instead of
# the master. For now we just ignore this arg.
result = self._get_instance_nw_info(context, instance, networks,
port_ids)
base_api.update_instance_cache_with_nw_info(self, context, instance,
result, update_cells=False)
return result
def _get_instance_nw_info(self, context, instance, networks=None,
port_ids=None):
# keep this caching-free version of the get_instance_nw_info method
# because it is used by the caching logic itself.
LOG.debug('get_instance_nw_info() for %s', instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks,
port_ids)
return network_model.NetworkInfo.hydrate(nw_info)
def _gather_port_ids_and_networks(self, context, instance, networks=None,
port_ids=None):
"""Return an instance's complete list of port_ids and networks."""
if ((networks is None and port_ids is not None) or
(port_ids is None and networks is not None)):
            message = ("This method needs to be called with either "
                       "networks=None and port_ids=None, or with both "
                       "port_ids and networks not None.")
raise exception.NovaException(message=message)
ifaces = compute_utils.get_nw_info_for_instance(instance)
# This code path is only done when refreshing the network_cache
if port_ids is None:
port_ids = [iface['id'] for iface in ifaces]
net_ids = [iface['network']['id'] for iface in ifaces]
if networks is None:
networks = self._get_available_networks(context,
instance['project_id'],
net_ids)
# an interface was added/removed from instance.
else:
# Since networks does not contain the existing networks on the
# instance we use their values from the cache and add it.
networks = networks + [
{'id': iface['network']['id'],
'name': iface['network']['label'],
'tenant_id': iface['network']['meta']['tenant_id']}
for iface in ifaces]
# Include existing interfaces so they are not removed from the db.
port_ids = [iface['id'] for iface in ifaces] + port_ids
return networks, port_ids
@base_api.refresh_cache
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Add a fixed ip to the instance from specified network."""
search_opts = {'network_id': network_id}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
if not ipam_subnets:
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'network_id': network_id}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
for subnet in ipam_subnets:
fixed_ips = p['fixed_ips']
fixed_ips.append({'subnet_id': subnet['id']})
port_req_body = {'port': {'fixed_ips': fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
return self._get_instance_nw_info(context, instance)
except Exception as ex:
msg = _("Unable to update port %(portid)s on subnet "
"%(subnet_id)s with failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'],
'subnet_id': subnet['id'],
'exception': ex})
raise exception.NetworkNotFoundForInstance(
instance_id=instance['uuid'])
@base_api.refresh_cache
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Remove a fixed ip from the instance."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone,
'fixed_ips': 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data['ports']
for p in ports:
fixed_ips = p['fixed_ips']
new_fixed_ips = []
for fixed_ip in fixed_ips:
if fixed_ip['ip_address'] != address:
new_fixed_ips.append(fixed_ip)
port_req_body = {'port': {'fixed_ips': new_fixed_ips}}
try:
neutronv2.get_client(context).update_port(p['id'],
port_req_body)
except Exception as ex:
msg = _("Unable to update port %(portid)s with"
" failure: %(exception)s")
LOG.debug(msg, {'portid': p['id'], 'exception': ex})
return self._get_instance_nw_info(context, instance)
raise exception.FixedIpNotFoundForSpecificInstance(
instance_uuid=instance['uuid'], ip=address)
def validate_networks(self, context, requested_networks, num_instances):
"""Validate that the tenant can use the requested networks.
Return the number of instances than can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s',
requested_networks)
neutron = neutronv2.get_client(context)
ports_needed_per_instance = 0
if not requested_networks:
nets = self._get_available_networks(context, context.project_id,
neutron=neutron)
if len(nets) > 1:
# Attaching to more than one network by default doesn't
# make sense, as the order will be arbitrary and the guest OS
# won't know which to configure
msg = _("Multiple possible networks found, use a Network "
"ID to be more specific.")
raise exception.NetworkAmbiguous(msg)
else:
ports_needed_per_instance = 1
else:
instance_on_net_ids = []
net_ids_requested = []
for (net_id, _i, port_id) in requested_networks:
if port_id:
try:
port = neutron.show_port(port_id).get('port')
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
port = None
else:
with excutils.save_and_reraise_exception():
LOG.exception(_("Failed to access port %s"),
port_id)
if not port:
raise exception.PortNotFound(port_id=port_id)
if port.get('device_id', None):
raise exception.PortInUse(port_id=port_id)
if not port.get('fixed_ips'):
raise exception.PortRequiresFixedIP(port_id=port_id)
net_id = port['network_id']
else:
ports_needed_per_instance += 1
net_ids_requested.append(net_id)
if net_id in instance_on_net_ids:
raise exception.NetworkDuplicated(network_id=net_id)
instance_on_net_ids.append(net_id)
# Now check to see if all requested networks exist
if net_ids_requested:
nets = self._get_available_networks(
context, context.project_id, net_ids_requested,
neutron=neutron)
for net in nets:
if not net.get('subnets'):
raise exception.NetworkRequiresSubnet(
network_uuid=net['id'])
if len(nets) != len(net_ids_requested):
requested_netid_set = set(net_ids_requested)
returned_netid_set = set([net['id'] for net in nets])
lostid_set = requested_netid_set - returned_netid_set
id_str = ''
for _id in lostid_set:
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)
# Note(PhilD): Ideally Nova would create all required ports as part of
# network validation, but port creation requires some details
# from the hypervisor. So we just check the quota and return
# how many of the requested number of instances can be created
if ports_needed_per_instance:
ports = neutron.list_ports(tenant_id=context.project_id)['ports']
quotas = neutron.show_quota(tenant_id=context.project_id)['quota']
if quotas.get('port') == -1:
# Unlimited Port Quota
return num_instances
else:
free_ports = quotas.get('port') - len(ports)
ports_needed = ports_needed_per_instance * num_instances
if free_ports >= ports_needed:
return num_instances
else:
return free_ports // ports_needed_per_instance
return num_instances
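    # Worked quota example for the arithmetic above (assumed numbers, not
    # from the original source): with a port quota of 10, 7 existing ports
    # and 2 ports needed per instance, free_ports is 3; a request for 5
    # instances therefore returns 3 // 2 == 1.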
def _get_instance_uuids_by_ip(self, context, address):
"""Retrieve instance uuids associated with the given ip address.
:returns: A list of dicts containing the uuids keyed by 'instance_uuid'
e.g. [{'instance_uuid': uuid}, ...]
"""
search_opts = {"fixed_ips": 'ip_address=%s' % address}
data = neutronv2.get_client(context).list_ports(**search_opts)
ports = data.get('ports', [])
return [{'instance_uuid': port['device_id']} for port in ports
if port['device_id']]
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Return a list of dicts in the form of
[{'instance_uuid': uuid}] that matched the ip filter.
"""
# filters['ip'] is composed as '^%s$' % fixed_ip.replace('.', '\\.')
ip = filters.get('ip')
        # we remove the ^, $ and \ characters from the ip filter
if ip[0] == '^':
ip = ip[1:]
if ip[-1] == '$':
ip = ip[:-1]
ip = ip.replace('\\.', '.')
return self._get_instance_uuids_by_ip(context, ip)
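    # Hedged example of the unwrapping above: a filter built as
    # '^10\\.0\\.0\\.2$' (regex-escaped by the caller) is reduced back to
    # the plain address '10.0.0.2' before the port lookup.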
def _get_port_id_by_fixed_address(self, client,
instance, address):
"""Return port_id from a fixed address."""
zone = 'compute:%s' % instance['availability_zone']
search_opts = {'device_id': instance['uuid'],
'device_owner': zone}
data = client.list_ports(**search_opts)
ports = data['ports']
port_id = None
for p in ports:
for ip in p['fixed_ips']:
if ip['ip_address'] == address:
port_id = p['id']
break
if not port_id:
raise exception.FixedIpNotFoundForAddress(address=address)
return port_id
@base_api.refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associate a floating ip with a fixed ip."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
client = neutronv2.get_client(context)
port_id = self._get_port_id_by_fixed_address(client, instance,
fixed_address)
fip = self._get_floating_ip_by_address(client, floating_address)
param = {'port_id': port_id,
'fixed_ip_address': fixed_address}
client.update_floatingip(fip['id'], {'floatingip': param})
if fip['port_id']:
port = client.show_port(fip['port_id'])['port']
orig_instance_uuid = port['device_id']
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = self.db.instance_get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
base_api.update_instance_cache_with_nw_info(self, context,
orig_instance)
def get_all(self, context):
"""Get all networks for client."""
client = neutronv2.get_client(context)
networks = client.list_networks().get('networks')
for network in networks:
network['label'] = network['name']
return networks
def get(self, context, network_uuid):
"""Get specific network for client."""
client = neutronv2.get_client(context)
network = client.show_network(network_uuid).get('network') or {}
network['label'] = network['name']
return network
def delete(self, context, network_uuid):
"""Delete a network for client."""
raise NotImplementedError()
def disassociate(self, context, network_uuid):
"""Disassociate a network for client."""
raise NotImplementedError()
def associate(self, context, network_uuid, host=base_api.SENTINEL,
project=base_api.SENTINEL):
"""Associate a network for client."""
raise NotImplementedError()
def get_fixed_ip(self, context, id):
"""Get a fixed ip from the id."""
raise NotImplementedError()
def get_fixed_ip_by_address(self, context, address):
"""Return instance uuids given an address."""
uuid_maps = self._get_instance_uuids_by_ip(context, address)
if len(uuid_maps) == 1:
return uuid_maps[0]
elif not uuid_maps:
raise exception.FixedIpNotFoundForAddress(address=address)
else:
raise exception.FixedIpAssociatedWithMultipleInstances(
address=address)
def _setup_net_dict(self, client, network_id):
if not network_id:
return {}
pool = client.show_network(network_id)['network']
return {pool['id']: pool}
def _setup_port_dict(self, client, port_id):
if not port_id:
return {}
port = client.show_port(port_id)['port']
return {port['id']: port}
def _setup_pools_dict(self, client):
pools = self._get_floating_ip_pools(client)
return dict([(i['id'], i) for i in pools])
def _setup_ports_dict(self, client, project_id=None):
search_opts = {'tenant_id': project_id} if project_id else {}
ports = client.list_ports(**search_opts)['ports']
return dict([(p['id'], p) for p in ports])
def get_floating_ip(self, context, id):
"""Return floating ip object given the floating ip id."""
client = neutronv2.get_client(context)
try:
fip = client.show_floatingip(id)['floatingip']
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
raise exception.FloatingIpNotFound(id=id)
else:
with excutils.save_and_reraise_exception():
LOG.exception(_('Unable to access floating IP %s'), id)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def _get_floating_ip_pools(self, client, project_id=None):
search_opts = {constants.NET_EXTERNAL: True}
if project_id:
search_opts.update({'tenant_id': project_id})
data = client.list_networks(**search_opts)
return data['networks']
def get_floating_ip_pools(self, context):
"""Return floating ip pools."""
client = neutronv2.get_client(context)
pools = self._get_floating_ip_pools(client)
return [{'name': n['name'] or n['id']} for n in pools]
def _format_floating_ip_model(self, fip, pool_dict, port_dict):
pool = pool_dict[fip['floating_network_id']]
result = {'id': fip['id'],
'address': fip['floating_ip_address'],
'pool': pool['name'] or pool['id'],
'project_id': fip['tenant_id'],
# In Neutron v2, an exact fixed_ip_id does not exist.
'fixed_ip_id': fip['port_id'],
}
# In Neutron v2 API fixed_ip_address and instance uuid
# (= device_id) are known here, so pass it as a result.
result['fixed_ip'] = {'address': fip['fixed_ip_address']}
if fip['port_id']:
instance_uuid = port_dict[fip['port_id']]['device_id']
result['instance'] = {'uuid': instance_uuid}
else:
result['instance'] = None
return result
def get_floating_ip_by_address(self, context, address):
"""Return a floating ip given an address."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
pool_dict = self._setup_net_dict(client,
fip['floating_network_id'])
port_dict = self._setup_port_dict(client, fip['port_id'])
return self._format_floating_ip_model(fip, pool_dict, port_dict)
def get_floating_ips_by_project(self, context):
client = neutronv2.get_client(context)
project_id = context.project_id
fips = client.list_floatingips(tenant_id=project_id)['floatingips']
pool_dict = self._setup_pools_dict(client)
port_dict = self._setup_ports_dict(client, project_id)
return [self._format_floating_ip_model(fip, pool_dict, port_dict)
for fip in fips]
def get_floating_ips_by_fixed_address(self, context, fixed_address):
raise NotImplementedError()
def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating ip's fixed ip is allocated to."""
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
return None
port = client.show_port(fip['port_id'])['port']
return port['device_id']
def get_vifs_by_instance(self, context, instance):
raise NotImplementedError()
def get_vif_by_mac_address(self, context, mac_address):
raise NotImplementedError()
def _get_floating_ip_pool_id_by_name_or_id(self, client, name_or_id):
search_opts = {constants.NET_EXTERNAL: True, 'fields': 'id'}
if uuidutils.is_uuid_like(name_or_id):
search_opts.update({'id': name_or_id})
else:
search_opts.update({'name': name_or_id})
data = client.list_networks(**search_opts)
nets = data['networks']
if len(nets) == 1:
return nets[0]['id']
elif len(nets) == 0:
raise exception.FloatingIpPoolNotFound()
else:
msg = (_("Multiple floating IP pools matches found for name '%s'")
% name_or_id)
raise exception.NovaException(message=msg)
def allocate_floating_ip(self, context, pool=None):
"""Add a floating ip to a project from a pool."""
client = neutronv2.get_client(context)
pool = pool or CONF.default_floating_pool
pool_id = self._get_floating_ip_pool_id_by_name_or_id(client, pool)
# TODO(amotoki): handle exception during create_floatingip()
        # At this point it is ensured that a network for the pool exists.
        # A quota error may be returned.
param = {'floatingip': {'floating_network_id': pool_id}}
try:
fip = client.create_floatingip(param)
except (neutron_client_exc.IpAddressGenerationFailureClient,
neutron_client_exc.ExternalIpAddressExhaustedClient) as e:
raise exception.NoMoreFloatingIps(unicode(e))
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
"""Get floatingip from floating ip address."""
if not address:
raise exception.FloatingIpNotFoundForAddress(address=address)
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
raise exception.FloatingIpNotFoundForAddress(address=address)
elif len(fips) > 1:
raise exception.FloatingIpMultipleFoundForAddress(address=address)
return fips[0]
def _get_floating_ips_by_fixed_and_port(self, client, fixed_ip, port):
"""Get floatingips from fixed ip and port."""
try:
data = client.list_floatingips(fixed_ip_address=fixed_ip,
port_id=port)
# If a neutron plugin does not implement the L3 API a 404 from
# list_floatingips will be raised.
except neutronv2.exceptions.NeutronClientException as e:
if e.status_code == 404:
return []
with excutils.save_and_reraise_exception():
LOG.exception(_('Unable to access floating IP %(fixed_ip)s '
'for port %(port_id)s'),
{'fixed_ip': fixed_ip, 'port_id': port})
return data['floatingips']
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Remove a floating ip with the given address from a project."""
# Note(amotoki): We cannot handle a case where multiple pools
# have overlapping IP address range. In this case we cannot use
# 'address' as a unique key.
# This is a limitation of the current nova.
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if fip['port_id']:
raise exception.FloatingIpAssociated(address=address)
client.delete_floatingip(fip['id'])
@base_api.refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociate a floating ip from the instance."""
        # Note(amotoki): 'affect_auto_assigned' is not respected
        # since it is not used anywhere in nova code and I could
        # not find why this parameter exists.
client = neutronv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
"""Start to migrate the network of an instance."""
        # NOTE(wenjianhn): just pass so that migrating an instance
        # doesn't raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
"""Finish migrating the network of an instance."""
if not self._has_port_binding_extension(context, refresh_cache=True):
return
neutron = neutronv2.get_client(context, admin=True)
search_opts = {'device_id': instance['uuid'],
'tenant_id': instance['project_id']}
data = neutron.list_ports(**search_opts)
ports = data['ports']
for p in ports:
port_req_body = {'port': {'binding:host_id':
migration['dest_compute']}}
try:
neutron.update_port(p['id'], port_req_body)
except Exception:
with excutils.save_and_reraise_exception():
msg = _("Unable to update host of port %s")
LOG.exception(msg, p['id'])
def add_network_to_project(self, context, project_id, network_uuid=None):
"""Force add a network to the project."""
raise NotImplementedError()
def _nw_info_get_ips(self, client, port):
network_IPs = []
for fixed_ip in port['fixed_ips']:
fixed = network_model.FixedIP(address=fixed_ip['ip_address'])
floats = self._get_floating_ips_by_fixed_and_port(
client, fixed_ip['ip_address'], port['id'])
for ip in floats:
fip = network_model.IP(address=ip['floating_ip_address'],
type='floating')
fixed.add_floating_ip(fip)
network_IPs.append(fixed)
return network_IPs
def _nw_info_get_subnets(self, context, port, network_IPs):
subnets = self._get_subnets_from_port(context, port)
for subnet in subnets:
subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
if fixed_ip.is_in_subnet(subnet)]
return subnets
def _nw_info_build_network(self, port, networks, subnets):
network_name = None
for net in networks:
if port['network_id'] == net['id']:
network_name = net['name']
tenant_id = net['tenant_id']
break
else:
tenant_id = port['tenant_id']
LOG.warning(_("Network %(id)s not matched with the tenants "
"network! The ports tenant %(tenant_id)s will be "
"used."),
{'id': port['network_id'], 'tenant_id': tenant_id})
bridge = None
ovs_interfaceid = None
# Network model metadata
should_create_bridge = None
vif_type = port.get('binding:vif_type')
# TODO(berrange) Neutron should pass the bridge name
# in another binding metadata field
if vif_type == network_model.VIF_TYPE_OVS:
bridge = CONF.neutron_ovs_bridge
ovs_interfaceid = port['id']
elif vif_type == network_model.VIF_TYPE_BRIDGE:
bridge = "brq" + port['network_id']
should_create_bridge = True
if bridge is not None:
bridge = bridge[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
bridge=bridge,
injected=CONF.flat_injected,
label=network_name,
tenant_id=tenant_id
)
network['subnets'] = subnets
port_profile = port.get('binding:profile')
if port_profile:
physical_network = port_profile.get('physical_network')
if physical_network:
network['physical_network'] = physical_network
if should_create_bridge is not None:
network['should_create_bridge'] = should_create_bridge
return network, ovs_interfaceid
def _build_network_info_model(self, context, instance, networks=None,
port_ids=None):
"""Return list of ordered VIFs attached to instance.
:param context - request context.
:param instance - instance we are returning network info for.
:param networks - List of networks being attached to an instance.
If value is None this value will be populated
from the existing cached value.
:param port_ids - List of port_ids that are being attached to an
instance in order of attachment. If value is None
this value will be populated from the existing
cached value.
"""
search_opts = {'tenant_id': instance['project_id'],
'device_id': instance['uuid'], }
client = neutronv2.get_client(context, admin=True)
data = client.list_ports(**search_opts)
current_neutron_ports = data.get('ports', [])
networks, port_ids = self._gather_port_ids_and_networks(
context, instance, networks, port_ids)
nw_info = network_model.NetworkInfo()
current_neutron_port_map = {}
for current_neutron_port in current_neutron_ports:
current_neutron_port_map[current_neutron_port['id']] = (
current_neutron_port)
for port_id in port_ids:
current_neutron_port = current_neutron_port_map.get(port_id)
if current_neutron_port:
vif_active = False
if (current_neutron_port['admin_state_up'] is False
or current_neutron_port['status'] == 'ACTIVE'):
vif_active = True
network_IPs = self._nw_info_get_ips(client,
current_neutron_port)
subnets = self._nw_info_get_subnets(context,
current_neutron_port,
network_IPs)
devname = "tap" + current_neutron_port['id']
devname = devname[:network_model.NIC_NAME_LEN]
network, ovs_interfaceid = (
self._nw_info_build_network(current_neutron_port,
networks, subnets))
nw_info.append(network_model.VIF(
id=current_neutron_port['id'],
address=current_neutron_port['mac_address'],
network=network,
type=current_neutron_port.get('binding:vif_type'),
details=current_neutron_port.get('binding:vif_details'),
ovs_interfaceid=ovs_interfaceid,
devname=devname,
active=vif_active))
return nw_info
def _get_subnets_from_port(self, context, port):
"""Return the subnets for a given port."""
fixed_ips = port['fixed_ips']
# No fixed_ips for the port means there is no subnet associated
# with the network the port is created on.
# Since list_subnets(id=[]) returns all subnets visible for the
        # current tenant, returned subnets may contain subnets which are not
# related to the port. To avoid this, the method returns here.
if not fixed_ips:
return []
search_opts = {'id': [ip['subnet_id'] for ip in fixed_ips]}
data = neutronv2.get_client(context).list_subnets(**search_opts)
ipam_subnets = data.get('subnets', [])
subnets = []
for subnet in ipam_subnets:
subnet_dict = {'cidr': subnet['cidr'],
'gateway': network_model.IP(
address=subnet['gateway_ip'],
type='gateway'),
}
# attempt to populate DHCP server field
search_opts = {'network_id': subnet['network_id'],
'device_owner': 'network:dhcp'}
data = neutronv2.get_client(context).list_ports(**search_opts)
dhcp_ports = data.get('ports', [])
for p in dhcp_ports:
for ip_pair in p['fixed_ips']:
if ip_pair['subnet_id'] == subnet['id']:
subnet_dict['dhcp_server'] = ip_pair['ip_address']
break
subnet_object = network_model.Subnet(**subnet_dict)
for dns in subnet.get('dns_nameservers', []):
subnet_object.add_dns(
network_model.IP(address=dns, type='dns'))
# TODO(gongysh) get the routes for this subnet
subnets.append(subnet_object)
return subnets
def get_dns_domains(self, context):
"""Return a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
raise NotImplementedError()
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address."""
raise NotImplementedError()
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
raise NotImplementedError()
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
raise NotImplementedError()
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain."""
raise NotImplementedError()
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain."""
raise NotImplementedError()
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
raise NotImplementedError()
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
raise NotImplementedError()
def _ensure_requested_network_ordering(accessor, unordered, preferred):
"""Sort a list with respect to the preferred network ordering."""
if preferred:
unordered.sort(key=lambda i: preferred.index(accessor(i)))
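# A minimal usage sketch for the helper above (assumed values, not from
# the original source):
#   nets = [{'id': 'net-b'}, {'id': 'net-a'}]
#   _ensure_requested_network_ordering(
#       lambda n: n['id'], nets, ['net-a', 'net-b'])
#   # nets is now [{'id': 'net-a'}, {'id': 'net-b'}]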
|
{
"content_hash": "443fbeea649c2069b5d37dfaadffc9b9",
"timestamp": "",
"source": "github",
"line_count": 1223,
"max_line_length": 79,
"avg_line_length": 46.0670482420278,
"alnum_prop": 0.5582712105076323,
"repo_name": "CiscoSystems/nova",
"id": "f397cbe8ef102648ddcb081ffe9440ec851080a3",
"size": "57014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/network/neutronv2/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13926229"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: lambda_alias
short_description: Creates, updates or deletes AWS Lambda function aliases.
description:
  - This module allows the management of AWS Lambda function aliases via the Ansible
framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
itself and M(lambda_event) to manage event source mappings.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
options:
function_name:
description:
      - The name of the Lambda function.
required: true
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
name:
description:
- Name of the function alias.
required: true
aliases: ['alias_name']
description:
description:
- A short, user-defined function alias description.
required: false
version:
description:
- Version associated with the Lambda function alias.
A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
required: false
aliases: ['function_version']
requirements:
- boto3
extends_documentation_fragment:
- aws
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
production_version: 5
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
- name: show results
debug:
var: lambda_facts
# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
- name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Dev
description: Development is $LATEST version
# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
- name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: QA
version: "{{ lambda_facts.Version }}"
description: "QA is version {{ lambda_facts.Version }}"
when: lambda_facts.Version != "$LATEST"
# The Prod alias will have a fixed version based on a variable
- name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
lambda_alias:
state: "{{ state | default('present') }}"
function_name: "{{ lambda_facts.FunctionName }}"
name: Prod
version: "{{ production_version }}"
description: "Production is version {{ production_version }}"
'''
RETURN = '''
---
alias_arn:
description: Full ARN of the function, including the alias
returned: success
type: string
sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
description:
description: A short description of the alias
returned: success
type: string
sample: The development stage for my hot new app
function_version:
description: The qualifier that the alias refers to
returned: success
type: string
sample: $LATEST
name:
description: The name of the alias assigned
returned: success
type: string
sample: dev
'''
import re
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
class AWSConnection:
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3_=True):
try:
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3_)
self.resource_client = dict()
if not resources:
resources = ['lambda']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['lambda'].meta.region_name
except (ClientError, ParamValidationError, MissingParametersError) as e:
ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='lambda'):
return self.resource_client[resource]
def pc(key):
"""
    Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param, None)
if module_param:
api_params[pc(param)] = module_param
return api_params
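# A hedged example of the mapping above (assumed parameter values): with
# module.params == {'function_name': 'hello', 'name': 'dev'},
# set_api_params(module, ('function_name', 'name')) returns
# {'FunctionName': 'hello', 'Name': 'dev'}.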
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
function_name = module.params['function_name']
# validate function name
    if not re.search(r'^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)
if len(function_name) > 64:
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
# if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
if module.params['function_version'] == 0:
module.params['function_version'] = '$LATEST'
else:
module.params['function_version'] = str(module.params['function_version'])
return
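# Sketch of the coercion above (assumed inputs): a function_version of 0
# becomes the string '$LATEST', while 3 becomes the string '3'.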
def get_lambda_alias(module, aws):
"""
Returns the lambda function alias if it exists.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
client = aws.client('lambda')
# set API parameters
api_params = set_api_params(module, ('function_name', 'name'))
# check if alias exists and get facts
try:
results = client.get_alias(**api_params)
except (ClientError, ParamValidationError, MissingParametersError) as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
results = None
else:
module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
return results
def lambda_alias(module, aws):
"""
Adds, updates or deletes lambda function aliases.
:param module: Ansible module reference
:param aws: AWS client connection
:return dict:
"""
client = aws.client('lambda')
results = dict()
changed = False
current_state = 'absent'
state = module.params['state']
facts = get_lambda_alias(module, aws)
if facts:
current_state = 'present'
if state == 'present':
if current_state == 'present':
# check if alias has changed -- only version and description can change
alias_params = ('function_version', 'description')
for param in alias_params:
if module.params.get(param) != facts.get(pc(param)):
changed = True
break
if changed:
api_params = set_api_params(module, ('function_name', 'name'))
api_params.update(set_api_params(module, alias_params))
if not module.check_mode:
try:
results = client.update_alias(**api_params)
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating function alias: {0}'.format(e))
else:
# create new function alias
api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))
try:
if not module.check_mode:
results = client.create_alias(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating function alias: {0}'.format(e))
else: # state = 'absent'
if current_state == 'present':
            # delete the alias
api_params = set_api_params(module, ('function_name', 'name'))
try:
if not module.check_mode:
results = client.delete_alias(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error deleting function alias: {0}'.format(e))
return dict(changed=changed, **dict(results or facts))
def main():
"""
Main entry point.
:return dict: ansible facts
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
function_name=dict(required=True, default=None),
name=dict(required=True, default=None, aliases=['alias_name']),
function_version=dict(type='int', required=False, default=0, aliases=['version']),
description=dict(required=False, default=None),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[],
required_together=[]
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['lambda'])
validate_params(module, aws)
results = lambda_alias(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results))
if __name__ == '__main__':
main()
|
{
"content_hash": "9fa8e9905df50467a202fdb1f0b7fc4d",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 130,
"avg_line_length": 31.25065963060686,
"alnum_prop": 0.6176122931442081,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "78aae3cdb051cd277b850d58a1e684648cc8822a",
"size": "11985",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/lambda_alias.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
from datetime import datetime
def createBackup(rootDir, backupPath):
for root, dirs, files in os.walk(rootDir):
for f in files:
filename = (f).replace('\\', "/")
rVal = root.replace(rootDir, "")
dst = (os.path.join("./", backupPath + "/" + rVal + "/" + filename)).replace("\\", "/").replace("//", "/")
src = os.path.join(root, filename)
# make paths that don't exist
if (not os.path.exists(os.path.dirname(dst))):
os.makedirs(os.path.dirname(dst))
# copy files to backup
shutil.copyfile(src, dst)
def replaceInFile(src, findStr1, findStr2, replaceStr1, replaceStr2):
# read the file line by line, store it.
lines = [line for line in open(src)]
newLines = []
    for idx, l in enumerate(lines):
        a = l
        if (findStr1 in l) and (findStr2 in l):
            a = a.replace(findStr1, replaceStr1).replace(findStr2, replaceStr2)
        if (a != l):
            print "In %s, line %i:\n  %s\nIS NOW\n  %s" % (src, idx, l, a)
        newLines.append(a)
try:
fl = open(src, 'w')
for item in newLines:
fl.write("%s" % item)
fl.close()
#print "Replaced '%s' with '%s' in %s."%(findStr, replaceStr, src)
except ValueError:
print ValueError
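# Illustrative transformation (a sketch using the find/replace strings
# set in main() below): a line such as
#   $(elem).css({"color": "red"})
# becomes
#   utils.css.setCSS( elem, {"color": "red"})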
def main():
rootDir = "../scripts/widgets"
backupDir = "scriptsBackup"
findStr1 = "$("
findStr2 = ").css("
replaceStr1 = "utils.css.setCSS( "
replaceStr2 = ", "
    skipDirs = ["jquery"]
backupPath = os.path.join("./", backupDir) + "_" + datetime.now().strftime("%Y-%m-%d %H:%M:%S").replace(':','_').replace(" ", "__").strip()
# make a backup folder
# clear it
if (not os.path.exists(backupPath)):
os.mkdir(backupPath)
createBackup(rootDir, backupPath)
for root, dirs, files in os.walk(rootDir):
for f in files:
skip = False
for d in skipDirs:
if (len(d) > 0) and (d in root):
                    skip = True
                    break
if not skip:
filename = (f).replace('\\', "/")
src = os.path.join(root, filename)
                dst = src
replaceInFile(dst, findStr1, findStr2, replaceStr1, replaceStr2)
else:
print ("\n [SKIPPING %s]" %(root))
if __name__ == "__main__":
main()
|
{
"content_hash": "d0ca39e7aa9bbbb334b571351dfd7f89",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 153,
"avg_line_length": 29.945054945054945,
"alnum_prop": 0.4704587155963303,
"repo_name": "evast/XNATImageViewer",
"id": "533eb4adaa9d6014125fce095a71642f3fcfc15c",
"size": "2725",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "utility-scripts/python/_old/replaceString-JqueryFrame.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "737722"
},
{
"name": "Emacs Lisp",
"bytes": "2410"
},
{
"name": "HTML",
"bytes": "3793153"
},
{
"name": "JavaScript",
"bytes": "18990161"
},
{
"name": "Nu",
"bytes": "533"
},
{
"name": "Python",
"bytes": "756013"
},
{
"name": "Ruby",
"bytes": "1760"
},
{
"name": "Shell",
"bytes": "9315"
}
],
"symlink_target": ""
}
|
'''
Created on 25 Jun 2015
@author: meghann
'''
import tkinter as tk
import tkinter.font
from tkinter import ttk
import lists
import energycalc
class Options(ttk.Labelframe):
def __init__(self, parent):
ttk.LabelFrame.__init__(self, parent, text="Fuel comparison options")
self.parent = parent
self.fuelType = tk.StringVar()
self.fuelQuantity = tk.StringVar()
self.fuelQuantityUnit = tk.StringVar()
self.fuelEnergyPerUnit = tk.StringVar()
self.fuelEnergyPerUnitUnit = tk.StringVar()
self.fuelEnergyTotal = tk.StringVar()
self.fuelEnergyTotalUnit = tk.StringVar()
self.fuelCost = tk.StringVar()
self.fuelCostUnit = tk.StringVar()
self.fuelTotalCost = tk.StringVar()
self.multiplierText = tk.StringVar()
self.initialise()
def switchFuel(self):
# Invoked when fuel type is changed
fuel = self.fuelType.get()
# Look up unit and energy density for fuel type and
# set labels accordingly
fuelUnit = lists.fuelUnits[fuel]
self.fuelQuantityUnit.set(fuelUnit)
self.fuelEnergyPerUnitUnit.set('kJ/' + fuelUnit)
self.fuelCostUnit.set('£/' + fuelUnit)
self.fuelCost.set(lists.fuelDefaultCost[fuel])
self.fuelEnergyPerUnit.set(lists.fuelEnergyDensity[fuel])
# Call method to update calculations
self.calcTotals()
def calcTotals(self):
        # Invoked when fuel type, quantity or cost is changed
# Check fields are not empty to prevent errors
if self.fuelQuantity.get() != '' and self.fuelCost.get() != '':
# Calculate total energy content and cost of fuel,
# and update label text
fuel = self.fuelType.get()
fuelQuantity = self.fuelQuantity.get()
self.multiplierText.set('* ' + str(fuelQuantity) +
lists.fuelUnits[fuel] + ' =')
self.fuelEnergyTotal.set(round(float(fuelQuantity) *
lists.fuelEnergyDensity[fuel]))
self.fuelTotalCost.set(round(float(fuelQuantity) *
float(self.fuelCost.get()), 2))
self.updateLabels()
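    # Worked example for the arithmetic above (assumed figures): 2L of a
    # fuel with an energy density of 34200 kJ/L at £1.20/L gives a
    # fuelEnergyTotal of 68400 kJ and a fuelTotalCost of £2.40.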
def updateLabels(self):
# Invoked when fuel type, quantity or cost is changed
# Invoked by calcTotals method
fuelType = self.fuelType.get()
fuelQuantity = self.fuelQuantity.get()
if fuelQuantity != '' and fuelType != '':
self.parent.comparisons.costPerKmLabel.set(
'Cost to travel 1km on ' + fuelType +
' (pence/km)')
self.parent.comparisons.kmPerPoundLabel.set(
'km travelled per £1 ' + fuelType
+ ' (km/£)')
self.parent.comparisons.totalKmLabel.set('km travelled on ' +
fuelQuantity +
self.fuelQuantityUnit.get()
+ ' ' + fuelType)
else:
self.parent.comparisons.costPerKmLabel.set(
'Cost to travel 1km on selected fuel (£/km)')
self.parent.comparisons.kmPerPoundLabel.set(
'km travelled per £1 selected fuel (km/£)')
self.parent.comparisons.totalKmLabel.set(
'km travelled on selected fuel/quantity')
def initialise(self):
# Create and pack widgets
# First row
ttk.Label(self, text="Comparison fuel:").grid(column=0, row=0,
sticky=(tk.W), padx=5)
ttk.Combobox(self, textvariable=self.fuelType, values=lists.fuelTypes,
state="readonly", width=8).grid(column=1, row=0,
columnspan=2,
sticky=(tk.W))
ttk.Label(self, text="Fuel quantity:").grid(column=3, row=0,
sticky=(tk.W))
ttk.Entry(self, textvariable=self.fuelQuantity,
width=7).grid(column=4, row=0, sticky=(tk.W))
ttk.Label(self, textvariable=self.fuelQuantityUnit,
width=3).grid(column=5, row=0, sticky=(tk.W))
# Second row
ttk.Label(self, text="Fuel energy content:").grid(column=0, row=1,
sticky=(tk.W),
padx=5)
ttk.Label(self, textvariable=self.fuelEnergyPerUnit,
width=5).grid(column=1, row=1, sticky=(tk.W))
ttk.Label(self, textvariable=self.fuelEnergyPerUnitUnit,
width=4).grid(column=2, row=1, sticky=(tk.W))
ttk.Label(self, textvariable=self.multiplierText).grid(column=3,
row=1,
sticky=(tk.W))
ttk.Label(self, textvariable=self.fuelEnergyTotal,
width=7).grid(column=4, row=1, sticky=(tk.W))
ttk.Label(self, text='kJ', width=3).grid(column=5, row=1,
sticky=(tk.W))
# Third row
ttk.Label(self, text="Fuel cost:").grid(column=0, row=2,
sticky=(tk.W), padx=5)
ttk.Entry(self, textvariable=self.fuelCost,
width=5).grid(column=1, row=2, sticky=(tk.W))
ttk.Label(self, textvariable=self.fuelCostUnit,
width=4).grid(column=2, row=2, sticky=(tk.W))
ttk.Label(self, textvariable=self.multiplierText).grid(column=3,
row=2,
sticky=(tk.W))
ttk.Label(self, textvariable=self.fuelTotalCost,
width=7).grid(column=4, row=2, sticky=(tk.W))
ttk.Label(self, text='£', width=3).grid(column=5, row=2,
sticky=(tk.W))
# Column & row config
self.columnconfigure(0, weight=3, pad=5)
self.columnconfigure(1, weight=2, pad=2)
self.columnconfigure(2, weight=1, pad=25)
self.columnconfigure(3, weight=3, pad=5)
self.columnconfigure(4, weight=2, pad=2)
self.columnconfigure(5, weight=1, pad=5)
self.rowconfigure(0, weight=1, pad=5)
self.rowconfigure(1, weight=1, pad=5)
self.rowconfigure(2, weight=1, pad=5)
def initialiseContents(self):
# Set tracers and initialise values
# Change default values and units when fuel type is changed
self.fuelType.trace('w', lambda name, index, mode:
self.switchFuel())
# Calculate total energy and costs when fuel quantity
# or cost is changed
self.fuelQuantity.trace('w', lambda name, index, mode:
self.calcTotals())
self.fuelCost.trace('w', lambda name, index, mode:
self.calcTotals())
# Update fuel comparison calculations when fuel, quantity or
# cost changed
self.fuelEnergyPerUnit.trace('w', lambda name, index, mode:
energycalc.comparison(self.parent, 'Both',
self.parent.options.fuelEnergyPerUnit,
self.parent.options.fuelEnergyTotal,
self.parent.options.fuelCost,
self.parent.options.fuelQuantity,
self.parent.comparisons.carEnergyUse,
self.parent.comparisons.cyclistEnergyUse))
self.fuelEnergyTotal.trace('w', lambda name, index, mode:
energycalc.comparison(self.parent, 'Both',
self.parent.options.fuelEnergyPerUnit,
self.parent.options.fuelEnergyTotal,
self.parent.options.fuelCost,
self.parent.options.fuelQuantity,
self.parent.comparisons.carEnergyUse,
self.parent.comparisons.cyclistEnergyUse))
self.fuelTotalCost.trace('w', lambda name, index, mode:
energycalc.comparison(self.parent, 'Both',
self.parent.options.fuelEnergyPerUnit,
self.parent.options.fuelEnergyTotal,
self.parent.options.fuelCost,
self.parent.options.fuelQuantity,
self.parent.comparisons.carEnergyUse,
self.parent.comparisons.cyclistEnergyUse))
# Set initial values (invoke tracer methods)
self.fuelType.set('Petrol')
self.fuelQuantity.set('1')
class CyclistData(ttk.Labelframe):
def __init__(self, parent):
ttk.LabelFrame.__init__(self, parent, text="Cyclist data")
self.parent = parent
self.cyclistWeight = tk.StringVar()
self.bikeWeight = tk.StringVar()
self.velocity = tk.StringVar()
self.initialise()
def initialise(self):
# Create and pack widgets
# First row
ttk.Label(self, text="Cyclist weight:").grid(column=0, row=0,
sticky=(tk.W), padx=5)
ttk.Entry(self, textvariable=self.cyclistWeight,
width=5).grid(column=1, row=0)
ttk.Label(self, text="kg").grid(column=2, row=0, sticky=(tk.W))
# Second row
ttk.Label(self, text="Bike weight:").grid(column=0, row=1,
sticky=(tk.W), padx=5)
ttk.Entry(self, textvariable=self.bikeWeight,
width=5).grid(column=1, row=1)
ttk.Label(self, text="kg").grid(column=2, row=1, sticky=(tk.W))
# Third row
ttk.Label(self, text="Velocity:").grid(column=0, row=2, sticky=(tk.W),
padx=5)
ttk.Entry(self, textvariable=self.velocity,
width=5).grid(column=1, row=2)
ttk.Label(self, text="km/hr").grid(column=2, row=2, sticky=(tk.W))
# Column & row config
self.columnconfigure(0, weight=2, pad=5)
self.columnconfigure(1, weight=1, pad=2)
self.columnconfigure(2, weight=1, pad=5)
self.rowconfigure(0, weight=1, pad=5)
self.rowconfigure(1, weight=1, pad=5)
self.rowconfigure(2, weight=1, pad=5)
def initialiseContents(self):
# Set tracers
# Update energy efficiency data in comparisons frame when
# weights or velocity change
self.cyclistWeight.trace('w', lambda name, index, mode:
energycalc.cyclistEnergyUse(self.parent,
self.cyclistWeight,
self.bikeWeight,
self.velocity))
self.bikeWeight.trace('w', lambda name, index, mode:
energycalc.cyclistEnergyUse(self.parent,
self.cyclistWeight,
self.bikeWeight,
self.velocity))
self.velocity.trace('w', lambda name, index, mode:
energycalc.cyclistEnergyUse(self.parent,
self.cyclistWeight,
self.bikeWeight,
self.velocity))
self.cyclistWeight.set('')
class CarData(ttk.Labelframe):
def __init__(self, parent):
ttk.LabelFrame.__init__(self, parent, text="Car data")
self.parent = parent
self.useCarLookup = tk.StringVar()
self.carManuf = tk.StringVar()
self.carModel = tk.StringVar()
self.carVariant = tk.StringVar()
self.carFuel = tk.StringVar()
self.drivingStyle = tk.StringVar()
self.carEfficiency = tk.StringVar()
self.initialise()
def switchCarLookup(self):
# Turn car lookup options on (use lookup) or off (enter fuel
# type and efficiency manually).
# Invoked by radio buttons.
if self.useCarLookup.get() == 'True':
self.carManufSelect['state'] = 'readonly'
self.carModelSelect['state'] = 'readonly'
self.carVariantSelect['state'] = 'readonly'
self.drivingStyleCombo['state'] = 'readonly'
self.carManufLabel['state'] = 'enabled'
self.carModelLabel['state'] = 'enabled'
self.carVariantLabel['state'] = 'enabled'
self.drivingStyleLabel['state'] = 'enabled'
self.carEfficiencyEntry.grid_remove()
self.carEfficiencyLabel.grid(column=0, row=0)
self.carFuelCombo.grid_remove()
self.carFuelLookupLabel.grid(column=1, row=3, sticky=(tk.W))
self.carEfficiency.set('-')
self.carFuel.set('-')
else:
self.carManufSelect['state'] = 'disabled'
self.carModelSelect['state'] = 'disabled'
self.carVariantSelect['state'] = 'disabled'
self.drivingStyleCombo['state'] = 'disabled'
self.carManufLabel['state'] = 'disabled'
self.carModelLabel['state'] = 'disabled'
self.carVariantLabel['state'] = 'disabled'
self.drivingStyleLabel['state'] = 'disabled'
self.carManuf.set('')
self.carModel.set('')
self.carVariant.set('')
self.drivingStyle.set('')
self.carEfficiencyLabel.grid_remove()
self.carEfficiencyEntry.grid(column=0, row=0)
self.carFuelLookupLabel.grid_remove()
self.carFuelCombo.grid(column=1, row=3, sticky=(tk.W))
def switchModels(self):
# Fill models combobox with relevant items when manufacturer
# is selected or changed. Also empties variants combobox and
# resets selections and lookups.
# Invoked by manufacturer combobox.
if self.carManuf.get() != '':
self.carModelSelect['values'] = \
lists.manufacturerModels[self.carManuf.get()]
self.carVariantSelect['values'] = ''
self.carModel.set('')
self.carVariant.set('')
self.carFuel.set('-')
self.carEfficiency.set('-')
def switchVariants(self):
# Fill variants combobox with relevant items when model is selected
# or changed. Also resets selection and lookups.
# Invoked by model combobox.
if (self.carModel.get() != '' and
self.carModel.get() != 'Select a manufacturer'):
self.carVariantSelect['values'] = \
lists.modelVariants[self.carModel.get()]
self.carVariant.set('')
self.carFuel.set('-')
self.carEfficiency.set('-')
def fuelLookup(self):
# Look up fuel efficiency once driving style and car variant
# are set. Also looks up fuel type from variant.
# Invoked by variant combobox and driving style combobox.
if (self.carVariant.get() != '' and
self.carVariant.get() != 'Select a model'):
self.carFuel.set(lists.lookupFuelType[(self.carManuf.get(),
self.carModel.get(),
self.carVariant.get())])
if self.drivingStyle.get() == 'Urban':
self.carEfficiency.set(lists.lookupUrban
[(self.carManuf.get(),
self.carModel.get(),
self.carVariant.get())])
elif self.drivingStyle.get() == 'Extra urban':
self.carEfficiency.set(lists.lookupExtraUrban
[(self.carManuf.get(),
self.carModel.get(),
self.carVariant.get())])
elif self.drivingStyle.get() == 'Combined':
self.carEfficiency.set(lists.lookupCombined
[(self.carManuf.get(),
self.carModel.get(),
self.carVariant.get())])
def initialise(self):
# Create and pack widgets
# First row
self.useLookupButton = ttk.Radiobutton(self,
text="Use fuel data lookup",
variable=self.useCarLookup,
value='True')
self.useLookupButton.grid(column=0, row=0, columnspan=2, padx=5)
ttk.Radiobutton(self, text="Enter fuel data manually",
variable=self.useCarLookup,
value='False').grid(column=2, row=0, columnspan=2)
# Second row
self.carManufLabel = ttk.Label(self, text="Manufacturer:")
self.carManufLabel.grid(column=0, row=1, sticky=(tk.W), padx=5)
self.carManufSelect = ttk.Combobox(self, textvariable=self.carManuf,
values=lists.manufacturers,
state='readonly', width=20)
self.carManufSelect.grid(column=1, row=1, sticky=(tk.W))
self.carModelLabel = ttk.Label(self, text="Model:")
self.carModelLabel.grid(column=2, row=1, sticky=(tk.W))
self.carModelSelect = ttk.Combobox(self, textvariable=self.carModel,
values=['Select a manufacturer'],
state='readonly', width=24)
self.carModelSelect.grid(column=3, row=1, sticky=(tk.W))
# Third row
self.carVariantLabel = ttk.Label(self, text="Description:")
self.carVariantLabel.grid(column=0, row=2, sticky=(tk.W), padx=5)
self.carVariantSelect = ttk.Combobox(self,
textvariable=self.carVariant,
values=['Select a model'],
state='readonly', width=60)
self.carVariantSelect.grid(column=1, row=2, columnspan=3,
sticky=(tk.W))
# Fourth row
self.carFuelLabel = ttk.Label(self, text="Fuel type:")
self.carFuelLabel.grid(column=0, row=3, sticky=(tk.W), padx=5)
self.carFuelLookupLabel = ttk.Label(self, textvariable=self.carFuel)
self.carFuelLookupLabel.grid(column=1, row=3, sticky=(tk.W))
self.carFuelCombo = ttk.Combobox(self, textvariable=self.carFuel,
values=['Petrol', 'Diesel'],
state='readonly', width=10)
self.drivingStyleLabel = ttk.Label(self, text="Driving style:")
self.drivingStyleLabel.grid(column=2, row=3, sticky=(tk.W))
self.drivingStyleCombo = ttk.Combobox(self,
textvariable=self.drivingStyle,
values=lists.drivingStyles,
state='readonly', width=12)
self.drivingStyleCombo.grid(column=3, row=3, sticky=(tk.W))
# Fifth row
ttk.Label(self, text="Fuel efficiency:").grid(column=0, row=4,
sticky=(tk.W), padx=5)
self.carEfficiencyFrame = ttk.Frame(self)
self.carEfficiencyFrame.grid(column=1, row=4, sticky=(tk.W))
self.carEfficiencyLabel = ttk.Label(self.carEfficiencyFrame,
textvariable=self.carEfficiency,
width=5)
self.carEfficiencyLabel.grid(column=1, row=0, sticky=(tk.W))
self.carEfficiencyEntry = ttk.Entry(self.carEfficiencyFrame,
textvariable=self.carEfficiency,
width=5)
ttk.Label(self.carEfficiencyFrame, text="L/100km").grid(column=2,
row=0,
sticky=(tk.W))
# Column & row config
self.columnconfigure(0, weight=2, pad=5)
self.columnconfigure(1, weight=3, pad=25)
self.columnconfigure(2, weight=2, pad=5)
self.columnconfigure(3, weight=3, pad=5)
self.rowconfigure(0, weight=1, pad=5)
self.rowconfigure(1, weight=1, pad=5)
self.rowconfigure(2, weight=1, pad=5)
self.rowconfigure(3, weight=1, pad=5)
self.rowconfigure(4, weight=1, pad=5)
def initialiseContents(self):
# Set tracers and initialise values
# Turn fields on/off when switching between lookup and manual entry
self.useCarLookup.trace('w', lambda name, index, mode:
self.switchCarLookup())
# Set default option for using fuel data lookup
self.useLookupButton.invoke()
# Change models list when manufacturer changed/reset other fields
self.carManuf.trace('w', lambda name, index, mode:
self.switchModels())
# Change variants list when model changed/reset other fields
self.carModel.trace('w', lambda name, index, mode:
self.switchVariants())
# Look up fuel type and efficiency when variant/driving style set
self.carVariant.trace('w', lambda name, index, mode:
self.fuelLookup())
self.drivingStyle.trace('w', lambda name, index, mode:
self.fuelLookup())
# Update calculations in comparisons frame when fuel type or
# efficiency changed
self.carEfficiency.trace('w', lambda name, index, mode:
energycalc.carEnergyUse(self.parent,
self.carFuel,
self.carEfficiency))
self.carFuel.trace('w', lambda name, index, mode:
energycalc.carEnergyUse(self.parent,
self.carFuel,
self.carEfficiency))
self.carEfficiency.set('-')
class Comparisons(ttk.Labelframe):
def __init__(self, parent):
ttk.LabelFrame.__init__(self, parent, text="Energy use comparison")
self.parent = parent
self.cyclistEnergyUse = tk.StringVar()
self.carEnergyUse = tk.StringVar()
self.cyclistDistance = tk.StringVar()
self.carDistance = tk.StringVar()
self.cyclistCost = tk.StringVar()
self.carCost = tk.StringVar()
self.cyclistDistCost = tk.StringVar()
self.carDistCost = tk.StringVar()
self.cyclistComparisonDist = tk.StringVar()
self.carComparisonDist = tk.StringVar()
self.costPerKmLabel = tk.StringVar()
self.kmPerPoundLabel = tk.StringVar()
self.totalKmLabel = tk.StringVar()
self.initialise()
def initialise(self):
# Define font for italic labels
italics = tkinter.font.Font(font=str(tkinter.font.Font(
font='TkDefaultFont').configure()))
italics['slant'] = 'italic'
italics['size'] = -12
# Create and pack widgets
# First row
ttk.Label(self, text="Cyclist").grid(column=1, row=0)
ttk.Label(self, text=" Car ").grid(column=2, row=0)
# Second row
italicLabel1 = ttk.Label(self,
text="Calculated from cyclist and car data:")
italicLabel1['font'] = italics
italicLabel1.grid(column=0, row=1, sticky=(tk.W), padx=5)
# Third row
ttk.Label(self, text="Fuel kJ used per km travelled (kJ/km)").grid(
column=0, row=2, sticky=(tk.E))
ttk.Label(self, textvariable=self.cyclistEnergyUse).grid(column=1,
row=2)
ttk.Label(self, textvariable=self.carEnergyUse).grid(column=2, row=2)
# Fourth row
ttk.Label(self, text="Meters travelled per kJ fuel (m/kJ)").grid(
column=0, row=3, sticky=(tk.E))
ttk.Label(self, textvariable=self.cyclistDistance).grid(column=1,
row=3)
ttk.Label(self, textvariable=self.carDistance).grid(column=2, row=3)
# Fifth row
italicLabel2 = ttk.Label(self,
text="Calculated from selected fuel comparison options:")
italicLabel2['font'] = italics
italicLabel2.grid(column=0, row=4, sticky=(tk.W), padx=5)
# Sixth row
ttk.Label(self, textvariable=self.costPerKmLabel).grid(column=0,
row=5,
sticky=(tk.E))
ttk.Label(self, textvariable=self.cyclistCost).grid(column=1, row=5)
ttk.Label(self, textvariable=self.carCost).grid(column=2, row=5)
# Seventh row
ttk.Label(self, textvariable=self.kmPerPoundLabel).grid(column=0,
row=6,
sticky=(tk.E))
ttk.Label(self, textvariable=self.cyclistDistCost).grid(column=1,
row=6)
ttk.Label(self, textvariable=self.carDistCost).grid(column=2, row=6)
# Eighth row
ttk.Label(self, textvariable=self.totalKmLabel).grid(column=0, row=7,
sticky=(tk.E))
ttk.Label(self, textvariable=self.cyclistComparisonDist).grid(
column=1, row=7)
ttk.Label(self, textvariable=self.carComparisonDist).grid(column=2,
row=7)
# Info button
ttk.Button(self, text="Read info",
command=self.parent.showInfo).grid(column=3, row=0,
rowspan=8)
# Column & row config
self.columnconfigure(0, weight=5, pad=10)
self.columnconfigure(1, weight=1, pad=10, minsize=70)
self.columnconfigure(2, weight=1, pad=10, minsize=70)
self.columnconfigure(3, weight=1, pad=50)
self.rowconfigure(0, weight=1, pad=5)
self.rowconfigure(1, weight=1, pad=5)
self.rowconfigure(2, weight=1, pad=5)
self.rowconfigure(3, weight=1, pad=10)
self.rowconfigure(4, weight=1, pad=5)
self.rowconfigure(5, weight=1, pad=5)
self.rowconfigure(6, weight=1, pad=5)
self.rowconfigure(7, weight=1, pad=5)
def initialiseContents(self):
# Update fuel comparison calculations when efficiency changes
self.carEnergyUse.trace('w', lambda name, index, mode:
energycalc.comparison(self.parent, 'Car',
self.parent.options.fuelEnergyPerUnit,
self.parent.options.fuelEnergyTotal,
self.parent.options.fuelCost,
self.parent.options.fuelQuantity,
self.carEnergyUse,
self.cyclistEnergyUse))
self.cyclistEnergyUse.trace('w', lambda name, index, mode:
energycalc.comparison(self.parent, 'Cyclist',
self.parent.options.fuelEnergyPerUnit,
self.parent.options.fuelEnergyTotal,
self.parent.options.fuelCost,
self.parent.options.fuelQuantity,
self.carEnergyUse,
self.cyclistEnergyUse))
class InfoWindow(tk.Toplevel):
def __init__(self, parent):
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.blurb = ("CyclistVsCar v0.1 (03/07/2015)\n"
"Written by Meghann Mears\n"
"\n"
"This app estimates and compares the energy efficiency of a\n"
"human cyclist and a car. It also compares how much it would\n"
"cost to run a cyclist on car fuel and vice versa. There are\n"
"a lot of assumptions made in the calculations: it's just for\n"
"fun, so please don't take the figures too seriously!\n"
"\n"
"Cyclist energy efficiency is calculated using the formula \n"
"given by Kerry Irons and makes assumptions about bike type,\n"
"road surface, riding position, gradient, weather conditions\n"
"and many other factors. Internet forums seem to suggest that\n"
"this formula give reasonable results, however.\n"
"\n"
"Car fuel efficiencies are from the (current) latest version\n"
"of data provided by the UK Government, and only includes cars\n"
"for sale as of August 2014. For older cars you will need to\n"
"look up and enter efficiency data manually. It's generally\n"
"accepted that the figures obtained in official testing give\n"
"much better efficiency than is seen in normal car use.\n"
"\n"
"I haven't (yet) put validation on the text entry boxes. If you\n"
"try to put something other than a positive number in, it won't\n"
"work and you won't get an error message (except in console).\n"
"\n"
"Links:\n"
"\n"
"Car efficiency data & explanation of methods:\n"
"http://carfueldata.direct.gov.uk/downloads/default.aspx\n"
"http://www.dft.gov.uk/vca/fcb/faqs-fuel-consumptio.asp\n"
"\n"
"Kerry Irons formula & discussion:\n"
"http://forums.roadbikereview.com/racing-training-nutrition-\n"
"triathlons/calories-burned-per-mile-formula-question-28863."
"html\n"
"\n"
"Project on GitHub:\n"
"https://github.com/MeghannMears/CyclistVsCar")
self.initialise()
def initialise(self):
ttk.Label(self, text=self.blurb).grid(row=0, column=0)
ttk.Button(self, text="Close info",
command=self.destroy).grid(row=1, column=0)
self.columnconfigure(0, pad=10)
self.rowconfigure(0, pad=10)
self.rowconfigure(1, pad=10)
class MainApplication(ttk.Frame):
def __init__(self, parent, *args, **kwargs):
ttk.Frame.__init__(self, parent, *args, **kwargs)
self.parent = parent
self.initialise()
def showInfo(self):
# Opens information window when button is pressed
InfoWindow(self)
def initialise(self):
# Initialise
self.options = Options(self)
self.cyclistData = CyclistData(self)
self.carData = CarData(self)
self.comparisons = Comparisons(self)
# Pack
self.options.grid(column=1, row=1)
self.cyclistData.grid(column=0, row=1)
self.carData.grid(column=0, row=0, columnspan=2)
self.comparisons.grid(column=0, row=2, columnspan=2)
# Column & row config
self.columnconfigure(0, weight=1, pad=10)
self.columnconfigure(1, weight=3, pad=10)
self.rowconfigure(0, weight=2, pad=10)
self.rowconfigure(1, weight=1, pad=10)
self.rowconfigure(2, weight=3, pad=10)
# Run method to set tracers & initialise cvalues
self.initialiseContents()
def initialiseContents(self):
# Sets tracers and initialises values.
# Separated from initialise methods due to referencing objects
# that haven't yet been created.
self.options.initialiseContents()
self.carData.initialiseContents()
self.cyclistData.initialiseContents()
self.comparisons.initialiseContents()
if __name__ == "__main__":
root = tk.Tk()
root.title('CyclistVsCar')
root.resizable(width=False, height=False)
MainApplication(root).grid(row=0, column=0)
root.mainloop()
|
{
"content_hash": "ac2a3bed5141a409dfe1266cf2f1f0c7",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 106,
"avg_line_length": 51.413636363636364,
"alnum_prop": 0.5150148822679987,
"repo_name": "MeghannMears/CyclistVsCar",
"id": "1671790091a787e58a1b0e4139d35dac08b57e6f",
"size": "33940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40351"
}
],
"symlink_target": ""
}
|
import sys
import asyncio
import telepot
from telepot.aio.helper import InlineUserHandler, AnswererMixin
from telepot.aio.delegate import per_inline_from_id, create_open, pave_event_space
"""
$ python3.5 inlinea.py <token>
It demonstrates answering inline query and getting chosen inline results.
"""
class InlineHandler(InlineUserHandler, AnswererMixin):
def __init__(self, *args, **kwargs):
super(InlineHandler, self).__init__(*args, **kwargs)
def on_inline_query(self, msg):
def compute_answer():
query_id, from_id, query_string = telepot.glance(msg, flavor='inline_query')
print(self.id, ':', 'Inline Query:', query_id, from_id, query_string)
articles = [{'type': 'article',
'id': 'abc', 'title': query_string, 'message_text': query_string}]
return articles
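        # The Answerer provided by AnswererMixin runs compute_answer and
        # replies to the inline query, discarding outdated queries from the
        # same user.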
self.answerer.answer(msg, compute_answer)
def on_chosen_inline_result(self, msg):
from pprint import pprint
pprint(msg)
result_id, from_id, query_string = telepot.glance(msg, flavor='chosen_inline_result')
print(self.id, ':', 'Chosen Inline Result:', result_id, from_id, query_string)
TOKEN = sys.argv[1]
bot = telepot.aio.DelegatorBot(TOKEN, [
pave_event_space()(
per_inline_from_id(), create_open, InlineHandler, timeout=10),
])
loop = asyncio.get_event_loop()
loop.create_task(bot.message_loop())
print('Listening ...')
loop.run_forever()
|
{
"content_hash": "29ac3f31e8311093573eab7aa031556a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 95,
"avg_line_length": 32.93478260869565,
"alnum_prop": 0.6607260726072607,
"repo_name": "TEJESH/gandhi",
"id": "f064b729effb117acbfbc096cb2cdcde47475245",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goodreads/test2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66693"
}
],
"symlink_target": ""
}
|
import datetime
from django.utils.translation import ugettext_lazy as _
from google.appengine.ext import db as models
from ragendja.auth.google_models import GoogleUserTraits
from common.models import DeletedMarkerModel
from common import properties
from common.models import PRIVACY_PRIVATE, PRIVACY_CONTACTS, PRIVACY_PUBLIC, _get_actor_urlnick_from_nick, actor_url
from django.conf import settings
import logging
ACTOR_ALLOWED_EXTRA = ('contact_count',
'follower_count',
'icon',
'description',
'member_count',
'admin_count',
'given_name',
'family_name'
)
ACTOR_LIMITED_EXTRA = ('icon',
'description',
'given_name',
'family_name'
)
class User(DeletedMarkerModel, GoogleUserTraits):
"""
extra:
channel_count - int; number of channels
contact_count - int; number of contacts
follower_count - int; number of followers
icon - string; avatar path
bg_image - string; image for background (takes precedence over bg_color)
bg_color - string; color for background
bg_repeat - whether to repeat bg_image
description [channel] - string; Channel description
external_url [channel] - string; External url related ot channel
member_count [channel] - int; number of members
admin_count [channel] - int; number of admins
email_notify [user] - boolean; does the user want email notifications?
given_name [user] - string; First name
family_name [user] - string; Last Name
comments_hide [user] - boolean; Whether comments should be hidden on
overview
"""
nick = models.StringProperty()
  # The appengine datastore is case-sensitive whereas human brains are not:
  # "Paul" is no different from "paul" to regular people. To prevent such
  # duplicate names from cropping up, this adds an additional indexed
  # property to support that.
normalized_nick = models.StringProperty()
privacy = models.IntegerProperty()
type = models.StringProperty()
extra = properties.DictProperty()
# avatar_updated_at is used by DJabberd to get a list of changed avatar. We
# set the default to a date before the launch so that initial avatars have an
# updated_at that is less than any real changes.
avatar_updated_at = properties.DateTimeProperty(
default=datetime.datetime(2009, 01, 01))
key_template = 'actor/%(nick)s'
def url(self, path="", request=None, mobile=False):
""" returns a url, with optional path appended
NOTE: if appending a path, it should start with '/'
"""
return actor_url(_get_actor_urlnick_from_nick(self.nick),
self.type,
path=path,
request=request,
mobile=mobile)
def shortnick(self):
return _get_actor_urlnick_from_nick(self.nick)
  def display_nick(self):
    return self.nick.split("@")[0]
def to_api(self):
rv = super(User, self).to_api()
del rv['password']
del rv['normalized_nick']
extra = {}
for k, v in rv['extra'].iteritems():
if k in ACTOR_ALLOWED_EXTRA:
extra[k] = v
rv['extra'] = extra
return rv
def to_api_limited(self):
rv = self.to_api()
extra = {}
for k, v in rv['extra'].iteritems():
if k in ACTOR_LIMITED_EXTRA:
extra[k] = v
rv['extra'] = extra
return rv
def is_channel(self):
return self.type == 'channel'
def is_public(self):
return self.privacy == PRIVACY_PUBLIC
def is_restricted(self):
return self.privacy == PRIVACY_CONTACTS
def __repr__(self):
# Get all properties, but not directly as property objects, because
# constructor requires values to be passed in.
d = dict([(k, self.__getattribute__(k)) for k in self.properties().keys()])
return "%s(**%s)" % (self.__class__.__name__, repr(d))
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
@classmethod
def create_djangouser_for_user(cls, user):
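    # Look up an existing actor by e-mail first; otherwise create a new
    # account, associate the e-mail with it and post a welcome message.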
from common import api
actor_ref = api.actor_lookup_email(api.ROOT, user.email())
if actor_ref:
return actor_ref
params = {'nick': user.nickname(), 'password': "NOPASSWORD", 'first_name': user.nickname(), 'last_name': user.nickname()}
actor_ref = api.user_create(api.ROOT, **params)
actor_ref.access_level = "delete"
relation_ref = api.email_associate(api.ROOT, actor_ref.nick, user.email())
api.post(actor_ref,
nick=actor_ref.nick,
message='Joined %s!' % (settings.SITE_NAME),
icon='jaiku-new-user')
return actor_ref
|
{
"content_hash": "04796e7a2e4f5a4b9931cec4c3ed6011",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 126,
"avg_line_length": 34.992805755395686,
"alnum_prop": 0.6252055921052632,
"repo_name": "tallstreet/jaikuenginepatch",
"id": "39565613539769456ebfe02a7d84c4ff7df9d95f",
"size": "4864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/user_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "115039"
},
{
"name": "Python",
"bytes": "1011754"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "5208"
}
],
"symlink_target": ""
}
|
"""Tests for the Cloudflare integration."""
from typing import List
from unittest.mock import AsyncMock, patch
from pycfdns import CFRecord
from homeassistant.components.cloudflare.const import CONF_RECORDS, DOMAIN
from homeassistant.const import CONF_API_TOKEN, CONF_ZONE
from tests.common import MockConfigEntry
ENTRY_CONFIG = {
CONF_API_TOKEN: "mock-api-token",
CONF_ZONE: "mock.com",
CONF_RECORDS: ["ha.mock.com", "homeassistant.mock.com"],
}
ENTRY_OPTIONS = {}
USER_INPUT = {
CONF_API_TOKEN: "mock-api-token",
}
USER_INPUT_ZONE = {CONF_ZONE: "mock.com"}
USER_INPUT_RECORDS = {CONF_RECORDS: ["ha.mock.com", "homeassistant.mock.com"]}
MOCK_ZONE = "mock.com"
MOCK_ZONE_ID = "mock-zone-id"
MOCK_ZONE_RECORDS = [
{
"id": "zone-record-id",
"type": "A",
"name": "ha.mock.com",
"proxied": True,
"content": "127.0.0.1",
},
{
"id": "zone-record-id-2",
"type": "A",
"name": "homeassistant.mock.com",
"proxied": True,
"content": "127.0.0.1",
},
{
"id": "zone-record-id-3",
"type": "A",
"name": "mock.com",
"proxied": True,
"content": "127.0.0.1",
},
]
async def init_integration(
hass,
*,
data: dict = ENTRY_CONFIG,
options: dict = ENTRY_OPTIONS,
) -> MockConfigEntry:
"""Set up the Cloudflare integration in Home Assistant."""
entry = MockConfigEntry(domain=DOMAIN, data=data, options=options)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
def _get_mock_cfupdate(
zone: str = MOCK_ZONE,
zone_id: str = MOCK_ZONE_ID,
records: List = MOCK_ZONE_RECORDS,
):
client = AsyncMock()
zone_records = [record["name"] for record in records]
cf_records = [CFRecord(record) for record in records]
client.get_zones = AsyncMock(return_value=[zone])
client.get_zone_records = AsyncMock(return_value=zone_records)
client.get_record_info = AsyncMock(return_value=cf_records)
client.get_zone_id = AsyncMock(return_value=zone_id)
client.update_records = AsyncMock(return_value=None)
return client
def _patch_async_setup(return_value=True):
return patch(
"homeassistant.components.cloudflare.async_setup",
return_value=return_value,
)
def _patch_async_setup_entry(return_value=True):
return patch(
"homeassistant.components.cloudflare.async_setup_entry",
return_value=return_value,
)
|
{
"content_hash": "441816d9c5ca79b4fa7d7c61bdae7914",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 78,
"avg_line_length": 25.287128712871286,
"alnum_prop": 0.6366483946750195,
"repo_name": "partofthething/home-assistant",
"id": "c72a9cd84b01267b777e7292041967d66eb3913f",
"size": "2554",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/cloudflare/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
import argparse
import json
import logging
import textwrap
from os_cloud_config.cmd.utils import _clients
from os_cloud_config.cmd.utils import environment
from os_cloud_config import nodes
def parse_args():
description = textwrap.dedent("""
Register nodes with either Ironic or Nova-baremetal.
The JSON nodes file contains a list of node metadata. Each list item is
a JSON object describing one node, which has "memory" in KB, "cpu" in
threads, "arch" (one of i386/amd64/etc), "disk" in GB, "mac" a list of
MAC addresses for the node, and "pm_type", "pm_user", "pm_addr" and
"pm_password" describing power management details.
Ironic will be used if the Ironic service is registered with Keystone.
This program will wait up to 10 minutes for the baremetal service to
register a node.
""")
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description)
parser.add_argument('-s', '--service-host', dest='service_host',
help='Nova compute service host to register nodes '
'with')
parser.add_argument('-n', '--nodes', dest='nodes', required=True,
help='A JSON file containing a list of nodes that '
'are intended to be registered')
parser.add_argument('-r', '--remove', dest='remove', action='store_true',
help='Remove all unspecified nodes from the baremetal '
'service. Use with extreme caution!')
environment._add_logging_arguments(parser)
return parser.parse_args()
def main():
args = parse_args()
environment._configure_logging(args)
try:
with open(args.nodes, 'r') as node_file:
nodes_list = json.load(node_file)
environment._ensure()
keystone_client = _clients.get_keystone_client()
if nodes.using_ironic(keystone=keystone_client):
client = _clients.get_ironic_client()
else:
client = _clients.get_nova_bm_client()
nodes.register_all_nodes(
args.service_host, nodes_list, client=client, remove=args.remove,
blocking=True)
except Exception:
logging.exception("Unexpected error during command execution")
return 1
return 0
|
{
"content_hash": "e20c6924114bca26279fc8db276f5b9d",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 37.41269841269841,
"alnum_prop": 0.6419176919813322,
"repo_name": "shakamunyi/os-cloud-config",
"id": "5d80fd5b5107ca383c64f2f6bb10a822f71de446",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "os_cloud_config/cmd/register_nodes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154251"
}
],
"symlink_target": ""
}
|
import os
import argparse
import gettext
t = gettext.translation('globofs', os.path.join(os.path.dirname(__file__), 'locale'), fallback=True)
_ = t.ugettext
def run():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help=_('Commands'), dest='command')
# A list command
list_parser = subparsers.add_parser(
name='list', help=_('List contents')
)
list_parser.add_argument(
'dirname',
action='store',
help=_('Directory to list'))
# A create command
create_parser = subparsers.add_parser(
name='create', help=_('Create a directory'))
create_parser.add_argument(
'dirname',
action='store',
help=_('New directory to create'))
create_parser.add_argument(
'--read-only', default=False, action='store_true',
help=_('Set permissions to prevent writing to the directory')
)
# A delete command
delete_parser = subparsers.add_parser(
name='delete',
help=_('Remove a directory'))
delete_parser.add_argument(
'dirname',
action='store',
help=_('The directory to remove'))
delete_parser.add_argument(
'--recursive', '-r', default=False, action='store_true',
help=_('Remove the contents of the directory, too'),
)
print parser.parse_args()
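# Example (illustrative): `python console.py create /tmp/demo --read-only`
# prints Namespace(command='create', dirname='/tmp/demo', read_only=True).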
if __name__ == "__main__":
run()
|
{
"content_hash": "08d1f3dcf97cb0b4ae551cc21b25ee40",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 100,
"avg_line_length": 25.418181818181818,
"alnum_prop": 0.6015736766809728,
"repo_name": "GloboFS/GloboFS",
"id": "504071df8ef741b7ad0a2cfb09a4a1b7cdc78ab3",
"size": "1445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "globofs/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11129"
}
],
"symlink_target": ""
}
|
"""Save a subset of lines from an input file; start at offset and count n lines.

By default, copies 100 lines starting from offset 0.
"""
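# Usage (illustrative): `python subset.py train.csv sample.csv 1000 500`
# copies 500 lines of train.csv, skipping the first 1000 lines.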
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
try:
offset = int( sys.argv[3] )
except IndexError:
offset = 0
try:
lines = int( sys.argv[4] )
except IndexError:
lines = 100
i = open( input_file )
o = open( output_file, 'wb' )
count = 0
for line in i:
if offset > 0:
offset -= 1
continue
o.write( line )
count += 1
if count >= lines:
break
|
{
"content_hash": "07a289abab0715285db55cd3065550df",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 12.357142857142858,
"alnum_prop": 0.6204238921001927,
"repo_name": "amunategui/phraug",
"id": "993b55443e8e47239b0ab6c77d5bc58cb6333398",
"size": "519",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "subset.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Device',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('device_name', models.CharField(max_length=25, validators=[django.core.validators.RegexValidator(message='Invalid name', regex='^[a-zA-Z0-9_-]{1,25}$')])),
('device_ip', models.GenericIPAddressField(protocol='IPv4', unique=True)),
('description', models.CharField(blank=True, max_length=255)),
('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Date added')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last update')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Interface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('interface_type', models.CharField(choices=[('WLP', 'Wifi'), ('ETH', 'Ethernet')], default='ETH', max_length=3)),
('mac_address', models.CharField(max_length=17, unique=True, validators=[django.core.validators.RegexValidator(message='Invalid MAC address', regex='^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$')])),
('description', models.CharField(blank=True, max_length=255)),
('add_date', models.DateTimeField(auto_now_add=True, verbose_name='Date added')),
('last_modified', models.DateTimeField(auto_now=True, verbose_name='Last update')),
('device', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='network.Device')),
],
),
]
|
{
"content_hash": "982a63d9f00bf1230cc7f157f804a4e0",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 209,
"avg_line_length": 52.285714285714285,
"alnum_prop": 0.6174863387978142,
"repo_name": "Atilla106/members.atilla.org",
"id": "715f91fbcbe480d2b8d60ee651c857984191a594",
"size": "2270",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "network/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131677"
},
{
"name": "HTML",
"bytes": "18670"
},
{
"name": "JavaScript",
"bytes": "228210"
},
{
"name": "Python",
"bytes": "83078"
},
{
"name": "Ruby",
"bytes": "382"
},
{
"name": "Shell",
"bytes": "1928"
}
],
"symlink_target": ""
}
|
import os, sys, time
import urllib2
def main():
# host = "http://localhost:30001/control?port=/dev/ttyUSB0"
host = "http://localhost:30001/control?port=/dev/ttyUSB0&dtr=1&rts=0"
try:
req = urllib2.urlopen(host)
print("Reply data:")
print(req.read())
except Exception as e:
print("exception occurred:")
print(e)
return 1
if __name__ == '__main__':
main()
|
{
"content_hash": "446b87fd3c5be5c4db49477553a87197",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 23.555555555555557,
"alnum_prop": 0.5801886792452831,
"repo_name": "IECS/MansOS",
"id": "e15349b386968723821f5416f370db7d91451234",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/remoteaccess/tests/02-control.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2207767"
},
{
"name": "C++",
"bytes": "105213"
},
{
"name": "EmberScript",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "2485"
},
{
"name": "Makefile",
"bytes": "338645"
},
{
"name": "Objective-C",
"bytes": "23658"
},
{
"name": "PHP",
"bytes": "31840"
},
{
"name": "Perl",
"bytes": "17357"
},
{
"name": "Python",
"bytes": "208619"
},
{
"name": "Slash",
"bytes": "3318"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._management_policies_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagementPoliciesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2022_09_01.aio.StorageManagementClient`'s
:attr:`management_policies` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, _models.ManagementPolicyName],
**kwargs: Any
) -> _models.ManagementPolicy:
"""Gets the managementpolicy associated with the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'. "default" Required.
:type management_policy_name: str or
~azure.mgmt.storage.v2022_09_01.models.ManagementPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01")) # type: Literal["2022-09-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagementPolicy]
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
management_policy_name=management_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagementPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, _models.ManagementPolicyName],
properties: _models.ManagementPolicy,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagementPolicy:
"""Sets the managementpolicy to the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'. "default" Required.
:type management_policy_name: str or
~azure.mgmt.storage.v2022_09_01.models.ManagementPolicyName
:param properties: The ManagementPolicy set to a storage account. Required.
:type properties: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, _models.ManagementPolicyName],
properties: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagementPolicy:
"""Sets the managementpolicy to the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'. "default" Required.
:type management_policy_name: str or
~azure.mgmt.storage.v2022_09_01.models.ManagementPolicyName
:param properties: The ManagementPolicy set to a storage account. Required.
:type properties: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, _models.ManagementPolicyName],
properties: Union[_models.ManagementPolicy, IO],
**kwargs: Any
) -> _models.ManagementPolicy:
"""Sets the managementpolicy to the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'. "default" Required.
:type management_policy_name: str or
~azure.mgmt.storage.v2022_09_01.models.ManagementPolicyName
:param properties: The ManagementPolicy set to a storage account. Is either a model type or a
IO type. Required.
:type properties: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagementPolicy or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2022_09_01.models.ManagementPolicy
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01")) # type: Literal["2022-09-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagementPolicy]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(properties, (IO, bytes)):
_content = properties
else:
_json = self._serialize.body(properties, "ManagementPolicy")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
management_policy_name=management_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagementPolicy", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
management_policy_name: Union[str, _models.ManagementPolicyName],
**kwargs: Any
) -> None:
"""Deletes the managementpolicy associated with the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param management_policy_name: The name of the Storage Account Management Policy. It should
always be 'default'. "default" Required.
:type management_policy_name: str or
~azure.mgmt.storage.v2022_09_01.models.ManagementPolicyName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-09-01")) # type: Literal["2022-09-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
management_policy_name=management_policy_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/managementPolicies/{managementPolicyName}"} # type: ignore
|
{
"content_hash": "a24fc1e3c823356946ebf7dbb6a19262",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 225,
"avg_line_length": 46.04532577903683,
"alnum_prop": 0.6651285837332349,
"repo_name": "Azure/azure-sdk-for-python",
"id": "689b92d93bde1f0c1283f56ec3340110e01a1aa0",
"size": "16754",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2022_09_01/aio/operations/_management_policies_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication, BasicAuthentication, SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from .models import Version
from .serializers import VersionSerializer
class VersionViewSet(viewsets.ModelViewSet):
authentication_classes = (TokenAuthentication, BasicAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
queryset = Version.objects.all()
serializer_class = VersionSerializer
# def get_queryset(self):
# version = self.kwargs.get('version_pk', None)
# Version = self.kwargs.get('Version_pk', None)
# if version and Version:
# return Version.objects.filter(version=version)
# return super().get_queryset()
|
{
"content_hash": "0c05b839298b272116b2dc3072576cf2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 105,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.7512315270935961,
"repo_name": "bellhops/TapeDeck",
"id": "b81cc2aa4b759b825dd8b6ea8fc12f6853bd4f62",
"size": "812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapedeck/version/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14187"
},
{
"name": "Shell",
"bytes": "2604"
}
],
"symlink_target": ""
}
|
"""
AIR compilation environment.
"""
from __future__ import print_function, division, absolute_import
# Any state that should persist between passes end up in the environment, and
# should be documented here
air_env = {
# blaze expression graph
#'expr_graph': None,
# strategy determined for each Op: { Op : strategy }
# For instance different sub-expressions may be execution in different
# environments
'strategies': None,
# Runtime input arguments
'runtime.args': None,
# Set by partitioning pass, indicates for each Op and strategy which
# overload should be used. { (Op, strategy) : Overload }
'kernel.overloads': None,
# storage passed in to blaze.eval(). This is where we store the result
'storage': None,
# Implementation for each op: { Op: Overload }
# This is set by assemblage.py
#'kernel.impls': None,
}
def fresh_env(expr, storage, debug=False):
"""
Allocate a new environment.
"""
env = dict(air_env)
env['storage'] = storage
env['debug'] = debug
return env
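# Typical use (names illustrative): env = fresh_env(expr_graph, storage=None,
# debug=True); passes then share state through the keys documented in
# `air_env` above.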
|
{
"content_hash": "cdc0dec64111d87bb9b4e5a0f5370e06",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 26.878048780487806,
"alnum_prop": 0.6451905626134301,
"repo_name": "aaronmartin0303/blaze",
"id": "a33c9f6bd51e9a6e6562adcb89aa150298cf19b6",
"size": "1102",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blaze/compute/air/environment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import collections
from supriya.tools.ugentools.UGen import UGen
class OffsetOut(UGen):
r'''A bus output unit generator with sample-accurate timing.
::
>>> source = ugentools.SinOsc.ar()
>>> ugentools.OffsetOut.ar(
... bus=0,
... source=source,
... )
OffsetOut.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Input/Output UGens'
__slots__ = ()
_ordered_input_names = (
'bus',
)
_unexpanded_input_names = (
'source',
)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
bus=0,
source=None,
):
UGen.__init__(
self,
bus=bus,
calculation_rate=calculation_rate,
)
if not isinstance(source, collections.Sequence):
source = [source]
for single_source in source:
self._configure_input('source', single_source)
### PRIVATE METHODS ###
def _get_outputs(self):
return []
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
bus=0,
source=None,
):
r'''Constructs a sample-accurately-timed audio-rate bus output.
::
>>> source = ugentools.SinOsc.ar(frequency=[440, 442])
>>> offset_out = ugentools.OffsetOut.ar(
... bus=0,
... source=source,
... )
>>> offset_out
OffsetOut.ar()
Returns ugen graph.
'''
from supriya.tools import servertools
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
prototype = (
servertools.Bus,
servertools.BusGroup,
servertools.BusProxy,
)
if isinstance(bus, prototype):
bus = int(bus)
return cls._new_expanded(
bus=bus,
calculation_rate=calculation_rate,
source=source,
)
### PUBLIC PROPERTIES ###
@property
def bus(self):
r'''Gets `bus` input of OffsetOut.
::
>>> bus = 0
>>> source = ugentools.WhiteNoise.ar()
>>> offset_out = ugentools.OffsetOut.ar(
... bus=bus,
... source=source,
... )
>>> offset_out.bus
0.0
Returns input.
'''
index = self._ordered_input_names.index('bus')
return self._inputs[index]
|
{
"content_hash": "cee82802c2da32c5d6f0a1c040857b3e",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 71,
"avg_line_length": 22.80701754385965,
"alnum_prop": 0.4826923076923077,
"repo_name": "andrewyoung1991/supriya",
"id": "5f791e85303be1e4a07b63f63ee1760d36f8f458",
"size": "2626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/ugentools/OffsetOut.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
from array import array
from six.moves import collections_abc
import six
from ..operator import Operator
class ContainOperator(Operator):
"""
Asserts if a given value or values can be found
    in another object.
Example::
# Should style
'foo bar' | should.contain('bar')
['foo', 'bar'] | should.contain('bar')
['foo', 'bar'] | should.contain('foo', 'bar')
[{'foo': True}, 'bar'] | should.contain({'foo': True})
# Should style - negation form
'foo bar' | should.do_not.contain('bar')
['foo', 'bar'] | should.do_not.contain('baz')
# Expect style
'foo bar' | expect.to.contain('bar')
['foo', 'bar'] | expect.to.contain('bar')
['foo', 'bar'] | expect.to.contain('foo', 'bar')
[{'foo': True}, 'bar'] | expect.to.contain({'foo': True})
# Expect style - negation form
'foo bar' | expect.to_not.contain('bar')
['foo', 'bar'] | expect.to_not.contain('baz')
"""
# Is the operator a keyword
kind = Operator.Type.MATCHER
# Enable diff report
show_diff = True
# Operator keywords
operators = ('contain', 'contains', 'includes')
# Operator chain aliases
aliases = ('value', 'item', 'string', 'text', 'expression', 'data')
# Expected template message
expected_message = Operator.Dsl.Message(
'a value that contains "{value}"',
        'a value that does not contain "{value}"',
)
# Subject template message
subject_message = Operator.Dsl.Message(
'a value of type "{type}" with content "{value}"',
)
# Stores types to normalize before the assertion
NORMALIZE_TYPES = (
collections_abc.Iterator,
collections_abc.MappingView,
collections_abc.Set,
array
)
LIST_TYPES = (tuple, list, set, array)
def match(self, subject, *values):
if isinstance(subject, self.NORMALIZE_TYPES):
subject = list(subject)
elif isinstance(subject, collections_abc.Mapping):
subject = list(subject.values())
if not isinstance(subject, collections_abc.Sequence):
return False, ['is not a valid sequence type']
reasons = []
if len(values) == 1 and isinstance(values[0], self.LIST_TYPES):
values = list(values[0])
for value in values:
matches_any, reason = self._matches_any(value, subject)
reasons.append(reason)
if not matches_any:
return False, [reason]
return True, reasons
def _matches_any(self, expected, subject):
if len(subject) == 0:
return False, 'empty item'
if isinstance(subject, six.string_types):
if expected in subject:
return True, 'item {0!r} found'.format(expected)
return False, 'item {0!r} not found'.format(expected)
for item in subject:
if item == expected:
return True, 'item {0!r} found'.format(expected)
return False, 'item {0!r} not found'.format(expected)
|
{
"content_hash": "55707fca57237644f79c33004aea8f6d",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 71,
"avg_line_length": 29.61904761904762,
"alnum_prop": 0.577491961414791,
"repo_name": "grappa-py/grappa",
"id": "6da10c97120500f561682e3b7e57915195484c7c",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grappa/operators/contain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1520"
},
{
"name": "Python",
"bytes": "144569"
}
],
"symlink_target": ""
}
|
"""Display the aggregated feeds."""
from datetime import date
from .database import Entry
from .utils import expose
from .utils import Pagination
from .utils import render_template
#: number of items per page
PER_PAGE = 30
@expose("/", defaults={"page": 1})
@expose("/page/<int:page>")
def index(request, page):
"""Show the index page or any an offset of it."""
days = []
days_found = set()
query = Entry.query.order_by(Entry.pub_date.desc())
pagination = Pagination(query, PER_PAGE, page, "index")
for entry in pagination.entries:
day = date(*entry.pub_date.timetuple()[:3])
if day not in days_found:
days_found.add(day)
days.append({"date": day, "entries": []})
days[-1]["entries"].append(entry)
return render_template("index.html", days=days, pagination=pagination)
@expose("/about")
def about(request):
"""Show the about page, so that we have another view func ;-)"""
return render_template("about.html")
|
{
"content_hash": "f1637fcc556ddda3f4daa56979e8a6dd",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 74,
"avg_line_length": 29.41176470588235,
"alnum_prop": 0.649,
"repo_name": "fkazimierczak/werkzeug",
"id": "1729f9a23a080636fda99b6817e5e756722468c7",
"size": "1000",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/plnt/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6705"
},
{
"name": "HTML",
"bytes": "124"
},
{
"name": "JavaScript",
"bytes": "10524"
},
{
"name": "Python",
"bytes": "1136488"
}
],
"symlink_target": ""
}
|
import sys
from pylint.lint import Run
min_score = 10
results = Run(['pypoabus', 'tests'], exit=False)
global_note = results.linter.stats['global_note']
if global_note >= min_score:
print('Minimum score reached! min_score = {}'.format(str(min_score)))
else:
print('Minimum score not reached! min_score = {}'.format(str(min_score)))
sys.exit(1)
|
{
"content_hash": "0fee70532fde038ade0ae96ae29e50a9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 77,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.6805555555555556,
"repo_name": "jonathadv/PyPoABus",
"id": "be06b7fead13ffdb77bb1c2eaf81637f9a4e20db",
"size": "360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lint.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14959"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import os
class Dict(object):
"""
Dict utils.
"""
@staticmethod
    def flat_dict(d, delimiter="/", start_char="^", end_char="$", key="", out=None):
        """
        Flattens a hierarchical dict.
        """
        # Default to None rather than a mutable {} so that results do not
        # leak between calls.
        if out is None:
            out = {}
        for k, v in d.iteritems():
            new_key = key + delimiter + k
            if isinstance(v, dict):
                Dict.flat_dict(v, delimiter, start_char, end_char, new_key, out)
            else:
                out[start_char + new_key + end_char] = v
        return out
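    # e.g. Dict.flat_dict({"a": {"b": 1}}) returns {"^/a/b$": 1}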
@staticmethod
def merge(d1, d2):
"""
Deep merge of two dicts.
"""
for k1,v1 in d1.iteritems():
if not k1 in d2:
d2[k1] = v1
elif isinstance(v1, list):
d2[k1] = d2[k1] + v1
elif isinstance(v1, dict):
Dict.merge(v1, d2[k1])
return d2
class Boolean(object):
@staticmethod
def ternary(cond, t, f):
"""
        Use it if you need a ternary operator.
"""
return (cond and [t] or [f])[0]
class Enum(object):
"""
Creates enum.
"""
    def __init__(self, **enums):
        # Copy before assigning so that the '_enums' attribute itself does
        # not leak into the enum members.
        self._enums = dict(enums)
        self.__dict__.update(enums)
def get(self, name):
return self._enums[name]
def get_by_value(self, value):
if not self.has_value(value):
raise LookupError("Enum does not contain value %s" % value)
return value
def has_value(self, value):
return value in self._enums.values()
def has_key(self, key):
return key in self._enums.keys()
class Struct(object):
"""
Dict to object conversion.
"""
def __init__(self, **entries):
self.__dict__.update(entries)
class Regex(object):
"""
Various regex utils
"""
@staticmethod
def string_url_regex(str_name):
"""
        Returns regex for url mapping with string param
"""
return r"""(?P<%s>[^ \,\:\;\"\\/']+)""" % str_name
@staticmethod
def number_url_regex(num_name):
"""
Returns regex for url mapping with int param
"""
return r"""(?P<%s>\d+)""" % num_name
|
{
"content_hash": "8186d93f948a02b824fac2806a1de63f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 19.236559139784948,
"alnum_prop": 0.6003353828954723,
"repo_name": "stanislavfeldman/putils",
"id": "e0ea88fc58a3cc681604d05196868d215d501265",
"size": "1789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "putils/types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5580"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta
from abc import abstractmethod
import signal
import sys
import traceback
EXECUTION_TIME_LIMIT_IN_SECONDS = 2
def signal_handler(signum, frame):
raise Exception("Execution timed out!")
class Seller(object):
"""
Abstract class of a simulation game seller. Defines a common interface for every team
"""
__metaclass__ = ABCMeta
def __init__(self):
self.time_out = False
def get_price(self, t, inventory_h, price_h, price_scale, horizon, num_buyers):
# Limit execution time of the pricing function to 1 second
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(EXECUTION_TIME_LIMIT_IN_SECONDS)
try:
return self._get_price_impl(t, inventory_h, price_h, price_scale, horizon, num_buyers)
except Exception as e:
print "Exception thrown in seller {0}!".format(self.get_name())
print traceback.print_exc(file=sys.stdout)
self.time_out = True
return 1e7
@abstractmethod
def _get_price_impl(self, t, inventory_h, price_h, price_scale, horizon, num_buyers):
"""
Queries the seller for the price it's going to post for the current time step
:param t: current time step
:param inventory_h: the vector (x_0, X_1, ..., X_{t-1}) of the past evolution of the seller's inventory
:param price_h: the vector (p_1, ..., p_{t-1}) of the past evolution of the seller's prices
:param price_scale: the parameter of the Exponential distribution of reserve prices. Check numpy.random
documentation for the exact definition. Note that the mean of this distribution equals price_scale
:param horizon: length of a game
:param num_buyers: number of buyers
:return: the price the seller posts now
"""
return
@abstractmethod
def get_name(self):
"""
Returns the name of the seller. Helps in analyzing results
"""
return
class DummySeller(Seller):
"""
A dummy seller bot for the purposes of testing the game
"""
def get_name(self):
return "DummySeller(" + self.name + ")"
def __init__(self, name, constant_price=1.):
self.name = name
self.constant_price = constant_price
def _get_price_impl(self, t, inventory_h, price_h, price_scale, horizon, num_buyers):
"""
This seller always posts a fixed price regardless of anything
"""
return self.constant_price
|
{
"content_hash": "a79d4413fca82d459dff224d8a07882f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 126,
"avg_line_length": 34.36486486486486,
"alnum_prop": 0.635863153755407,
"repo_name": "gutin/DynamicPricingGame",
"id": "46255167af3339d37246391a64cd75f305f13919",
"size": "2543",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simulation_game/seller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3461"
},
{
"name": "Python",
"bytes": "15376"
}
],
"symlink_target": ""
}
|
import json
import Image, ImageDraw, math
def generate_image(jsonData, sizeX=0, sizeY=0):
"""Parses the JSON data from signature-panel to produce a bitmap image"""
if sizeX < 0:
raise ValueError("sizeX must be >= 0")
if sizeY < 0:
raise ValueError("sizeY must be >= 0")
data = json.loads(jsonData)
if data["dataVersion"] != 1:
raise ValueError("Unsupported data version")
if data["canvasWidth"] <= 0:
raise ValueError("Invalid canvasWidth")
if data["canvasHeight"] <= 0:
raise ValueError("Invalid canvasHeight")
if (sizeX == 0 or sizeY == 0):
sizeX = data["canvasWidth"]
sizeY = data["canvasHeight"]
scalingFactor = min([
sizeX / float(data["canvasWidth"]),
sizeY / float(data["canvasHeight"])])
penColor = data["penColor"]
penWidth = max([data["penWidth"] * scalingFactor, 1])
polylines = []
for event in data["clickstream"]:
pt = (event["x"] * scalingFactor, event["y"] * scalingFactor)
if event["action"] in ["gestureStart", "gestureResume"]:
polylines.append([pt])
elif event["action"] in ["gestureContinue", "gestureSuspend"]:
if polylines:
polylines[-1].append(pt)
return draw_upsampled_polyline(sizeX, sizeY, penColor, penWidth, polylines)
def draw_upsampled_polyline(sizeX, sizeY, penColor, penWidth, polylines, upsampleFactor=4):
""" PIL lacks three things that we need to make the generated image match what the user saw
* antialiased lines
* round endcaps on lines
* round mitred joins between lines
    We emulate antialiasing by drawing at a larger size (given by the upsample factor) and scaling down.
We emulate the endcaps and mitres by drawing circles at each vertex point.
As an alternative, you could use a different library (e.g., ImageMagick) that handles these features natively.
The emulation approach is attractive because PIL is nearly ubiquitous whereas alternative graphic libraries
are less common."""
def upsample(pt):
return (upsampleFactor * pt[0], upsampleFactor * pt[1])
image = Image.new("RGBA", upsample((sizeX, sizeY)), (0, 0, 0, 0))
drawing = ImageDraw.Draw(image)
if len(polylines) > 0:
p = upsampleFactor * penWidth / 2
x1, y1 = upsample(polylines[0][0])
drawing.ellipse((x1 - p, y1 - p, x1 + p, y1 + p), fill=penColor)
for polyline in polylines:
        for i in xrange(0, len(polyline) - 1):
            x1, y1 = upsample(polyline[i])
            x2, y2 = upsample(polyline[i + 1])
            dx = x2 - x1
            dy = y2 - y1
            d = math.sqrt(dx * dx + dy * dy)
            if d > 0:
                # Offset both endpoints perpendicular to the segment by half
                # the pen width, producing a filled quad that draws the stroke.
                s = p / d
                newPoly = [x1 - s * dy, y1 + s * dx, x2 - s * dy, y2 + s * dx, x2 + s * dy, y2 - s * dx, x1 + s * dy, y1 - s * dx]
                drawing.polygon(newPoly, fill=penColor)
            # A circle at each vertex emulates round endcaps and mitred joins.
            drawing.ellipse((x2 - p, y2 - p, x2 + p, y2 + p), fill=penColor)
return image.resize((sizeX, sizeY), Image.ANTIALIAS)
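# Example usage (file names are illustrative):
#   image = generate_image(open("signature.json").read(), 300, 150)
#   image.save("signature.png")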
|
{
"content_hash": "f759a65f1691ed36763c59505c4a6e7e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 134,
"avg_line_length": 39.425,
"alnum_prop": 0.5932149651236525,
"repo_name": "jes-sherborne/jquery-signature-panel-plugin",
"id": "f69a6ec131ceb36db1040f8d9506760fe7c98cba",
"size": "3154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server-image-generators/python/signature_panel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "77944"
},
{
"name": "PHP",
"bytes": "10274"
},
{
"name": "Python",
"bytes": "7164"
},
{
"name": "Ruby",
"bytes": "4588"
},
{
"name": "Shell",
"bytes": "1233"
}
],
"symlink_target": ""
}
|
"""
PEP386-version comparison algorithm.
(c) Tarek Ziade and others
extracted unmodified from https://bitbucket.org/tarek/distutilsversion
licensed under the PSF license (I guess)
UPDATE 2017-09-06: removed suggest_normalized_version function as it is not needed by the project
"""
import re
class IrrationalVersionError(Exception):
"""This is an irrational version."""
pass
class HugeMajorVersionNumError(IrrationalVersionError):
"""An irrational version because the major version number is huge
(often because a year or date was used).
See `error_on_huge_major_num` option in `NormalizedVersion` for details.
This guard can be disabled by setting that option False.
"""
pass
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
# 1.0b1 ((1,0), ('b',1), ('f',))
# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
# 1.0 ((1,0), ('f',), ('f',))
# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
# ^ ^ ^
# 'b' < 'f' ---------------------/ | |
# | |
# 'dev' < 'f' < 'post' -------------------/ |
# |
# 'dev' < 'f' ----------------------------------------------/
# Other letters would do, but 'f' for 'final' is kind of nice.
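# e.g. NormalizedVersion('1.0b1') < NormalizedVersion('1.0') evaluates True,
# matching the sort-order table above.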
FINAL_MARKER = ('f',)
VERSION_RE = re.compile(r'''
^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate
# 'rc'= alias for release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+))?(\.dev(?P<dev>\d+))?)?
$''', re.VERBOSE)
class NormalizedVersion(object):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
        1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def __init__(self, s, error_on_huge_major_num=True):
"""Create a NormalizedVersion instance from a version string.
@param s {str} The version string.
@param error_on_huge_major_num {bool} Whether to consider an
apparent use of a year or full date as the major version number
an error. Default True. One of the observed patterns on PyPI before
the introduction of `NormalizedVersion` was version numbers like this:
2009.01.03
20040603
2005.01
This guard is here to strongly encourage the package author to
use an alternate version, because a release deployed into PyPI
and, e.g. downstream Linux package managers, will forever remove
the possibility of using a version number like "1.0" (i.e.
where the major number is less than that huge major number).
"""
self._parse(s, error_on_huge_major_num)
@classmethod
def from_parts(cls, version, prerelease=FINAL_MARKER,
devpost=FINAL_MARKER):
return cls(cls.parts_to_str((version, prerelease, devpost)))
def _parse(self, s, error_on_huge_major_num=True):
"""Parses a string version into parts."""
match = VERSION_RE.search(s)
if not match:
raise IrrationalVersionError(s)
groups = match.groupdict()
parts = []
# main version
block = self._parse_numdots(groups['version'], s, False, 2)
extraversion = groups.get('extraversion')
if extraversion not in ('', None):
block += self._parse_numdots(extraversion[1:], s)
parts.append(tuple(block))
# prerelease
prerel = groups.get('prerel')
if prerel is not None:
block = [prerel]
block += self._parse_numdots(groups.get('prerelversion'), s,
pad_zeros_length=1)
parts.append(tuple(block))
else:
parts.append(FINAL_MARKER)
# postdev
if groups.get('postdev'):
post = groups.get('post')
dev = groups.get('dev')
postdev = []
if post is not None:
postdev.extend([FINAL_MARKER[0], 'post', int(post)])
if dev is None:
postdev.append(FINAL_MARKER[0])
if dev is not None:
postdev.extend(['dev', int(dev)])
parts.append(tuple(postdev))
else:
parts.append(FINAL_MARKER)
self.parts = tuple(parts)
if error_on_huge_major_num and self.parts[0][0] > 1980:
raise HugeMajorVersionNumError(
"huge major version number, %r, "
"which might cause future problems: %r" % (self.parts[0][0], s))
def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True,
pad_zeros_length=0):
"""Parse 'N.N.N' sequences, return a list of ints.
@param s {str} 'N.N.N..." sequence to be parsed
@param full_ver_str {str} The full version string from which this
comes. Used for error strings.
@param drop_trailing_zeros {bool} Whether to drop trailing zeros
from the returned list. Default True.
@param pad_zeros_length {int} The length to which to pad the
returned list with zeros, if necessary. Default 0.
"""
nums = []
for n in s.split("."):
if len(n) > 1 and n[0] == '0':
raise IrrationalVersionError(
"cannot have leading zero in "
"version number segment: '%s' in %r" % (n, full_ver_str))
nums.append(int(n))
if drop_trailing_zeros:
while nums and nums[-1] == 0:
nums.pop()
while len(nums) < pad_zeros_length:
nums.append(0)
return nums
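    # For example (illustrative): _parse_numdots('1.0.0', '1.0.0') returns [1]
    # once trailing zeros are dropped, while pad_zeros_length=2 guarantees at
    # least two segments, e.g. '1' -> [1, 0].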
def __str__(self):
return self.parts_to_str(self.parts)
@classmethod
def parts_to_str(cls, parts):
"""Transforms a version expressed in tuple into its string
representation."""
# XXX This doesn't check for invalid tuples
main, prerel, postdev = parts
s = '.'.join(str(v) for v in main)
if prerel is not FINAL_MARKER:
s += prerel[0]
s += '.'.join(str(v) for v in prerel[1:])
if postdev and postdev is not FINAL_MARKER:
if postdev[0] == 'f':
postdev = postdev[1:]
i = 0
while i < len(postdev):
if i % 2 == 0:
s += '.'
s += str(postdev[i])
i += 1
return s
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self)
def _cannot_compare(self, other):
raise TypeError("cannot compare %s and %s"
% (type(self).__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts == other.parts
def __lt__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts < other.parts
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
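# A minimal usage sketch (not part of the upstream file): instances compare by
# their parsed parts, so pre-releases sort before finals and post-releases
# sort after them.
#
#   assert NormalizedVersion('1.0a1') < NormalizedVersion('1.0')
#   assert NormalizedVersion('1.0') < NormalizedVersion('1.0.post1')
#   assert str(NormalizedVersion('1.2')) == '1.2'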
|
{
"content_hash": "99a88ac40e9eea1636652d825b2b2e7a",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 97,
"avg_line_length": 36.5695067264574,
"alnum_prop": 0.5200490496627835,
"repo_name": "loechel/tox",
"id": "dca82156a40fba206ec93bb40175f8731dfedb6e",
"size": "8155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tox/_verlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296218"
}
],
"symlink_target": ""
}
|
r"""TestplanEntry and Testplan classes for maintaining testplan entries
"""
import re
import sys
from tabulate import tabulate
class TestplanEntry():
"""An entry in the testplan
A testplan entry has the following information: name of the planned test (testpoint),
a brief description indicating intent, stimulus and checking procedure, targeted milestone
and the list of actual developed tests.
"""
name = ""
desc = ""
milestone = ""
tests = []
fields = ("name", "desc", "milestone", "tests")
milestones = ("N.A.", "V1", "V2", "V3")
def __init__(self, name, desc, milestone, tests, substitutions=[]):
self.name = name
self.desc = desc
self.milestone = milestone
self.tests = tests
if not self.do_substitutions(substitutions): sys.exit(1)
@staticmethod
def is_valid_entry(kv_pairs):
        '''Check whether a dict of key=value pairs contains all of the fields
        required to construct a testplan entry.
        '''
for field in TestplanEntry.fields:
if not field in kv_pairs.keys():
print(
"Error: input key-value pairs does not contain all of the ",
"required fields to create an entry:\n", kv_pairs,
"\nRequired fields:\n", TestplanEntry.fields)
return False
if type(kv_pairs[field]) is str and kv_pairs[field] == "":
print("Error: field \'", field, "\' is an empty string\n:",
kv_pairs)
return False
if field == "milestone" and kv_pairs[
field] not in TestplanEntry.milestones:
print("Error: milestone \'", kv_pairs[field],
"\' is invalid. Legal values:\n",
TestplanEntry.milestones)
return False
return True
def do_substitutions(self, substitutions):
'''Substitute {wildcards} in tests
If tests have {wildcards}, they are substituted with the 'correct' values using
key=value pairs provided by the substitutions arg. If wildcards are present but no
replacement is available, then the wildcards are replaced with an empty string.
'''
if substitutions == []: return True
for kv_pair in substitutions:
resolved_tests = []
[(k, v)] = kv_pair.items()
for test in self.tests:
match = re.findall(r"{([A-Za-z0-9\_]+)}", test)
if len(match) > 0:
# match is a list of wildcards used in test
for item in match:
if item == k:
if type(v) is list:
if v == []:
resolved_test = test.replace(
"{" + item + "}", "")
resolved_tests.append(resolved_test)
else:
for subst_item in v:
resolved_test = test.replace(
"{" + item + "}", subst_item)
resolved_tests.append(resolved_test)
elif type(v) is str:
resolved_test = test.replace(
"{" + item + "}", v)
resolved_tests.append(resolved_test)
else:
print(
"Error: wildcard", item, "in test", test,
"has no viable",
"replacement value (need str or list):\n",
kv_pair)
return False
else:
resolved_tests.append(test)
if resolved_tests != []: self.tests = resolved_tests
# if wildcards have no available replacements in substitutions arg, then
# replace with empty string
resolved_tests = []
for test in self.tests:
match = re.findall(r"{([A-Za-z0-9\_]+)}", test)
if len(match) > 0:
for item in match:
resolved_tests.append(test.replace("{" + item + "}", ""))
if resolved_tests != []: self.tests = resolved_tests
return True
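    # For example (illustrative): a test "foo_{mode}" with substitutions
    # [{"mode": ["a", "b"]}] expands to ["foo_a", "foo_b"]; a wildcard with no
    # matching substitution is replaced with an empty string.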
def map_regr_results(self, regr_results, map_full_testplan=True):
        '''Map regression results to tests in this entry.
        Given a list of regression results (each a dict with the test name,
        # passing and # total), check whether each written test in this
        testplan entry appears in the results list. If there is a match,
        append the passing / total information. If no match is found, or if
        self.tests is an empty list, indicate 0/1 passing so that it is
        factored into the final total.
        '''
test_results = []
for test in self.tests:
found = False
for regr_result in regr_results:
if test == regr_result["name"]:
test_results.append(regr_result)
regr_result["mapped"] = True
found = True
break
# if a test was not found in regr results, indicate 0/1 passing
if map_full_testplan and not found:
test_results.append({"name": test, "passing": 0, "total": 1})
# if no written tests were indicated in the testplan, reuse planned
# test name and indicate 0/1 passing
if map_full_testplan and self.tests == []:
test_results.append({"name": self.name, "passing": 0, "total": 1})
# replace tests with test results
self.tests = test_results
return regr_results
def display(self):
print("testpoint: ", self.name)
print("description: ", self.desc)
print("milestone: ", self.milestone)
print("tests: ", self.tests)
class Testplan():
"""The full testplan
This comprises of TestplanEntry entries
"""
name = ""
entries = []
def __init__(self, name):
self.name = name
self.entries = []
if name == "":
print("Error: testplan name cannot be empty")
sys.exit(1)
def entry_exists(self, entry):
'''check if new entry has the same name as one of the existing entries
'''
for existing_entry in self.entries:
if entry.name == existing_entry.name:
print("Error: found a testplan entry with name = ", entry.name)
print("existing entry:\n", existing_entry)
print("new entry:\n", entry)
return True
return False
def add_entry(self, entry):
'''add a new entry into the testplan
'''
if self.entry_exists(entry): sys.exit(1)
self.entries.append(entry)
def sort(self):
'''sort entries by milestone
'''
self.entries = sorted(self.entries, key=lambda entry: entry.milestone)
def map_regr_results(self, regr_results, map_full_testplan=True):
'''map regression results to testplan entries
'''
def sum_results(totals, entry):
'''function to generate milestone and grand totals
'''
ms = entry.milestone
for test in entry.tests:
# Create dummy tests entry for milestone total
if totals[ms].tests == []:
totals[ms].tests = [{
"name": "TOTAL",
"passing": 0,
"total": 0
}]
# Sum milestone total
totals[ms].tests[0]["passing"] += test["passing"]
totals[ms].tests[0]["total"] += test["total"]
# Sum grand total
if ms != "N.A.":
totals["N.A."].tests[0]["passing"] += test["passing"]
totals["N.A."].tests[0]["total"] += test["total"]
return totals
totals = {}
# Create entry for total in each milestone; & the grand total.
for ms in TestplanEntry.milestones:
totals[ms] = TestplanEntry(name="N.A.",
desc="Total tests",
milestone=ms,
tests=[{
"name": "TOTAL",
"passing": 0,
"total": 0
}])
if ms != "N.A.": totals[ms].tests = []
for entry in self.entries:
regr_results = entry.map_regr_results(regr_results,
map_full_testplan)
totals = sum_results(totals, entry)
# extract unmapped tests from regr_results and create 'unmapped' entry
unmapped_regr_results = []
for regr_result in regr_results:
if not "mapped" in regr_result.keys():
unmapped_regr_results.append(regr_result)
unmapped = TestplanEntry(
name="Unmapped tests",
desc="""A list of tests in the regression result that are not
mapped to testplan entries.""",
milestone="N.A.",
tests=unmapped_regr_results)
totals = sum_results(totals, unmapped)
# add total back into 'entries'
for ms in TestplanEntry.milestones[1:]:
self.entries.append(totals[ms])
self.sort()
self.entries.append(unmapped)
self.entries.append(totals["N.A."])
def display(self):
'''display the complete testplan for debug
'''
print("name: ", self.name)
for entry in self.entries:
entry.display()
def get_milestone_regressions(self):
regressions = {}
for entry in self.entries:
            # Skip entries whose milestone is "N.A."
            if entry.milestone not in entry.milestones[1:]: continue
            # If the milestone key doesn't exist yet, create one
if entry.milestone not in regressions.keys():
regressions[entry.milestone] = []
# Append new tests to the list
for test in entry.tests:
if test not in regressions[entry.milestone] and test != "":
regressions[entry.milestone].append(test)
# Build regressions dict into a hjson like data structure
output = []
for ms in regressions.keys():
ms_dict = {}
ms_dict["name"] = ms
ms_dict["tests"] = regressions[ms]
output.append(ms_dict)
return output
def results_table(self, regr_results, map_full_testplan=True):
'''Print the mapped regression results into a table.
'''
self.map_regr_results(regr_results, map_full_testplan)
table = [["Milestone", "Name", "Tests", "Results"]]
align = ["center", "center", "right", "center"]
for entry in self.entries:
milestone = entry.milestone
entry_name = entry.name
if milestone == "N.A.": milestone = ""
if entry_name == "N.A.": entry_name = ""
for test in entry.tests:
results_str = str(test["passing"]) + "/" + str(test["total"])
table.append(
[milestone, entry_name, test["name"], results_str])
milestone = ""
entry_name = ""
return tabulate(table,
headers="firstrow",
tablefmt="pipe",
colalign=align)
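# A minimal usage sketch (illustrative data, not part of the upstream file):
# build a one-entry testplan, map a fake regression result onto it, and print
# the markdown-style results table. Assumes the 'tabulate' package is present.
if __name__ == '__main__':
    demo_plan = Testplan("demo")
    demo_plan.add_entry(
        TestplanEntry(name="smoke",
                      desc="basic sanity check",
                      milestone="V1",
                      tests=["smoke_test"]))
    demo_results = [{"name": "smoke_test", "passing": 3, "total": 4}]
    print(demo_plan.results_table(demo_results))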
|
{
"content_hash": "4ea2b621640f4e769a53de9abcc113aa",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 94,
"avg_line_length": 40.124161073825505,
"alnum_prop": 0.4999581834908422,
"repo_name": "chipsalliance/Surelog",
"id": "f38cbfb755654092ca17d91fedd13065a5e0c54c",
"size": "12127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/tests/Opentitan/util/testplanner/class_defs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "155641"
},
{
"name": "C",
"bytes": "3114"
},
{
"name": "C++",
"bytes": "2808920"
},
{
"name": "CMake",
"bytes": "41750"
},
{
"name": "Forth",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "4820"
},
{
"name": "Nix",
"bytes": "784"
},
{
"name": "Python",
"bytes": "110922"
},
{
"name": "SWIG",
"bytes": "351"
},
{
"name": "Shell",
"bytes": "1349"
},
{
"name": "Slash",
"bytes": "37570"
},
{
"name": "SystemVerilog",
"bytes": "872314"
},
{
"name": "Tcl",
"bytes": "68865"
},
{
"name": "V",
"bytes": "1092"
},
{
"name": "Verilog",
"bytes": "495242"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='libovs',
version='0.0.1',
description='A simple OVSDB library',
author='Atzm WATANABE',
author_email='atzm@atzm.org',
license='BSD-2',
py_modules=['libovs'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
],
)
|
{
"content_hash": "c679d39054ac91f2ebc565b4b5231d74",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 49,
"avg_line_length": 24.72222222222222,
"alnum_prop": 0.5842696629213483,
"repo_name": "atzm/libovs",
"id": "be9521cd71564cf4a5d8133177c8549724feac57",
"size": "492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "7140"
}
],
"symlink_target": ""
}
|
"""Test compilation database flags generation."""
import imp
import platform
from os import path
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import compilation_db
from EasyClangComplete.plugin.utils import tools
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import file
from EasyClangComplete.plugin.utils import search_scope
from EasyClangComplete.plugin.utils import singleton
imp.reload(compilation_db)
imp.reload(tools)
imp.reload(flag)
imp.reload(file)
imp.reload(search_scope)
CompilationDb = compilation_db.CompilationDb
ComplationDbCache = singleton.ComplationDbCache
SearchScope = search_scope.TreeSearchScope
Flag = flag.Flag
File = file.File
class TestCompilationDb(object):
"""Test generating flags with a 'compile_commands.json' file."""
def setUp(self):
"""Prepare the database."""
ComplationDbCache().clear()
def test_get_all_flags(self):
"""Test if compilation db is found."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
expected = [Flag('-I', path.normpath('/lib_include_dir')),
Flag('', '-Dlib_EXPORTS'),
Flag('', '-fPIC')]
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command')
scope = SearchScope(from_folder=path_to_db)
if self.lazy_parsing:
self.assertIsNone(db.get_flags(search_scope=scope))
else:
self.assertIn(expected[0], db.get_flags(search_scope=scope))
self.assertIn(expected[1], db.get_flags(search_scope=scope))
self.assertIn(expected[2], db.get_flags(search_scope=scope))
def test_strip_wrong_arguments(self):
"""Test if compilation db is found and flags loaded from arguments."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'arguments')
scope = SearchScope(from_folder=path_to_db)
if self.lazy_parsing:
import sublime
if sublime.platform() != 'windows':
file_path = File.canonical_path("/home/user/dummy_lib.cpp")
self.assertIn(Flag('', '-Dlib_EXPORTS'),
db.get_flags(file_path=file_path,
search_scope=scope))
self.assertIn(Flag('', '-fPIC'),
db.get_flags(file_path=file_path,
search_scope=scope))
# Check that we don't get the 'all' entry.
self.assertIsNone(db.get_flags(search_scope=scope))
else:
expected = [Flag('-I', path.normpath('/lib_include_dir')),
Flag('', '-Dlib_EXPORTS'),
Flag('', '-fPIC')]
for expected_flag in expected:
self.assertIn(expected_flag, db.get_flags(search_scope=scope))
def test_get_flags_for_path(self):
"""Test if compilation db is found."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
expected_lib = [Flag('', '-Dlib_EXPORTS'),
Flag('', '-fPIC')]
expected_main = Flag('-I', path.normpath('/lib_include_dir'))
lib_file_path = path.normpath('/home/user/dummy_lib.cpp')
main_file_path = path.normpath('/home/user/dummy_main.cpp')
# also try to test a header
lib_file_path_h = path.normpath('/home/user/dummy_lib.h')
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command')
scope = SearchScope(from_folder=path_to_db)
self.assertIn(expected_lib[0], db.get_flags(lib_file_path, scope))
self.assertIn(expected_lib[0], db.get_flags(lib_file_path_h, scope))
self.assertIn(expected_main, db.get_flags(main_file_path, scope))
self.assertIn(lib_file_path, db._cache)
self.assertIn(main_file_path, db._cache)
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command', 'compile_commands.json')
self.assertEqual(path_to_db,
db._cache[lib_file_path])
self.assertEqual(path_to_db,
db._cache[main_file_path])
if self.lazy_parsing:
self.assertNotIn(CompilationDb.ALL_TAG, db._cache[path_to_db])
else:
self.assertIn(expected_main,
db._cache[path_to_db][CompilationDb.ALL_TAG])
self.assertIn(
expected_lib[0], db._cache[path_to_db][CompilationDb.ALL_TAG])
self.assertIn(
expected_lib[1], db._cache[path_to_db][CompilationDb.ALL_TAG])
def test_no_db_in_folder(self):
"""Test that a non-existing file is not found in db."""
if platform.system() == "Darwin":
# This test is disabled as the current path is trying to reach a
# network resource on MacOS. I guess we have to deal with this at
# some point later.
return
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
flags = db.get_flags(File.canonical_path('/home/user/dummy_main.cpp'))
self.assertTrue(flags is None)
def test_persistence(self):
"""Test if compilation db is persistent."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
expected_main = Flag('-I', path.normpath('/lib_include_dir'))
lib_file_path = path.normpath('/home/user/dummy_lib.cpp')
main_file_path = path.normpath('/home/user/dummy_main.cpp')
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command')
scope = SearchScope(from_folder=path_to_db)
self.assertIn(Flag('', '-Dlib_EXPORTS'),
db.get_flags(lib_file_path, scope))
self.assertIn(Flag('', '-fPIC'),
db.get_flags(lib_file_path, scope))
self.assertIn(expected_main, db.get_flags(main_file_path, scope))
# check persistence
self.assertGreater(len(db._cache), 2)
self.assertEqual(path.join(path_to_db, "compile_commands.json"),
db._cache[main_file_path])
self.assertEqual(path.join(path_to_db, "compile_commands.json"),
db._cache[lib_file_path])
def test_relative_directory(self):
"""Test if compilation db 'directory' records are applied."""
include_prefixes = ['-I', '-isystem']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
expected = [Flag('-I', path.realpath('/foo/bar/test/include')),
Flag('-I', path.realpath('/foo/include')),
Flag('-isystem', path.realpath('/foo/bar/matilda'), ' ')]
path_to_db = path.realpath(
path.join(path.dirname(__file__),
'compilation_db_files',
'directory'))
scope = SearchScope(from_folder=path_to_db)
if self.lazy_parsing:
import sublime
if sublime.platform() != 'windows':
file_path = path.realpath(
path.join("/foo/bar/test", "test.cpp"))
self.assertEqual(expected, db.get_flags(file_path=file_path,
search_scope=scope))
# Check that we don't get the 'all' entry.
self.assertIsNone(db.get_flags(search_scope=scope))
else:
db.get_flags(search_scope=scope)
for expected_flag in expected:
self.assertIn(expected_flag, db.get_flags(search_scope=scope))
def test_get_c_flags(self):
"""Test argument filtering for c language."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
main_file_path = path.normpath('/home/blah.c')
# also try to test a header
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command_c')
scope = SearchScope(from_folder=path_to_db)
flags = db.get_flags(main_file_path, scope)
self.assertIn(Flag('', '-Wno-poison-system-directories'), flags)
self.assertIn(Flag('', '-march=armv7-a'), flags)
def test_get_c_flags_ccache(self):
"""Test argument filtering when ccache is used."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
main_file_path = path.normpath('/home/blah.c')
# also try to test a header
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command_c_ccache')
scope = SearchScope(from_folder=path_to_db)
flags = db.get_flags(main_file_path, scope)
self.assertNotIn(Flag('ccache', ''), flags)
self.assertNotIn(Flag('', 'ccache'), flags)
self.assertNotIn(Flag('cc', ''), flags)
self.assertNotIn(Flag('', 'cc'), flags)
self.assertIn(Flag('', '-Wno-poison-system-directories'), flags)
self.assertIn(Flag('', '-march=armv7-a'), flags)
def test_get_c_flags_ccache_irrelevant(self):
"""Test filtering when ccache string is not the first argument."""
include_prefixes = ['-I']
db = CompilationDb(
include_prefixes,
header_to_source_map=[],
lazy_flag_parsing=self.lazy_parsing
)
main_file_path = path.normpath('ccache')
# also try to test a header
path_to_db = path.join(path.dirname(__file__),
'compilation_db_files',
'command_c_ccache_irrelevant')
scope = SearchScope(from_folder=path_to_db)
flags = db.get_flags(main_file_path, scope)
self.assertNotIn(Flag('ccache', ''), flags)
self.assertNotIn(Flag('', 'ccache'), flags)
self.assertNotIn(Flag('cc', ''), flags)
self.assertNotIn(Flag('', 'cc'), flags)
self.assertIn(Flag('', '-Wno-poison-system-directories'), flags)
self.assertIn(Flag('', '-march=armv7-a'), flags)
class LazyParsing(TestCompilationDb, TestCase):
"""Test that we can parse DB with lazy parsing."""
lazy_parsing = True
class NonLazyParsing(TestCompilationDb, TestCase):
"""Test that we can parse DB WITHOUT lazy parsing."""
lazy_parsing = False
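# Note on the pattern above: TestCompilationDb deliberately does not inherit
# from TestCase, so its shared test methods only run through the two concrete
# subclasses, once with lazy_parsing=True and once with lazy_parsing=False.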
|
{
"content_hash": "a797221331b0d3b2ca9e175fd0f13b7c",
"timestamp": "",
"source": "github",
"line_count": 282,
"max_line_length": 78,
"avg_line_length": 41.46808510638298,
"alnum_prop": 0.5542158371814606,
"repo_name": "niosus/EasyClangComplete",
"id": "0eea22a32544f5034ce8ac571a3d99cd734b70f1",
"size": "11694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_compilation_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "469"
},
{
"name": "C++",
"bytes": "4969"
},
{
"name": "CMake",
"bytes": "1160"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Objective-C",
"bytes": "4185"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1615297"
},
{
"name": "Starlark",
"bytes": "105"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v12.services.types import customizer_attribute_service
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
CustomizerAttributeServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CustomizerAttributeServiceGrpcTransport
class CustomizerAttributeServiceClientMeta(type):
"""Metaclass for the CustomizerAttributeService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomizerAttributeServiceTransport]]
_transport_registry["grpc"] = CustomizerAttributeServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[CustomizerAttributeServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CustomizerAttributeServiceClient(
metaclass=CustomizerAttributeServiceClientMeta
):
"""Service to manage customizer attribute"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
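    # For example, "googleads.googleapis.com" becomes
    # "googleads.mtls.googleapis.com"; an endpoint that is already mTLS, or
    # one outside *.googleapis.com, is returned unchanged.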
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomizerAttributeServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomizerAttributeServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomizerAttributeServiceTransport:
"""Returns the transport used by the client instance.
Returns:
CustomizerAttributeServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def customizer_attribute_path(
customer_id: str, customizer_attribute_id: str,
) -> str:
"""Returns a fully-qualified customizer_attribute string."""
return "customers/{customer_id}/customizerAttributes/{customizer_attribute_id}".format(
customer_id=customer_id,
customizer_attribute_id=customizer_attribute_id,
)
@staticmethod
def parse_customizer_attribute_path(path: str) -> Dict[str, str]:
"""Parses a customizer_attribute path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/customizerAttributes/(?P<customizer_attribute_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
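    # For example, customizer_attribute_path("123", "456") returns
    # "customers/123/customizerAttributes/456", and
    # parse_customizer_attribute_path() splits it back into its components.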
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, CustomizerAttributeServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the customizer attribute service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, CustomizerAttributeServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CustomizerAttributeServiceTransport):
# transport is a CustomizerAttributeServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_customizer_attributes(
self,
request: Union[
customizer_attribute_service.MutateCustomizerAttributesRequest, dict
] = None,
*,
customer_id: str = None,
operations: Sequence[
customizer_attribute_service.CustomizerAttributeOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> customizer_attribute_service.MutateCustomizerAttributesResponse:
r"""Creates, updates or removes customizer attributes.
Operation statuses are returned.
Args:
request (Union[google.ads.googleads.v12.services.types.MutateCustomizerAttributesRequest, dict]):
The request object. Request message for
[CustomizerAttributeService.MutateCustomizerAttributes][google.ads.googleads.v12.services.CustomizerAttributeService.MutateCustomizerAttributes].
customer_id (str):
Required. The ID of the customer
whose customizer attributes are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v12.services.types.CustomizerAttributeOperation]):
Required. The list of operations to
perform on individual customizer
attributes.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v12.services.types.MutateCustomizerAttributesResponse:
                Response message for a customizer
attribute mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a customizer_attribute_service.MutateCustomizerAttributesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
customizer_attribute_service.MutateCustomizerAttributesRequest,
):
request = customizer_attribute_service.MutateCustomizerAttributesRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_customizer_attributes
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-ads",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CustomizerAttributeServiceClient",)
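# A minimal usage sketch (illustrative, not part of the generated file):
# creating a customizer attribute. Assumes application-default credentials and
# a configured developer token, which this bare GAPIC client does not set up.
#
#   client = CustomizerAttributeServiceClient()
#   operation = customizer_attribute_service.CustomizerAttributeOperation()
#   operation.create.name = "Season"
#   response = client.mutate_customizer_attributes(
#       customer_id="1234567890", operations=[operation])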
|
{
"content_hash": "2b7e3b3a19614dca7563f499f5c28899",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 161,
"avg_line_length": 40.95426195426195,
"alnum_prop": 0.6198791816843494,
"repo_name": "googleads/google-ads-python",
"id": "84668a7dee258e18ef99c93f18b69abe0795076b",
"size": "20299",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/customizer_attribute_service/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
}
|
u"""
Created on 2017-1-6
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from PyFin.Enums import Factors
from PyFin.Analysis.SecurityValueHolders import SecurityLatestValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSRankedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSTopNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSBottomNPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSAverageAdjustedSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSZScoreSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSFillNASecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSPercentileSecurityValueHolder
from PyFin.Analysis.CrossSectionValueHolders import CSResidueSecurityValueHolder
class TestCrossSectionValueHolder(unittest.TestCase):
def setUp(self):
np.random.seed(0)
sample1 = np.random.randn(1000, 6)
sample2 = np.random.randn(1000, 6)
self.datas = {'aapl': {'close': sample1[:, 0], 'open': sample1[:, 1]},
'ibm': {'close': sample2[:, 0], 'open': sample2[:, 1]},
'goog': {'close': sample1[:, 2], 'open': sample1[:, 3]},
'baba': {'close': sample2[:, 2], 'open': sample2[:, 3]},
'tela': {'close': sample1[:, 4], 'open': sample1[:, 5]},
'nflx': {'close': sample2[:, 4], 'open': sample2[:, 5]}
}
def testCSRankedSecurityValueHolderWithSymbolName(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder('close')
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSRankedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
rankHolder = CSRankedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values, rankHolder.value.values)
def testCSTopNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSTopNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((-benchmarkValues).rank().values <= n, topnHolder.value.values)
def testCSBottomNSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 2
topnHolder = CSBottomNSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}}
benchmark.push(data)
topnHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values <= n, topnHolder.value.values)
def testCSRankedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
rankHolder = CSRankedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
rankHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups).rank().values
np.testing.assert_array_almost_equal(expected_rank, rankHolder.value.values)
def testCSAverageSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanHolder = CSAverageSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.values.mean(), meanHolder.value.values)
def testCSAverageSecurityValueHolderWithGroup(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanHolder = CSAverageSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_mean = pd.Series(benchmarkValues.to_dict()).groupby(groups).mean()
calculated_mean = meanHolder.value
for name in calculated_mean.index():
if name in ['aapl', 'ibm']:
self.assertAlmostEqual(calculated_mean[name], expected_mean[1])
else:
self.assertAlmostEqual(calculated_mean[name], expected_mean[2])
def testCSPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
perHolder = CSPercentileSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(benchmarkValues.rank().values / len(data), perHolder.value.values)
def testCSTopNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSTopNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal(((-benchmarkValues).rank().values / len(data)) <= n,
perHolder.value.values)
def testCSBottomNPercentileSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
n = 0.3
perHolder = CSBottomNPercentileSecurityValueHolder(benchmark, n)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i]},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i]}
}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues.rank().values / len(data)) <= n,
perHolder.value.values)
def testCSPercentileSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
perHolder = CSPercentileSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
perHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x.rank().values / len(x))
np.testing.assert_array_almost_equal(expected_rank, perHolder.value.values)
def testCSAverageAdjustedSecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]},
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
np.testing.assert_array_almost_equal((benchmarkValues - benchmarkValues.mean()).values, meanAdjustedHolder.value.values)
def testCSAverageAdjustedSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSAverageAdjustedSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: x - x.mean())
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolder(self):
keys = list(range(1, 11))
values = list(range(10, 0, -1))
data = {}
for i, k in enumerate(keys):
data[k] = {}
data[k]['close'] = values[i]
quantile_value = CSZScoreSecurityValueHolder('close')
quantile_value.push(data)
calculated = quantile_value.value
data = np.linspace(10., 1., 10)
expected = (data - data.mean()) / data.std()
np.testing.assert_array_almost_equal(expected, calculated.values)
def testCSFillNASecurityValueHolder(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSFillNASecurityValueHolder(benchmark, groups)
def cal_func(x):
x[np.isnan(x)] = np.nanmean(x)
return x
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'tela': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['tela'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: np.nan,
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.},
'nflx': {Factors.CLOSE: self.datas['nflx'][Factors.CLOSE][i],
Factors.OPEN: self.datas['nflx'][Factors.OPEN][i],
'ind': 2.}
}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'tela': 1., 'goog': 2., 'baba': 2., 'nflx': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(cal_func)
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZscoreSecurityValueHolderWithGroups(self):
benchmark = SecurityLatestValueHolder(x='close')
groups = SecurityLatestValueHolder(x='ind')
meanAdjustedHolder = CSZScoreSecurityValueHolder(benchmark, groups)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i],
'ind': 1.},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i],
'ind': 1.},
'goog': {Factors.CLOSE: self.datas['goog'][Factors.CLOSE][i],
Factors.OPEN: self.datas['goog'][Factors.OPEN][i],
'ind': 2.},
'baba': {Factors.CLOSE: self.datas['baba'][Factors.CLOSE][i],
Factors.OPEN: self.datas['baba'][Factors.OPEN][i],
'ind': 2.}}
benchmark.push(data)
meanAdjustedHolder.push(data)
benchmarkValues = benchmark.value
groups = {'aapl': 1., 'ibm': 1., 'goog': 2., 'baba': 2.}
expected_rank = pd.Series(benchmarkValues.to_dict()).groupby(groups) \
.transform(lambda x: (x - x.mean()) / x.std(ddof=0))
np.testing.assert_array_almost_equal(expected_rank, meanAdjustedHolder.value.values)
def testCSZResidueSecurityValueHolder(self):
y = SecurityLatestValueHolder(x='close')
x = SecurityLatestValueHolder(x='open')
res = CSResidueSecurityValueHolder(y, x)
for i in range(len(self.datas['aapl']['close'])):
data = {'aapl': {Factors.CLOSE: self.datas['aapl'][Factors.CLOSE][i],
Factors.OPEN: self.datas['aapl'][Factors.OPEN][i]},
'ibm': {Factors.CLOSE: self.datas['ibm'][Factors.CLOSE][i],
Factors.OPEN: self.datas['ibm'][Factors.OPEN][i]}}
y.push(data)
x.push(data)
res.push(data)
calculated = res.value.values
y_values = y.value.values
x_values = x.value.values
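        # Manual OLS with an intercept: augment X with a column of ones, solve
        # the normal equations beta = (X'X)^{-1} X'y, and take the residuals
        # y - X.beta as the expected cross-sectional residue.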
x_values = np.concatenate([np.ones(shape=(len(x_values), 1)), x_values.reshape(-1, 1)], axis=1)
beta = np.dot(np.linalg.inv(np.dot(x_values.T, x_values)), np.dot(x_values.T, y_values.reshape(-1, 1)))
expected = y_values - np.dot(x_values, beta).flatten()
np.testing.assert_array_almost_equal(calculated, expected)
|
{
"content_hash": "09c7f7a5b7de2481c0150ffe8ac33ae7",
"timestamp": "",
"source": "github",
"line_count": 409,
"max_line_length": 132,
"avg_line_length": 54.298288508557455,
"alnum_prop": 0.5503422190201729,
"repo_name": "wegamekinglc/Finance-Python",
"id": "c330e784330c39013feebaab317341baa7c97aa9",
"size": "22232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "PyFin/tests/Analysis/testCrossSectionValueHolders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "49381"
},
{
"name": "Python",
"bytes": "801283"
}
],
"symlink_target": ""
}
|
import copy
import re
import sys
import tempfile
import unittest
from unittest.test.testmock.support import is_instance
from unittest import mock
from unittest.mock import (
call, DEFAULT, patch, sentinel,
MagicMock, Mock, NonCallableMock,
NonCallableMagicMock, AsyncMock, _Call, _CallList,
create_autospec
)
class Iter(object):
def __init__(self):
self.thing = iter(['this', 'is', 'an', 'iter'])
def __iter__(self):
return self
def next(self):
return next(self.thing)
__next__ = next
class Something(object):
def meth(self, a, b, c, d=None): pass
@classmethod
def cmeth(cls, a, b, c, d=None): pass
@staticmethod
def smeth(a, b, c, d=None): pass
def something(a): pass
class MockTest(unittest.TestCase):
def test_all(self):
# if __all__ is badly defined then import * will raise an error
# We have to exec it because you can't import * inside a method
# in Python 3
exec("from unittest.mock import *")
def test_constructor(self):
mock = Mock()
self.assertFalse(mock.called, "called not initialised correctly")
self.assertEqual(mock.call_count, 0,
"call_count not initialised correctly")
self.assertTrue(is_instance(mock.return_value, Mock),
"return_value not initialised correctly")
self.assertEqual(mock.call_args, None,
"call_args not initialised correctly")
self.assertEqual(mock.call_args_list, [],
"call_args_list not initialised correctly")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly")
# Can't use hasattr for this test as it always returns True on a mock
self.assertNotIn('_items', mock.__dict__,
"default mock should not have '_items' attribute")
self.assertIsNone(mock._mock_parent,
"parent not initialised correctly")
self.assertIsNone(mock._mock_methods,
"methods not initialised correctly")
self.assertEqual(mock._mock_children, {},
"children not initialised incorrectly")
def test_return_value_in_constructor(self):
mock = Mock(return_value=None)
self.assertIsNone(mock.return_value,
"return value in constructor not honoured")
def test_change_return_value_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.return_value = 1
self.assertEqual(mock(), 1)
def test_change_side_effect_via_delegate(self):
def f(): pass
mock = create_autospec(f)
mock.mock.side_effect = TypeError()
with self.assertRaises(TypeError):
mock()
def test_repr(self):
mock = Mock(name='foo')
self.assertIn('foo', repr(mock))
self.assertIn("'%s'" % id(mock), repr(mock))
mocks = [(Mock(), 'mock'), (Mock(name='bar'), 'bar')]
for mock, name in mocks:
self.assertIn('%s.bar' % name, repr(mock.bar))
self.assertIn('%s.foo()' % name, repr(mock.foo()))
self.assertIn('%s.foo().bing' % name, repr(mock.foo().bing))
self.assertIn('%s()' % name, repr(mock()))
self.assertIn('%s()()' % name, repr(mock()()))
self.assertIn('%s()().foo.bar.baz().bing' % name,
repr(mock()().foo.bar.baz().bing))
def test_repr_with_spec(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec=X())
self.assertIn(" spec='X' ", repr(mock))
mock = Mock(spec_set=X)
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec_set=X())
self.assertIn(" spec_set='X' ", repr(mock))
mock = Mock(spec=X, name='foo')
self.assertIn(" spec='X' ", repr(mock))
self.assertIn(" name='foo' ", repr(mock))
mock = Mock(name='foo')
self.assertNotIn("spec", repr(mock))
mock = Mock()
self.assertNotIn("spec", repr(mock))
mock = Mock(spec=['foo'])
self.assertNotIn("spec", repr(mock))
def test_side_effect(self):
mock = Mock()
def effect(*args, **kwargs):
raise SystemError('kablooie')
mock.side_effect = effect
self.assertRaises(SystemError, mock, 1, 2, fish=3)
mock.assert_called_with(1, 2, fish=3)
results = [1, 2, 3]
def effect():
return results.pop()
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"side effect not used correctly")
mock = Mock(side_effect=sentinel.SideEffect)
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side effect in constructor not used")
def side_effect():
return DEFAULT
mock = Mock(side_effect=side_effect, return_value=sentinel.RETURN)
self.assertEqual(mock(), sentinel.RETURN)
def test_autospec_side_effect(self):
# Test for issue17826
results = [1, 2, 3]
def effect():
return results.pop()
def f(): pass
mock = create_autospec(f)
mock.side_effect = [1, 2, 3]
self.assertEqual([mock(), mock(), mock()], [1, 2, 3],
"side effect not used correctly in create_autospec")
# Test where side effect is a callable
results = [1, 2, 3]
mock = create_autospec(f)
mock.side_effect = effect
self.assertEqual([mock(), mock(), mock()], [3, 2, 1],
"callable side effect not used correctly")
def test_autospec_side_effect_exception(self):
# Test for issue 23661
def f(): pass
mock = create_autospec(f)
mock.side_effect = ValueError('Bazinga!')
self.assertRaisesRegex(ValueError, 'Bazinga!', mock)
def test_reset_mock(self):
parent = Mock()
spec = ["something"]
mock = Mock(name="child", parent=parent, spec=spec)
mock(sentinel.Something, something=sentinel.SomethingElse)
something = mock.something
mock.something()
mock.side_effect = sentinel.SideEffect
return_value = mock.return_value
return_value()
mock.reset_mock()
self.assertEqual(mock._mock_name, "child",
"name incorrectly reset")
self.assertEqual(mock._mock_parent, parent,
"parent incorrectly reset")
self.assertEqual(mock._mock_methods, spec,
"methods incorrectly reset")
self.assertFalse(mock.called, "called not reset")
self.assertEqual(mock.call_count, 0, "call_count not reset")
self.assertEqual(mock.call_args, None, "call_args not reset")
self.assertEqual(mock.call_args_list, [], "call_args_list not reset")
self.assertEqual(mock.method_calls, [],
"method_calls not initialised correctly: %r != %r" %
(mock.method_calls, []))
self.assertEqual(mock.mock_calls, [])
self.assertEqual(mock.side_effect, sentinel.SideEffect,
"side_effect incorrectly reset")
self.assertEqual(mock.return_value, return_value,
"return_value incorrectly reset")
self.assertFalse(return_value.called, "return value mock not reset")
self.assertEqual(mock._mock_children, {'something': something},
"children reset incorrectly")
self.assertEqual(mock.something, something,
"children incorrectly cleared")
self.assertFalse(mock.something.called, "child not reset")
def test_reset_mock_recursion(self):
mock = Mock()
mock.return_value = mock
# used to cause recursion
mock.reset_mock()
def test_reset_mock_on_mock_open_issue_18622(self):
a = mock.mock_open()
a.reset_mock()
def test_call(self):
mock = Mock()
self.assertTrue(is_instance(mock.return_value, Mock),
"Default return_value should be a Mock")
result = mock()
self.assertEqual(mock(), result,
"different result from consecutive calls")
mock.reset_mock()
ret_val = mock(sentinel.Arg)
self.assertTrue(mock.called, "called not set")
        self.assertEqual(mock.call_count, 1, "call_count incorrect")
self.assertEqual(mock.call_args, ((sentinel.Arg,), {}),
"call_args not set")
self.assertEqual(mock.call_args.args, (sentinel.Arg,),
"call_args not set")
self.assertEqual(mock.call_args.kwargs, {},
"call_args not set")
self.assertEqual(mock.call_args_list, [((sentinel.Arg,), {})],
"call_args_list not initialised correctly")
mock.return_value = sentinel.ReturnValue
ret_val = mock(sentinel.Arg, key=sentinel.KeyArg)
self.assertEqual(ret_val, sentinel.ReturnValue,
"incorrect return value")
self.assertEqual(mock.call_count, 2, "call_count incorrect")
self.assertEqual(mock.call_args,
((sentinel.Arg,), {'key': sentinel.KeyArg}),
"call_args not set")
self.assertEqual(mock.call_args_list, [
((sentinel.Arg,), {}),
((sentinel.Arg,), {'key': sentinel.KeyArg})
],
"call_args_list not set")
def test_call_args_comparison(self):
mock = Mock()
mock()
mock(sentinel.Arg)
mock(kw=sentinel.Kwarg)
mock(sentinel.Arg, kw=sentinel.Kwarg)
self.assertEqual(mock.call_args_list, [
(),
((sentinel.Arg,),),
({"kw": sentinel.Kwarg},),
((sentinel.Arg,), {"kw": sentinel.Kwarg})
])
self.assertEqual(mock.call_args,
((sentinel.Arg,), {"kw": sentinel.Kwarg}))
self.assertEqual(mock.call_args.args, (sentinel.Arg,))
self.assertEqual(mock.call_args.kwargs, {"kw": sentinel.Kwarg})
# Comparing call_args to a long sequence should not raise
# an exception. See issue 24857.
self.assertFalse(mock.call_args == "a long sequence")
def test_calls_equal_with_any(self):
# Check that equality and non-equality is consistent even when
# comparing with mock.ANY
mm = mock.MagicMock()
self.assertTrue(mm == mm)
self.assertFalse(mm != mm)
self.assertFalse(mm == mock.MagicMock())
self.assertTrue(mm != mock.MagicMock())
self.assertTrue(mm == mock.ANY)
self.assertFalse(mm != mock.ANY)
self.assertTrue(mock.ANY == mm)
self.assertFalse(mock.ANY != mm)
call1 = mock.call(mock.MagicMock())
call2 = mock.call(mock.ANY)
self.assertTrue(call1 == call2)
self.assertFalse(call1 != call2)
self.assertTrue(call2 == call1)
self.assertFalse(call2 != call1)
def test_assert_called_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_with()
self.assertRaises(AssertionError, mock.assert_called_with, 1)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_with)
mock(1, 2, 3, a='fish', b='nothing')
mock.assert_called_with(1, 2, 3, a='fish', b='nothing')
def test_assert_called_with_any(self):
m = MagicMock()
m(MagicMock())
m.assert_called_with(mock.ANY)
def test_assert_called_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_called_with_method_spec(self):
def _check(mock):
mock(1, b=2, c=3)
mock.assert_called_with(1, 2, 3)
mock.assert_called_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_with,
1, b=3, c=2)
mock = Mock(spec=Something().meth)
_check(mock)
mock = Mock(spec=Something.cmeth)
_check(mock)
mock = Mock(spec=Something().cmeth)
_check(mock)
mock = Mock(spec=Something.smeth)
_check(mock)
mock = Mock(spec=Something().smeth)
_check(mock)
def test_assert_called_exception_message(self):
msg = "Expected '{0}' to have been called"
with self.assertRaisesRegex(AssertionError, msg.format('mock')):
Mock().assert_called()
with self.assertRaisesRegex(AssertionError, msg.format('test_name')):
Mock(name="test_name").assert_called()
def test_assert_called_once_with(self):
mock = Mock()
mock()
# Will raise an exception if it fails
mock.assert_called_once_with()
mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock.reset_mock()
self.assertRaises(AssertionError, mock.assert_called_once_with)
mock('foo', 'bar', baz=2)
mock.assert_called_once_with('foo', 'bar', baz=2)
mock.reset_mock()
mock('foo', 'bar', baz=2)
self.assertRaises(
AssertionError,
lambda: mock.assert_called_once_with('bob', 'bar', baz=2)
)
def test_assert_called_once_with_call_list(self):
m = Mock()
m(1)
m(2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1), call(2)]"),
lambda: m.assert_called_once_with(2))
def test_assert_called_once_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock.assert_called_once_with(1, 2, 3)
mock.assert_called_once_with(a=1, b=2, c=3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_called_once_with(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
# Mock called more than once => always fails
mock(4, 5, 6)
self.assertRaises(AssertionError, mock.assert_called_once_with,
1, 2, 3)
self.assertRaises(AssertionError, mock.assert_called_once_with,
4, 5, 6)
def test_attribute_access_returns_mocks(self):
mock = Mock()
something = mock.something
self.assertTrue(is_instance(something, Mock), "attribute isn't a mock")
self.assertEqual(mock.something, something,
"different attributes returned for same name")
# Usage example
mock = Mock()
mock.something.return_value = 3
self.assertEqual(mock.something(), 3, "method returned wrong value")
self.assertTrue(mock.something.called,
"method didn't record being called")
def test_attributes_have_name_and_parent_set(self):
mock = Mock()
something = mock.something
self.assertEqual(something._mock_name, "something",
"attribute name not set correctly")
self.assertEqual(something._mock_parent, mock,
"attribute parent not set correctly")
def test_method_calls_recorded(self):
mock = Mock()
mock.something(3, fish=None)
mock.something_else.something(6, cake=sentinel.Cake)
self.assertEqual(mock.something_else.method_calls,
[("something", (6,), {'cake': sentinel.Cake})],
"method calls not recorded correctly")
self.assertEqual(mock.method_calls, [
("something", (3,), {'fish': None}),
("something_else.something", (6,), {'cake': sentinel.Cake})
],
"method calls not recorded correctly")
def test_method_calls_compare_easily(self):
mock = Mock()
mock.something()
self.assertEqual(mock.method_calls, [('something',)])
self.assertEqual(mock.method_calls, [('something', (), {})])
mock = Mock()
mock.something('different')
self.assertEqual(mock.method_calls, [('something', ('different',))])
self.assertEqual(mock.method_calls,
[('something', ('different',), {})])
mock = Mock()
mock.something(x=1)
self.assertEqual(mock.method_calls, [('something', {'x': 1})])
self.assertEqual(mock.method_calls, [('something', (), {'x': 1})])
mock = Mock()
mock.something('different', some='more')
self.assertEqual(mock.method_calls, [
('something', ('different',), {'some': 'more'})
])
def test_only_allowed_methods_exist(self):
for spec in ['something'], ('something',):
for arg in 'spec', 'spec_set':
mock = Mock(**{arg: spec})
# this should be allowed
mock.something
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'something_else'",
getattr, mock, 'something_else'
)
def test_from_spec(self):
class Something(object):
x = 3
__something__ = None
def y(self): pass
def test_attributes(mock):
# should work
mock.x
mock.y
mock.__something__
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute 'z'",
getattr, mock, 'z'
)
self.assertRaisesRegex(
AttributeError,
"Mock object has no attribute '__foobar__'",
getattr, mock, '__foobar__'
)
test_attributes(Mock(spec=Something))
test_attributes(Mock(spec=Something()))
def test_wraps_calls(self):
real = Mock()
mock = Mock(wraps=real)
self.assertEqual(mock(), real())
real.reset_mock()
mock(1, 2, fish=3)
real.assert_called_with(1, 2, fish=3)
def test_wraps_prevents_automatic_creation_of_mocks(self):
class Real(object):
pass
real = Real()
mock = Mock(wraps=real)
self.assertRaises(AttributeError, lambda: mock.new_attr())
def test_wraps_call_with_nondefault_return_value(self):
real = Mock()
mock = Mock(wraps=real)
mock.return_value = 3
self.assertEqual(mock(), 3)
self.assertFalse(real.called)
def test_wraps_attributes(self):
class Real(object):
attribute = Mock()
real = Real()
mock = Mock(wraps=real)
self.assertEqual(mock.attribute(), real.attribute())
self.assertRaises(AttributeError, lambda: mock.fish)
self.assertNotEqual(mock.attribute, real.attribute)
result = mock.attribute.frog(1, 2, fish=3)
Real.attribute.frog.assert_called_with(1, 2, fish=3)
self.assertEqual(result, Real.attribute.frog())
def test_customize_wrapped_object_with_side_effect_iterable_with_default(self):
class Real(object):
def method(self):
return sentinel.ORIGINAL_VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.ORIGINAL_VALUE)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_iterable(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_side_effect_exception(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = RuntimeError
self.assertRaises(RuntimeError, mock.method)
def test_customize_wrapped_object_with_side_effect_function(self):
class Real(object):
def method(self): pass
def side_effect():
return sentinel.VALUE
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = side_effect
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect(self):
# side_effect should always take precedence over return_value.
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, sentinel.VALUE2]
mock.method.return_value = sentinel.WRONG_VALUE
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.VALUE2)
self.assertRaises(StopIteration, mock.method)
def test_customize_wrapped_object_with_return_value_and_side_effect2(self):
# side_effect can return DEFAULT to default to return_value
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = lambda: DEFAULT
mock.method.return_value = sentinel.VALUE
self.assertEqual(mock.method(), sentinel.VALUE)
def test_customize_wrapped_object_with_return_value_and_side_effect_default(self):
class Real(object):
def method(self): pass
real = Real()
mock = Mock(wraps=real)
mock.method.side_effect = [sentinel.VALUE1, DEFAULT]
mock.method.return_value = sentinel.RETURN
self.assertEqual(mock.method(), sentinel.VALUE1)
self.assertEqual(mock.method(), sentinel.RETURN)
self.assertRaises(StopIteration, mock.method)
def test_exceptional_side_effect(self):
mock = Mock(side_effect=AttributeError)
self.assertRaises(AttributeError, mock)
mock = Mock(side_effect=AttributeError('foo'))
self.assertRaises(AttributeError, mock)
def test_baseexceptional_side_effect(self):
mock = Mock(side_effect=KeyboardInterrupt)
self.assertRaises(KeyboardInterrupt, mock)
mock = Mock(side_effect=KeyboardInterrupt('foo'))
self.assertRaises(KeyboardInterrupt, mock)
def test_assert_called_with_message(self):
mock = Mock()
self.assertRaisesRegex(AssertionError, 'not called',
mock.assert_called_with)
def test_assert_called_once_with_message(self):
mock = Mock(name='geoffrey')
self.assertRaisesRegex(AssertionError,
r"Expected 'geoffrey' to be called once\.",
mock.assert_called_once_with)
def test__name__(self):
mock = Mock()
self.assertRaises(AttributeError, lambda: mock.__name__)
mock.__name__ = 'foo'
self.assertEqual(mock.__name__, 'foo')
def test_spec_list_subclass(self):
class Sub(list):
pass
mock = Mock(spec=Sub(['foo']))
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock, 'foo')
def test_spec_class(self):
class X(object):
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_spec_class_no_object_base(self):
class X:
pass
mock = Mock(spec=X)
self.assertIsInstance(mock, X)
mock = Mock(spec=X())
self.assertIsInstance(mock, X)
self.assertIs(mock.__class__, X)
self.assertEqual(Mock().__class__.__name__, 'Mock')
mock = Mock(spec_set=X)
self.assertIsInstance(mock, X)
mock = Mock(spec_set=X())
self.assertIsInstance(mock, X)
def test_setting_attribute_with_spec_set(self):
class X(object):
y = 3
mock = Mock(spec=X)
mock.x = 'foo'
mock = Mock(spec_set=X)
def set_attr():
mock.x = 'foo'
mock.y = 'foo'
self.assertRaises(AttributeError, set_attr)
def test_copy(self):
current = sys.getrecursionlimit()
self.addCleanup(sys.setrecursionlimit, current)
# can't use sys.maxint as this doesn't exist in Python 3
sys.setrecursionlimit(int(10e8))
# this segfaults without the fix in place
copy.copy(Mock())
def test_subclass_with_properties(self):
class SubClass(Mock):
def _get(self):
return 3
def _set(self, value):
raise NameError('strange error')
some_attribute = property(_get, _set)
s = SubClass(spec_set=SubClass)
self.assertEqual(s.some_attribute, 3)
def test():
s.some_attribute = 3
self.assertRaises(NameError, test)
def test():
s.foo = 'bar'
self.assertRaises(AttributeError, test)
def test_setting_call(self):
mock = Mock()
def __call__(self, a):
self._increment_mock_call(a)
return self._mock_call(a)
type(mock).__call__ = __call__
mock('one')
mock.assert_called_with('one')
self.assertRaises(TypeError, mock, 'one', 'two')
def test_dir(self):
mock = Mock()
attrs = set(dir(mock))
type_attrs = set([m for m in dir(Mock) if not m.startswith('_')])
# all public attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
# creates these attributes
mock.a, mock.b
self.assertIn('a', dir(mock))
self.assertIn('b', dir(mock))
# instance attributes
mock.c = mock.d = None
self.assertIn('c', dir(mock))
self.assertIn('d', dir(mock))
# magic methods
mock.__iter__ = lambda s: iter([])
self.assertIn('__iter__', dir(mock))
def test_dir_from_spec(self):
mock = Mock(spec=unittest.TestCase)
testcase_attrs = set(dir(unittest.TestCase))
attrs = set(dir(mock))
# all attributes from the spec are included
self.assertEqual(set(), testcase_attrs - attrs)
# shadow a sys attribute
mock.version = 3
self.assertEqual(dir(mock).count('version'), 1)
def test_filter_dir(self):
patcher = patch.object(mock, 'FILTER_DIR', False)
patcher.start()
try:
attrs = set(dir(Mock()))
type_attrs = set(dir(Mock))
# ALL attributes from the type are included
self.assertEqual(set(), type_attrs - attrs)
finally:
patcher.stop()
def test_dir_does_not_include_deleted_attributes(self):
mock = Mock()
mock.child.return_value = 1
self.assertIn('child', dir(mock))
del mock.child
self.assertNotIn('child', dir(mock))
def test_configure_mock(self):
mock = Mock(foo='bar')
self.assertEqual(mock.foo, 'bar')
mock = MagicMock(foo='bar')
self.assertEqual(mock.foo, 'bar')
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
mock = Mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
mock = Mock()
mock.configure_mock(**kwargs)
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def assertRaisesWithMsg(self, exception, message, func, *args, **kwargs):
# needed because assertRaisesRegex doesn't work easily with newlines
with self.assertRaises(exception) as context:
func(*args, **kwargs)
msg = str(context.exception)
self.assertEqual(msg, message)
def test_assert_called_with_failure_message(self):
mock = NonCallableMock()
actual = 'not called.'
expected = "mock(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
mock.assert_called_with, 1, '2', 3, bar='foo'
)
mock.foo(1, '2', 3, foo='foo')
asserters = [
mock.foo.assert_called_with, mock.foo.assert_called_once_with
]
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, '2', 3, bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, '2', 3, bar='foo'
)
# just kwargs
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(bar='foo')"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, bar='foo'
)
# just args
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo(1, 2, 3)"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual),
meth, 1, 2, 3
)
# empty
for meth in asserters:
actual = "foo(1, '2', 3, foo='foo')"
expected = "foo()"
message = 'expected call not found.\nExpected: %s\nActual: %s'
self.assertRaisesWithMsg(
AssertionError, message % (expected, actual), meth
)
def test_mock_calls(self):
mock = MagicMock()
# need to do this because MagicMock.mock_calls used to just return
# a MagicMock which also returned a MagicMock when __eq__ was called
self.assertIs(mock.mock_calls == [], True)
mock = MagicMock()
mock()
expected = [('', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock.foo()
expected.append(call.foo())
self.assertEqual(mock.mock_calls, expected)
# intermediate mock_calls work too
self.assertEqual(mock.foo.mock_calls, [('', (), {})])
mock = MagicMock()
mock().foo(1, 2, 3, a=4, b=5)
expected = [
('', (), {}), ('().foo', (1, 2, 3), dict(a=4, b=5))
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.return_value.foo.mock_calls,
[('', (1, 2, 3), dict(a=4, b=5))])
self.assertEqual(mock.return_value.mock_calls,
[('foo', (1, 2, 3), dict(a=4, b=5))])
mock = MagicMock()
mock().foo.bar().baz()
expected = [
('', (), {}), ('().foo.bar', (), {}),
('().foo.bar().baz', (), {})
]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().call_list())
for kwargs in dict(), dict(name='bar'):
mock = MagicMock(**kwargs)
int(mock.foo)
expected = [('foo.__int__', (), {})]
self.assertEqual(mock.mock_calls, expected)
mock = MagicMock(**kwargs)
mock.a()()
expected = [('a', (), {}), ('a()', (), {})]
self.assertEqual(mock.mock_calls, expected)
self.assertEqual(mock.a().mock_calls, [call()])
mock = MagicMock(**kwargs)
mock(1)(2)(3)
self.assertEqual(mock.mock_calls, call(1)(2)(3).call_list())
self.assertEqual(mock().mock_calls, call(2)(3).call_list())
self.assertEqual(mock()().mock_calls, call(3).call_list())
mock = MagicMock(**kwargs)
mock(1)(2)(3).a.b.c(4)
self.assertEqual(mock.mock_calls,
call(1)(2)(3).a.b.c(4).call_list())
self.assertEqual(mock().mock_calls,
call(2)(3).a.b.c(4).call_list())
self.assertEqual(mock()().mock_calls,
call(3).a.b.c(4).call_list())
mock = MagicMock(**kwargs)
int(mock().foo.bar().baz())
last_call = ('().foo.bar().baz().__int__', (), {})
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock().mock_calls,
call.foo.bar().baz().__int__().call_list())
self.assertEqual(mock().foo.bar().mock_calls,
call.baz().__int__().call_list())
self.assertEqual(mock().foo.bar().baz.mock_calls,
call().__int__().call_list())
def test_child_mock_call_equal(self):
m = Mock()
result = m()
result.wibble()
# parent looks like this:
self.assertEqual(m.mock_calls, [call(), call().wibble()])
# but child should look like this:
self.assertEqual(result.mock_calls, [call.wibble()])
def test_mock_call_not_equal_leaf(self):
m = Mock()
m.foo().something()
self.assertNotEqual(m.mock_calls[1], call.foo().different())
self.assertEqual(m.mock_calls[0], call.foo())
def test_mock_call_not_equal_non_leaf(self):
m = Mock()
m.foo().bar()
self.assertNotEqual(m.mock_calls[1], call.baz().bar())
self.assertNotEqual(m.mock_calls[0], call.baz())
def test_mock_call_not_equal_non_leaf_params_different(self):
m = Mock()
m.foo(x=1).bar()
# This isn't ideal, but there's no way to fix it without breaking backwards compatibility:
self.assertEqual(m.mock_calls[1], call.foo(x=2).bar())
def test_mock_call_not_equal_non_leaf_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.baz.bar())
def test_mock_call_not_equal_non_leaf_call_versus_attr(self):
m = Mock()
m.foo.bar()
self.assertNotEqual(m.mock_calls[0], call.foo().bar())
def test_mock_call_repr(self):
m = Mock()
m.foo().bar().baz.bob()
self.assertEqual(repr(m.mock_calls[0]), 'call.foo()')
self.assertEqual(repr(m.mock_calls[1]), 'call.foo().bar()')
self.assertEqual(repr(m.mock_calls[2]), 'call.foo().bar().baz.bob()')
def test_mock_call_repr_loop(self):
m = Mock()
m.foo = m
repr(m.foo())
self.assertRegex(repr(m.foo()), r"<Mock name='mock\(\)' id='\d+'>")
def test_mock_calls_contains(self):
m = Mock()
self.assertFalse([call()] in m.mock_calls)
def test_subclassing(self):
class Subclass(Mock):
pass
mock = Subclass()
self.assertIsInstance(mock.foo, Subclass)
self.assertIsInstance(mock(), Subclass)
class Subclass(Mock):
def _get_child_mock(self, **kwargs):
return Mock(**kwargs)
mock = Subclass()
self.assertNotIsInstance(mock.foo, Subclass)
self.assertNotIsInstance(mock(), Subclass)
def test_arg_lists(self):
mocks = [
Mock(),
MagicMock(),
NonCallableMock(),
NonCallableMagicMock()
]
def assert_attrs(mock):
names = 'call_args_list', 'method_calls', 'mock_calls'
for name in names:
attr = getattr(mock, name)
self.assertIsInstance(attr, _CallList)
self.assertIsInstance(attr, list)
self.assertEqual(attr, [])
for mock in mocks:
assert_attrs(mock)
if callable(mock):
mock()
mock(1, 2)
mock(a=3)
mock.reset_mock()
assert_attrs(mock)
mock.foo()
mock.foo.bar(1, a=3)
mock.foo(1).bar().baz(3)
mock.reset_mock()
assert_attrs(mock)
def test_call_args_two_tuple(self):
mock = Mock()
mock(1, a=3)
mock(2, b=4)
self.assertEqual(len(mock.call_args), 2)
self.assertEqual(mock.call_args.args, (2,))
self.assertEqual(mock.call_args.kwargs, dict(b=4))
expected_list = [((1,), dict(a=3)), ((2,), dict(b=4))]
for expected, call_args in zip(expected_list, mock.call_args_list):
self.assertEqual(len(call_args), 2)
self.assertEqual(expected[0], call_args[0])
self.assertEqual(expected[1], call_args[1])
def test_side_effect_iterator(self):
mock = Mock(side_effect=iter([1, 2, 3]))
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
mock = MagicMock(side_effect=['a', 'b', 'c'])
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
mock = Mock(side_effect='ghi')
self.assertEqual([mock(), mock(), mock()], ['g', 'h', 'i'])
self.assertRaises(StopIteration, mock)
class Foo(object):
pass
mock = MagicMock(side_effect=Foo)
self.assertIsInstance(mock(), Foo)
mock = Mock(side_effect=Iter())
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
def test_side_effect_iterator_exceptions(self):
for Klass in Mock, MagicMock:
iterable = (ValueError, 3, KeyError, 6)
m = Klass(side_effect=iterable)
self.assertRaises(ValueError, m)
self.assertEqual(m(), 3)
self.assertRaises(KeyError, m)
self.assertEqual(m(), 6)
def test_side_effect_setting_iterator(self):
mock = Mock()
mock.side_effect = iter([1, 2, 3])
self.assertEqual([mock(), mock(), mock()], [1, 2, 3])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
mock.side_effect = ['a', 'b', 'c']
self.assertEqual([mock(), mock(), mock()], ['a', 'b', 'c'])
self.assertRaises(StopIteration, mock)
side_effect = mock.side_effect
self.assertIsInstance(side_effect, type(iter([])))
this_iter = Iter()
mock.side_effect = this_iter
self.assertEqual([mock(), mock(), mock(), mock()],
['this', 'is', 'an', 'iter'])
self.assertRaises(StopIteration, mock)
self.assertIs(mock.side_effect, this_iter)
def test_side_effect_iterator_default(self):
mock = Mock(return_value=2)
mock.side_effect = iter([1, DEFAULT])
self.assertEqual([mock(), mock()], [1, 2])
def test_assert_has_calls_any_order(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(b=6)
kalls = [
call(1, 2), ({'a': 3},),
((3, 4),), ((), {'a': 3}),
('', (1, 2)), ('', {'a': 3}),
('', (1, 2), {}), ('', (), {'a': 3})
]
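        # Each entry above is an equivalent spelling of a recorded call: a
        # call() object, a bare args/kwargs tuple, or a ('', args, kwargs)
        # form with an empty name.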
for kall in kalls:
mock.assert_has_calls([kall], any_order=True)
for kall in call(1, '2'), call(b=3), call(), 3, None, 'foo':
self.assertRaises(
AssertionError, mock.assert_has_calls,
[kall], any_order=True
)
kall_lists = [
[call(1, 2), call(b=6)],
[call(3, 4), call(1, 2)],
[call(b=6), call(b=6)],
]
for kall_list in kall_lists:
mock.assert_has_calls(kall_list, any_order=True)
kall_lists = [
[call(b=6), call(b=6), call(b=6)],
[call(1, 2), call(1, 2)],
[call(3, 4), call(1, 2), call(5, 7)],
[call(b=6), call(3, 4), call(b=6), call(1, 2), call(b=6)],
]
for kall_list in kall_lists:
self.assertRaises(
AssertionError, mock.assert_has_calls,
kall_list, any_order=True
)
def test_assert_has_calls(self):
kalls1 = [
call(1, 2), ({'a': 3},),
((3, 4),), call(b=6),
('', (1,), {'b': 6}),
]
kalls2 = [call.foo(), call.bar(1)]
kalls2.extend(call.spam().baz(a=3).call_list())
kalls2.extend(call.bam(set(), foo={}).fish([1]).call_list())
mocks = []
for mock in Mock(), MagicMock():
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
mock(1, b=6)
mocks.append((mock, kalls1))
mock = Mock()
mock.foo()
mock.bar(1)
mock.spam().baz(a=3)
mock.bam(set(), foo={}).fish([1])
mocks.append((mock, kalls2))
for mock, kalls in mocks:
for i in range(len(kalls)):
for step in 1, 2, 3:
these = kalls[i:i+step]
mock.assert_has_calls(these)
if len(these) > 1:
self.assertRaises(
AssertionError,
mock.assert_has_calls,
list(reversed(these))
)
def test_assert_has_calls_nested_spec(self):
class Something:
def __init__(self): pass
def meth(self, a, b, c, d=None): pass
class Foo:
def __init__(self, a): pass
def meth1(self, a, b): pass
mock_class = create_autospec(Something)
for m in [mock_class, mock_class()]:
m.meth(1, 2, 3, d=1)
m.assert_has_calls([call.meth(1, 2, 3, d=1)])
m.assert_has_calls([call.meth(1, 2, 3, 1)])
mock_class.reset_mock()
for m in [mock_class, mock_class()]:
self.assertRaises(AssertionError, m.assert_has_calls, [call.Foo()])
m.Foo(1).meth1(1, 2)
m.assert_has_calls([call.Foo(1), call.Foo(1).meth1(1, 2)])
m.Foo.assert_has_calls([call(1), call().meth1(1, 2)])
mock_class.reset_mock()
invalid_calls = [call.meth(1),
call.non_existent(1),
call.Foo().non_existent(1),
call.Foo().meth(1, 2, 3, 4)]
for kall in invalid_calls:
self.assertRaises(AssertionError,
mock_class.assert_has_calls,
[kall]
)
def test_assert_has_calls_nested_without_spec(self):
m = MagicMock()
m().foo().bar().baz()
m.one().two().three()
calls = call.one().two().three().call_list()
m.assert_has_calls(calls)
def test_assert_has_calls_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock(10, 11, c=12)
calls = [
('', (1, 2, 3), {}),
('', (4, 5, 6), {'d': 7}),
((10, 11, 12), {}),
]
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
# Reversed order
calls = list(reversed(calls))
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls)
mock.assert_has_calls(calls, any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[1:])
mock.assert_has_calls(calls[1:], any_order=True)
with self.assertRaises(AssertionError):
mock.assert_has_calls(calls[:-1])
mock.assert_has_calls(calls[:-1], any_order=True)
def test_assert_has_calls_not_matching_spec_error(self):
def f(x=None): pass
mock = Mock(spec=f)
mock(1)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape('Calls not found.\n'
'Expected: [call()]\n'
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call()])
self.assertIsNone(cm.exception.__cause__)
with self.assertRaisesRegex(
AssertionError,
'^{}$'.format(
re.escape(
'Error processing expected calls.\n'
"Errors: [None, TypeError('too many positional arguments')]\n"
"Expected: [call(), call(1, 2)]\n"
'Actual: [call(1)]'))) as cm:
mock.assert_has_calls([call(), call(1, 2)])
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_assert_any_call(self):
mock = Mock()
mock(1, 2)
mock(a=3)
mock(1, b=6)
mock.assert_any_call(1, 2)
mock.assert_any_call(a=3)
mock.assert_any_call(1, b=6)
self.assertRaises(
AssertionError,
mock.assert_any_call
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
1, 3
)
self.assertRaises(
AssertionError,
mock.assert_any_call,
a=4
)
def test_assert_any_call_with_function_spec(self):
def f(a, b, c, d=None): pass
mock = Mock(spec=f)
mock(1, b=2, c=3)
mock(4, 5, c=6, d=7)
mock.assert_any_call(1, 2, 3)
mock.assert_any_call(a=1, b=2, c=3)
mock.assert_any_call(4, 5, 6, 7)
mock.assert_any_call(a=4, b=5, c=6, d=7)
self.assertRaises(AssertionError, mock.assert_any_call,
1, b=3, c=2)
# Expected call doesn't match the spec's signature
with self.assertRaises(AssertionError) as cm:
mock.assert_any_call(e=8)
self.assertIsInstance(cm.exception.__cause__, TypeError)
def test_mock_calls_create_autospec(self):
def f(a, b): pass
obj = Iter()
obj.f = f
funcs = [
create_autospec(f),
create_autospec(obj).f
]
for func in funcs:
func(1, 2)
func(3, 4)
self.assertEqual(
func.mock_calls, [call(1, 2), call(3, 4)]
)
#Issue21222
def test_create_autospec_with_name(self):
m = mock.create_autospec(object(), name='sweet_func')
self.assertIn('sweet_func', repr(m))
#Issue23078
def test_create_autospec_classmethod_and_staticmethod(self):
class TestClass:
@classmethod
def class_method(cls): pass
@staticmethod
def static_method(): pass
for method in ('class_method', 'static_method'):
with self.subTest(method=method):
mock_method = mock.create_autospec(getattr(TestClass, method))
mock_method()
mock_method.assert_called_once_with()
self.assertRaises(TypeError, mock_method, 'extra_arg')
#Issue21238
def test_mock_unsafe(self):
m = Mock()
msg = "Attributes cannot start with 'assert' or 'assret'"
with self.assertRaisesRegex(AttributeError, msg):
m.assert_foo_call()
with self.assertRaisesRegex(AttributeError, msg):
m.assret_foo_call()
m = Mock(unsafe=True)
m.assert_foo_call()
m.assret_foo_call()
#Issue21262
def test_assert_not_called(self):
m = Mock()
m.hello.assert_not_called()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_not_called()
def test_assert_not_called_message(self):
m = Mock()
m(1, 2)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2)]"),
m.assert_not_called)
def test_assert_called(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called()
m.hello()
m.hello.assert_called()
m.hello()
m.hello.assert_called()
def test_assert_called_once(self):
m = Mock()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
m.hello()
m.hello.assert_called_once()
m.hello()
with self.assertRaises(AssertionError):
m.hello.assert_called_once()
def test_assert_called_once_message(self):
m = Mock()
m(1, 2)
m(3)
self.assertRaisesRegex(AssertionError,
re.escape("Calls: [call(1, 2), call(3)]"),
m.assert_called_once)
def test_assert_called_once_message_not_called(self):
m = Mock()
with self.assertRaises(AssertionError) as e:
m.assert_called_once()
self.assertNotIn("Calls:", str(e.exception))
#Issue37212 printout of keyword args now preserves the original order
def test_ordered_call_signature(self):
m = Mock()
m.hello(name='hello', daddy='hero')
text = "call(name='hello', daddy='hero')"
self.assertEqual(repr(m.hello.call_args), text)
#Issue21270 overrides tuple methods for mock.call objects
def test_override_tuple_methods(self):
c = call.count()
        i = call.index(132, 'hello')
m = Mock()
m.count()
m.index(132,"hello")
self.assertEqual(m.method_calls[0], c)
self.assertEqual(m.method_calls[1], i)
def test_reset_return_sideeffect(self):
        m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(return_value=True, side_effect=True)
self.assertIsInstance(m.return_value, Mock)
self.assertEqual(m.side_effect, None)
def test_reset_return(self):
        m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(return_value=True)
self.assertIsInstance(m.return_value, Mock)
self.assertNotEqual(m.side_effect, None)
def test_reset_sideeffect(self):
        m = Mock(return_value=10, side_effect=[2, 3])
m.reset_mock(side_effect=True)
self.assertEqual(m.return_value, 10)
self.assertEqual(m.side_effect, None)
def test_mock_add_spec(self):
class _One(object):
one = 1
class _Two(object):
two = 2
class Anything(object):
one = two = three = 'four'
klasses = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
]
for Klass in list(klasses):
klasses.append(lambda K=Klass: K(spec=Anything))
klasses.append(lambda K=Klass: K(spec_set=Anything))
for Klass in klasses:
for kwargs in dict(), dict(spec_set=True):
mock = Klass()
#no error
mock.one, mock.two, mock.three
for One, Two in [(_One, _Two), (['one'], ['two'])]:
for kwargs in dict(), dict(spec_set=True):
mock.mock_add_spec(One, **kwargs)
mock.one
self.assertRaises(
AttributeError, getattr, mock, 'two'
)
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
mock.mock_add_spec(Two, **kwargs)
self.assertRaises(
AttributeError, getattr, mock, 'one'
)
mock.two
self.assertRaises(
AttributeError, getattr, mock, 'three'
)
if 'spec_set' in kwargs:
self.assertRaises(
AttributeError, setattr, mock, 'three', None
)
# note that creating a mock, setting an instance attribute, and
# *then* setting a spec doesn't work. Not the intended use case
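    # A minimal sketch of that unsupported pattern (illustrative only; the
    # exact failure mode is left unspecified here):
    #   m = Mock()
    #   m.three = 3               # instance attribute set first
    #   m.mock_add_spec(['one'])  # spec applied afterwards -- not supported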
def test_mock_add_spec_magic_methods(self):
for Klass in MagicMock, NonCallableMagicMock:
mock = Klass()
int(mock)
mock.mock_add_spec(object)
self.assertRaises(TypeError, int, mock)
mock = Klass()
mock['foo']
            mock.__int__.return_value = 4
mock.mock_add_spec(int)
self.assertEqual(int(mock), 4)
self.assertRaises(TypeError, lambda: mock['foo'])
def test_adding_child_mock(self):
for Klass in (NonCallableMock, Mock, MagicMock, NonCallableMagicMock,
AsyncMock):
mock = Klass()
mock.foo = Mock()
mock.foo()
self.assertEqual(mock.method_calls, [call.foo()])
self.assertEqual(mock.mock_calls, [call.foo()])
mock = Klass()
mock.bar = Mock(name='name')
mock.bar()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
# mock with an existing _new_parent but no name
mock = Klass()
mock.baz = MagicMock()()
mock.baz()
self.assertEqual(mock.method_calls, [])
self.assertEqual(mock.mock_calls, [])
def test_adding_return_value_mock(self):
for Klass in Mock, MagicMock:
mock = Klass()
mock.return_value = MagicMock()
mock()()
self.assertEqual(mock.mock_calls, [call(), call()()])
def test_manager_mock(self):
class Foo(object):
one = 'one'
two = 'two'
manager = Mock()
p1 = patch.object(Foo, 'one')
p2 = patch.object(Foo, 'two')
mock_one = p1.start()
self.addCleanup(p1.stop)
mock_two = p2.start()
self.addCleanup(p2.stop)
manager.attach_mock(mock_one, 'one')
manager.attach_mock(mock_two, 'two')
Foo.two()
Foo.one()
self.assertEqual(manager.mock_calls, [call.two(), call.one()])
def test_magic_methods_mock_calls(self):
for Klass in Mock, MagicMock:
m = Klass()
m.__int__ = Mock(return_value=3)
m.__float__ = MagicMock(return_value=3.0)
int(m)
float(m)
self.assertEqual(m.mock_calls, [call.__int__(), call.__float__()])
self.assertEqual(m.method_calls, [])
def test_mock_open_reuse_issue_21750(self):
mocked_open = mock.mock_open(read_data='data')
f1 = mocked_open('a-name')
f1_data = f1.read()
f2 = mocked_open('another-name')
f2_data = f2.read()
self.assertEqual(f1_data, f2_data)
def test_mock_open_dunder_iter_issue(self):
# Test dunder_iter method generates the expected result and
# consumes the iterator.
mocked_open = mock.mock_open(read_data='Remarkable\nNorwegian Blue')
f1 = mocked_open('a-name')
lines = [line for line in f1]
self.assertEqual(lines[0], 'Remarkable\n')
self.assertEqual(lines[1], 'Norwegian Blue')
self.assertEqual(list(f1), [])
def test_mock_open_using_next(self):
mocked_open = mock.mock_open(read_data='1st line\n2nd line\n3rd line')
f1 = mocked_open('a-name')
line1 = next(f1)
line2 = f1.__next__()
lines = [line for line in f1]
self.assertEqual(line1, '1st line\n')
self.assertEqual(line2, '2nd line\n')
self.assertEqual(lines[0], '3rd line')
self.assertEqual(list(f1), [])
with self.assertRaises(StopIteration):
next(f1)
def test_mock_open_write(self):
# Test exception in file writing write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
def attempt():
tempfile.NamedTemporaryFile().write('asd')
self.assertRaises(OSError, attempt)
def test_mock_open_alter_readline(self):
mopen = mock.mock_open(read_data='foo\nbarn')
        mopen.return_value.readline.side_effect = lambda *args: 'abc'
first = mopen().readline()
second = mopen().readline()
self.assertEqual('abc', first)
self.assertEqual('abc', second)
def test_mock_open_after_eof(self):
# read, readline and readlines should work after end of file.
_open = mock.mock_open(read_data='foo')
h = _open('bar')
h.read()
self.assertEqual('', h.read())
self.assertEqual('', h.read())
self.assertEqual('', h.readline())
self.assertEqual('', h.readline())
self.assertEqual([], h.readlines())
self.assertEqual([], h.readlines())
def test_mock_parents(self):
for Klass in Mock, MagicMock:
m = Klass()
original_repr = repr(m)
m.return_value = m
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m.reset_mock()
self.assertIs(m(), m)
self.assertEqual(repr(m), original_repr)
m = Klass()
m.b = m.a
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m.reset_mock()
self.assertIn("name='mock.a'", repr(m.b))
self.assertIn("name='mock.a'", repr(m.a))
m = Klass()
original_repr = repr(m)
m.a = m()
m.a.return_value = m
self.assertEqual(repr(m), original_repr)
self.assertEqual(repr(m.a()), original_repr)
def test_attach_mock(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in classes:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'bar')
self.assertIs(m.bar, m2)
self.assertIn("name='mock.bar'", repr(m2))
m.bar.baz(1)
self.assertEqual(m.mock_calls, [call.bar.baz(1)])
self.assertEqual(m.method_calls, [call.bar.baz(1)])
def test_attach_mock_return_value(self):
classes = Mock, MagicMock, NonCallableMagicMock, NonCallableMock
for Klass in Mock, MagicMock:
for Klass2 in classes:
m = Klass()
m2 = Klass2(name='foo')
m.attach_mock(m2, 'return_value')
self.assertIs(m(), m2)
self.assertIn("name='mock()'", repr(m2))
m2.foo()
self.assertEqual(m.mock_calls, call().foo().call_list())
def test_attach_mock_patch_autospec(self):
parent = Mock()
with mock.patch(f'{__name__}.something', autospec=True) as mock_func:
self.assertEqual(mock_func.mock._extract_mock_name(), 'something')
parent.attach_mock(mock_func, 'child')
parent.child(1)
something(2)
mock_func(3)
parent_calls = [call.child(1), call.child(2), call.child(3)]
child_calls = [call(1), call(2), call(3)]
self.assertEqual(parent.mock_calls, parent_calls)
self.assertEqual(parent.child.mock_calls, child_calls)
self.assertEqual(something.mock_calls, child_calls)
self.assertEqual(mock_func.mock_calls, child_calls)
self.assertIn('mock.child', repr(parent.child.mock))
self.assertEqual(mock_func.mock._extract_mock_name(), 'mock.child')
def test_attach_mock_patch_autospec_signature(self):
with mock.patch(f'{__name__}.Something.meth', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_meth')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_meth(mock.ANY, 1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
mocked.assert_has_calls([call(mock.ANY, 1, 2, 3, d=4)])
with mock.patch(f'{__name__}.something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_func')
something(1)
manager.assert_has_calls([call.attach_func(1)])
something.assert_has_calls([call(1)])
mocked.assert_has_calls([call(1)])
with mock.patch(f'{__name__}.Something', autospec=True) as mocked:
manager = Mock()
manager.attach_mock(mocked, 'attach_obj')
obj = Something()
obj.meth(1, 2, 3, d=4)
manager.assert_has_calls([call.attach_obj(),
call.attach_obj().meth(1, 2, 3, d=4)])
obj.meth.assert_has_calls([call(1, 2, 3, d=4)])
mocked.assert_has_calls([call(), call().meth(1, 2, 3, d=4)])
def test_attribute_deletion(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
self.assertTrue(hasattr(mock, 'm'))
del mock.m
self.assertFalse(hasattr(mock, 'm'))
del mock.f
self.assertFalse(hasattr(mock, 'f'))
self.assertRaises(AttributeError, getattr, mock, 'f')
def test_mock_does_not_raise_on_repeated_attribute_deletion(self):
# bpo-20239: Assigning and deleting twice an attribute raises.
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
mock.foo = 3
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 3)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
mock.foo = 4
self.assertTrue(hasattr(mock, 'foo'))
self.assertEqual(mock.foo, 4)
del mock.foo
self.assertFalse(hasattr(mock, 'foo'))
def test_mock_raises_when_deleting_nonexistent_attribute(self):
for mock in (Mock(), MagicMock(), NonCallableMagicMock(),
NonCallableMock()):
del mock.foo
with self.assertRaises(AttributeError):
del mock.foo
def test_reset_mock_does_not_raise_on_attr_deletion(self):
# bpo-31177: reset_mock should not raise AttributeError when attributes
# were deleted in a mock instance
mock = Mock()
mock.child = True
del mock.child
mock.reset_mock()
self.assertFalse(hasattr(mock, 'child'))
def test_class_assignable(self):
for mock in Mock(), MagicMock():
self.assertNotIsInstance(mock, int)
mock.__class__ = int
self.assertIsInstance(mock, int)
mock.foo
def test_name_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".name")
self.assertIsNotNone(call.name)
self.assertEqual(type(call.name), _Call)
self.assertEqual(type(call.name().name), _Call)
def test_parent_attribute_of_call(self):
# bpo-35357: _Call should not disclose any attributes whose names
# may clash with popular ones (such as ".parent")
self.assertIsNotNone(call.parent)
self.assertEqual(type(call.parent), _Call)
self.assertEqual(type(call.parent().parent), _Call)
def test_parent_propagation_with_create_autospec(self):
def foo(a, b): pass
mock = Mock()
mock.child = create_autospec(foo)
mock.child(1, 2)
self.assertRaises(TypeError, mock.child, 1)
self.assertEqual(mock.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(mock.child.mock))
def test_parent_propagation_with_autospec_attach_mock(self):
def foo(a, b): pass
parent = Mock()
parent.attach_mock(create_autospec(foo, name='bar'), 'child')
parent.child(1, 2)
self.assertRaises(TypeError, parent.child, 1)
self.assertEqual(parent.child.mock_calls, [call.child(1, 2)])
self.assertIn('mock.child', repr(parent.child.mock))
def test_isinstance_under_settrace(self):
# bpo-36593 : __class__ is not set for a class that has __class__
# property defined when it's used with sys.settrace(trace) set.
        # Delete the module to force a reimport with the tracing function set,
        # and restore the old reference afterwards, since other tests depend on
        # unittest.mock.patch. In testpatch.PatchTest,
        # test_patch_dict_test_prefix and test_patch_test_prefix leave the
        # patched objects out of sync if the reference is not restored.
old_patch = unittest.mock.patch
# Directly using __setattr__ on unittest.mock causes current imported
# reference to be updated. Use a lambda so that during cleanup the
# re-imported new reference is updated.
self.addCleanup(lambda patch: setattr(unittest.mock, 'patch', patch),
old_patch)
with patch.dict('sys.modules'):
del sys.modules['unittest.mock']
# This trace will stop coverage being measured ;-)
def trace(frame, event, arg): # pragma: no cover
return trace
self.addCleanup(sys.settrace, sys.gettrace())
sys.settrace(trace)
from unittest.mock import (
Mock, MagicMock, NonCallableMock, NonCallableMagicMock
)
mocks = [
Mock, MagicMock, NonCallableMock, NonCallableMagicMock, AsyncMock
]
for mock in mocks:
obj = mock(spec=Something)
self.assertIsInstance(obj, Something)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "88f81ab761ee4bd12ee94d80fb7687be",
"timestamp": "",
"source": "github",
"line_count": 2085,
"max_line_length": 98,
"avg_line_length": 32.96306954436451,
"alnum_prop": 0.5443778372715633,
"repo_name": "xyuanmu/XX-Net",
"id": "1cde45e9aea5550b42c2150306842552b902ef79",
"size": "68728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3.8.2/Lib/unittest/test/testmock/testmock.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4145"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "94951"
},
{
"name": "HTML",
"bytes": "252022"
},
{
"name": "JavaScript",
"bytes": "22405"
},
{
"name": "Python",
"bytes": "15474534"
},
{
"name": "Shell",
"bytes": "10208"
},
{
"name": "Visual Basic",
"bytes": "1795"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as file:
long_description = file.read()
setup(
author='Silas Boyd-Wickizer',
author_email='silas@godaddy.com',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
),
description='Transaction barriers for Django and Celery.',
download_url='https://github.com/godaddy/django_transaction_barrier/tarball/0.3',
include_package_data=True,
    install_requires=('Django>=1.4.0,<1.8.0', 'celery>=3.0.0,<4.0.0'),
keywords=('django', 'transaction', 'celery', 'atomic'),
license=open('LICENSE.txt').read(),
long_description=long_description,
name='django-transaction-barrier',
packages=('django_transaction_barrier', 'django_transaction_barrier.migrations'),
url='https://github.com/godaddy/django_transaction_barrier',
version='0.3',
zip_safe=True,
)
|
{
"content_hash": "62fffde813cfcd2fe80a1e6fa289e153",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 83,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.6957671957671958,
"repo_name": "godaddy/django_transaction_barrier",
"id": "d81d079de1d95f57a551871e820cb44a24272170",
"size": "1134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19462"
},
{
"name": "Shell",
"bytes": "176"
}
],
"symlink_target": ""
}
|
"""Tests for IdentityOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class IdentityOpTest(test.TestCase):
@test_util.run_deprecated_v1
def testInt32_6(self):
with self.cached_session():
value = array_ops.identity([1, 2, 3, 4, 5, 6]).eval()
self.assertAllEqual(np.array([1, 2, 3, 4, 5, 6]), value)
@test_util.run_deprecated_v1
def testInt32_2_3(self):
with self.cached_session():
inp = constant_op.constant([10, 20, 30, 40, 50, 60], shape=[2, 3])
value = array_ops.identity(inp).eval()
self.assertAllEqual(np.array([[10, 20, 30], [40, 50, 60]]), value)
@test_util.run_deprecated_v1
def testString(self):
source = [b"A", b"b", b"C", b"d", b"E", b"f"]
with self.cached_session():
value = array_ops.identity(source).eval()
self.assertAllEqual(source, value)
def testIdentityShape(self):
with self.cached_session():
shape = [2, 3]
array_2x3 = [[1, 2, 3], [6, 5, 4]]
tensor = constant_op.constant(array_2x3)
      self.assertEqual(shape, tensor.get_shape())
      self.assertEqual(shape, array_ops.identity(tensor).get_shape())
      self.assertEqual(shape, array_ops.identity(array_2x3).get_shape())
      self.assertEqual(shape,
                       array_ops.identity(np.array(array_2x3)).get_shape())
@test_util.run_v1_only("b/120545219")
def testRefIdentityShape(self):
with self.cached_session():
shape = [2, 3]
tensor = variables.VariableV1(
constant_op.constant(
[[1, 2, 3], [6, 5, 4]], dtype=dtypes.int32))
self.assertEquals(shape, tensor.get_shape())
self.assertEquals(shape, gen_array_ops.ref_identity(tensor).get_shape())
if __name__ == "__main__":
test.main()
|
{
"content_hash": "63e71968c8d06ab898dd52057c29ab9f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 78,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6620111731843575,
"repo_name": "ghchinoy/tensorflow",
"id": "40ec9db4226a89305732683118f7f906db1ba965",
"size": "2837",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/identity_op_py_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
}
|
import nba, os
import sys
for netdev in nba.get_netdevices():
print(netdev)
for coproc in nba.get_coprocessors():
print(coproc)
node_cpus = nba.get_cpu_node_mapping()
for node_id, cpus in enumerate(node_cpus):
print('Cores in NUMA node {0}: [{1}]'.format(node_id, ', '.join(map(str, cpus))))
# The values read by the framework are:
# - system_params
# - io_threads
# - comp_threads
# - coproc_threads
# - queues
# - thread_connections
# 64, 64, 192 are the optimal parameters for ipv4-router
system_params = {
'IO_BATCH_SIZE': int(os.environ.get('NBA_IO_BATCH_SIZE', 64)),
'COMP_BATCH_SIZE': int(os.environ.get('NBA_COMP_BATCH_SIZE', 64)),
'COPROC_PPDEPTH': int(os.environ.get('NBA_COPROC_PPDEPTH', 32)),
'COPROC_CTX_PER_COMPTHREAD': 1,
}
print("# logical cores: {0}, # physical cores {1} (hyperthreading {2})".format(
nba.num_logical_cores, nba.num_physical_cores,
"enabled" if nba.ht_enabled else "disabled"
))
_ht_diff = nba.num_physical_cores if nba.ht_enabled else 0
io_threads = [
# core_id, list of (port_id, rxq_idx)
nba.IOThread(core_id=node_cpus[0][0], attached_rxqs=[(0, 0), (1, 0), (2, 0), (3, 0)], mode='normal'),
nba.IOThread(core_id=node_cpus[0][1], attached_rxqs=[(0, 1), (1, 1), (2, 1), (3, 1)], mode='normal'),
nba.IOThread(core_id=node_cpus[0][2], attached_rxqs=[(0, 2), (1, 2), (2, 2), (3, 2)], mode='normal'),
]
comp_threads = [
# core_id
nba.CompThread(core_id=node_cpus[0][0] + _ht_diff),
nba.CompThread(core_id=node_cpus[0][1] + _ht_diff),
nba.CompThread(core_id=node_cpus[0][2] + _ht_diff),
]
coproc_threads = [
# core_id, device_id
nba.CoprocThread(core_id=node_cpus[0][7] + _ht_diff, device_id=0),
]
comp_input_queues = [
# node_id, template
nba.Queue(node_id=0, template='swrx'),
nba.Queue(node_id=0, template='swrx'),
nba.Queue(node_id=0, template='swrx'),
]
coproc_input_queues = [
# node_id, template
nba.Queue(node_id=0, template='taskin'),
]
coproc_completion_queues = [
# node_id, template
nba.Queue(node_id=0, template='taskout'),
nba.Queue(node_id=0, template='taskout'),
nba.Queue(node_id=0, template='taskout'),
]
queues = comp_input_queues + coproc_input_queues + coproc_completion_queues
thread_connections = [
# from-thread, to-thread, queue-instance
(io_threads[0], comp_threads[0], comp_input_queues[0]),
(io_threads[1], comp_threads[1], comp_input_queues[1]),
(io_threads[2], comp_threads[2], comp_input_queues[2]),
(comp_threads[0], coproc_threads[0], coproc_input_queues[0]),
(comp_threads[1], coproc_threads[0], coproc_input_queues[0]),
(comp_threads[2], coproc_threads[0], coproc_input_queues[0]),
(coproc_threads[0], comp_threads[0], coproc_completion_queues[0]),
(coproc_threads[0], comp_threads[1], coproc_completion_queues[1]),
(coproc_threads[0], comp_threads[2], coproc_completion_queues[2]),
]
|
{
"content_hash": "5e1781149255f617bc0b14453442d6ad",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 105,
"avg_line_length": 34.642857142857146,
"alnum_prop": 0.6439862542955327,
"repo_name": "ANLAB-KAIST/NBA",
"id": "616c145e82c7aa29aeea387781b1b53be7880ef7",
"size": "2934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/rss-triplecore-singlenode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3592"
},
{
"name": "C++",
"bytes": "2204332"
},
{
"name": "Click",
"bytes": "16961"
},
{
"name": "Cuda",
"bytes": "143730"
},
{
"name": "Python",
"bytes": "179444"
},
{
"name": "Shell",
"bytes": "14263"
}
],
"symlink_target": ""
}
|
from yz_js_django_tpl import BaseJsFilter, JsProcessorRegistry
class HashJsFilter(BaseJsFilter):
"""
    Converts the "hash" filter in Django templates to a native JavaScript
    hash lookup, e.g. {{exampleVar|hash:varB}}
Examples:
>>> from yz_js_django_tpl import TemplateJsNode,JsTplSettings
>>> from yz_js_django_tpl.customfilters import *
>>> JsTplSettings.CONFIG['VERSAGER_MODE'] = False
>>> ###############
>>> #test django "hash" filter
>>> ###############
>>> js_tpl = TemplateJsNode('{% load hash %}Dict var1 with hash varB{{ varA|hash:varB }}')
>>> js_tpl.render()
u'function(varA,varB){return "Dict var1 with hash varB"+varA[varB]}'
"""
expected_filter_funcname = 'hash'
def render(self):
return '%s[%s]' % (self.expr, self.arg)
JsProcessorRegistry.register_js_filter(HashJsFilter)
|
{
"content_hash": "67f01402c03944a1f9835e0744c6a269",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 94,
"avg_line_length": 37.52173913043478,
"alnum_prop": 0.6361529548088065,
"repo_name": "comolongo/Yz-Javascript-Django-Template-Compiler",
"id": "cb73ba976dfdaff91708f8057dbdd234b426f2b4",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "customfilters/HashJsFilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "876"
},
{
"name": "Python",
"bytes": "54178"
}
],
"symlink_target": ""
}
|
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'The Hog and Home Report'
copyright = u'2014, Colin Powell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheHogandHomeReportdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'TheHogandHomeReport.tex', u'The Hog and Home Report Documentation',
u'Colin Powell', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'thehogandhomereport', u'The Hog and Home Report Documentation',
[u'Colin Powell'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'TheHogandHomeReport', u'The Hog and Home Report Documentation',
u'Colin Powell', 'TheHogandHomeReport', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
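# Build sketch (hedged: the standard Sphinx invocation run from this docs/
# directory; not taken from this repository's Makefile):
#
#   sphinx-build -b html . _build/html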
|
{
"content_hash": "b93bb2620c1ec7ef7ac70d6f49189caa",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 80,
"avg_line_length": 32.15983606557377,
"alnum_prop": 0.7075315407161973,
"repo_name": "powellc/hogandhomereport",
"id": "84adf0b5e70f33a0e137773e6103b95019e65277",
"size": "8283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19216"
},
{
"name": "Shell",
"bytes": "8435"
}
],
"symlink_target": ""
}
|
"""
This module contains the 'create_password' menu node.
"""
from hashlib import sha256
from textwrap import dedent
from menu.password import LEN_PASSWD
def create_password(caller, input):
"""Ask the user to create a password.
This node creates and validates a new password for this
account. It then follows up with the confirmation
(confirm_password).
"""
text = ""
options = (
{
"key": "b",
"desc": "Go back to the login screen.",
"goto": "start",
},
{
"key": "_default",
"desc": "Enter your password.",
"goto": "create_password",
},
)
password = input.strip()
playername = caller.db._playername
if len(password) < LEN_PASSWD:
# The password is too short
text = dedent("""
|rYour password must be at least {} characters long.|n
Type |yb|n to return to the login screen.
Or enter another password.
""".strip("\n")).format(LEN_PASSWD)
else:
        # Redirects to the "confirm_password" node
caller.db._password = sha256(password).hexdigest()
text = "Enter your password again."
        options = (
            {
                "key": "_default",
                "desc": "Enter the password again.",
                "goto": "confirm_password",
            },
        )
return text, options
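# A minimal sketch of the companion "confirm_password" node (hedged: the real
# module may differ; this only illustrates the hand-off described above):
#
# def confirm_password(caller, input):
#     """Compare the re-entered password against caller.db._password."""
#     if sha256(input.strip()).hexdigest() == caller.db._password:
#         text = "Password confirmed."
#         options = (
#             {"key": "_default", "desc": "Continue.", "goto": "start"},
#         )
#     else:
#         text = "The passwords do not match. Enter your password again."
#         options = (
#             {"key": "_default", "desc": "Enter your password.",
#              "goto": "create_password"},
#         )
#     return text, options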
|
{
"content_hash": "91416b574d864c4a8c5cc5824343d136",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 66,
"avg_line_length": 26.685185185185187,
"alnum_prop": 0.5343511450381679,
"repo_name": "vlegoff/mud",
"id": "46184f114b00a6184b53ec5b3dfa54d6a1db87f8",
"size": "1443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menu/create_password.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "98485"
}
],
"symlink_target": ""
}
|
import os
def src_dir_path():
r"""The path to the top of the MenpoDetect Python package.
Useful for locating where the models folder is stored.
Returns
-------
path : `str`
The full path to the top of the MenpoDetect package
"""
return os.path.split(os.path.abspath(__file__))[0]
def models_dir_path():
r"""The path to the models directory of the MenpoDetect Python package.
Returns
-------
path : `str`
The full path to the models directory of the MenpoDetect package
"""
return os.path.join(src_dir_path(), 'models')
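# Usage sketch (hedged: assumes menpodetect is importable; the printed paths
# depend on where the package is installed):
#
# from menpodetect.paths import src_dir_path, models_dir_path
# print(src_dir_path())     # e.g. .../site-packages/menpodetect
# print(models_dir_path())  # the 'models' subdirectory of the above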
|
{
"content_hash": "896fe70e0666b52550c1382cc8eb1bfb",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 75,
"avg_line_length": 23.76,
"alnum_prop": 0.6313131313131313,
"repo_name": "yuxiang-zhou/menpodetect",
"id": "bb824b5e1ded036a4617744de41e16eda1332cd5",
"size": "594",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "menpodetect/paths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "139804"
},
{
"name": "Shell",
"bytes": "119"
}
],
"symlink_target": ""
}
|
import datetime as dt
from flask.ext.login import UserMixin
from vBurgundy.extensions import bcrypt
from vBurgundy.database import (
Column,
db,
Model,
ReferenceCol,
relationship,
SurrogatePK,
)
class Role(SurrogatePK, Model):
__tablename__ = 'roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = ReferenceCol('users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
__tablename__ = 'users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=False)
#: The hashed password
password = Column(db.String(128), nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
def __init__(self, username, email, password=None, **kwargs):
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
return bcrypt.check_password_hash(self.password, value)
@property
def full_name(self):
return "{0} {1}".format(self.first_name, self.last_name)
def __repr__(self):
return '<User({username!r})>'.format(username=self.username)
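# Usage sketch (hedged: assumes an application context with the database
# initialised; save() comes from the project's CRUD mixin, which is an
# assumption, not shown in this module):
#
# user = User(username='alice', email='alice@example.com', password='s3cret')
# user.save()
# user.check_password('s3cret')              # -> True
# user.first_name, user.last_name = 'Ada', 'Lovelace'
# user.full_name                             # -> 'Ada Lovelace'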
|
{
"content_hash": "c8433e98725710d72a18fef526a3eadf",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 80,
"avg_line_length": 31.203389830508474,
"alnum_prop": 0.6512764801738186,
"repo_name": "michaelrice/vBurgundy",
"id": "8bf4ecc87b0e3d1d30de21a5a8a285d80e575686",
"size": "1865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vBurgundy/user/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "198994"
},
{
"name": "JavaScript",
"bytes": "240900"
},
{
"name": "Python",
"bytes": "30193"
}
],
"symlink_target": ""
}
|
import sys, os, datetime, configparser
import numpy as np
def maptext(char):
if char.isdigit(): return 'NUM'
elif char.isalpha(): return char.lower()
return char
def count(filename, vocab = dict()):
for line in open(filename):
for word in map(maptext, line.split()):
if word in vocab: vocab[word] += 1
else: vocab[word] = 1
return vocab
def writetofile(filenamein, filenameout, vocab):
with open(filenameout, 'w') as fileout:
for line in open(filenamein):
for word in map(maptext, line.split()):
fileout.write(str(vocab.index(word) + 1) + ' ')
fileout.write('\n')
def readindexfile(filename):
maximum, vocabulary, reverse = 0, {'BLANK': 0}, {0: 'BLANK'}
for line in open(filename):
index, token = line.split('\t')
vocabulary[token.strip()] = int(index)
reverse[int(index)] = token.strip()
		maximum = max(maximum, int(index))
return vocabulary, reverse, maximum
def readalignfile(file1, file2, lengths):
alignments1, alignments2 = list(), list()
alignments = list()
for line1, line2, length in zip(open(file1), open(file2), lengths):
alignment1, alignment2 = [list() for _ in xrange(length)], [list() for _ in xrange(length)]
for align in line1.strip().split(' '):
left, right = align.split('-')
alignment1[int(right)].append(left)
for align in line2.strip().split(' '):
left, right = align.split('-')
alignment2[int(right)].append(left)
alignments.append(';'.join([' '.join(align1) + ',' + ' '.join(align2) for align1, align2 in zip(alignment1, alignment2)]))
return alignments
def readword2vec(filename, vocabulary = dict()):
with open(filename) as filein:
next(filein)
for line in filein:
tokens = line.split(' ')
vocabulary[''.join(map(maptext, tokens[0].strip()))] = [float(dimension) for dimension in tokens[1: ]]
return vocabulary
if __name__ == '__main__':
config = configparser.ConfigParser(interpolation = configparser.ExtendedInterpolation())
config.read(sys.argv[1])
directory, vocab = config.get('global', 'data'), dict()
for folder in ['train', 'dev', 'test']:
for filename in ['src', 'mt', 'pe']:
vocab = count('%s/%s.%s' %(directory, folder, filename), vocab)
print datetime.datetime.now(), 'read %s.%s' %(folder, filename)
vocab = map(lambda x: x[0], sorted(vocab.items(), key = lambda x: x[1], reverse = True))
with open('%s/map.dict' %directory, 'w') as fileout:
for key, word in enumerate(vocab):
fileout.write('%i\t%s\n' %(key + 1, word.strip()))
print datetime.datetime.now(), 'write mapped dict'
for folder in ['train', 'dev', 'test']:
for filename in ['src', 'mt', 'pe']:
writetofile('%s/%s.%s' %(directory, folder, filename), '%s/mapped.%s.%s' %(directory, folder, filename), vocab)
print datetime.datetime.now(), 'write src mt pe', folder
for folder in ['train', 'dev', 'test']:
with open('%s/merged.%s' %(directory, folder), 'w') as fileout:
for src, mt, pe in zip(*[open('%s/mapped.%s.%s' %(directory, folder, filename)) for filename in ['src', 'mt', 'pe']]):
fileout.write('%s\t%s\t%s\n' %(src.strip(), mt.strip(), pe.strip()))
print datetime.datetime.now(), 'write merged', folder
time, vocab = 0, 0
for filename in ['%s/merged.%s' %(directory, name) for name in ['train', 'test', 'dev']]:
for line in open(filename):
for words in line.split('\t'):
if not words: continue
wordlist = [int(word) for word in words.split()]
vocab = max(vocab, max(wordlist or [0]))
time = max(time, len(wordlist))
print datetime.datetime.now(), 'time', time, 'vocab', vocab
pelengths = [len(line.split()) for line in open('%s/mapped.train.pe' %directory).readlines()]
alignments = readalignfile('%s/train.src-pe' %directory, '%s/train.mt-pe' %directory, pelengths)
with open('%s/train' %directory, 'w') as fileout:
for line, alignment in zip(open('%s/merged.train' %directory), alignments):
fileout.write('%s\t%s\n' %(line.strip(), alignment.strip()))
print datetime.datetime.now(), 'write train'
for folder in ['dev', 'test']:
with open('%s/%s' %(directory, folder), 'w') as fileout:
for line in open('%s/merged.%s' %(directory, folder)):
fileout.write('%s\t\n' %line.strip())
print datetime.datetime.now(), 'write', folder
word2vec, dimension = dict(), 300
for language in ['english', 'german']:
word2vec = readword2vec('%s/%s.model' %(directory, language), word2vec)
print datetime.datetime.now(), 'read model', language
indexvocab, reversevocab, indexmax = readindexfile('%s/map.dict' %directory)
vocab = [np.zeros(dimension, float)]
for word in indexvocab:
if word in word2vec: vocab.append(word2vec[word])
else: vocab.append(np.random.normal(0., np.sqrt(3. / dimension), dimension))
np.savetxt('%s/model' %directory, np.vstack(vocab))
print datetime.datetime.now(), 'write model'
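# Invocation sketch (hedged: inferred from config.get('global', 'data') above,
# the only section/key this script reads from the config file):
#
#   python preprocess.py settings.ini
#
# where settings.ini contains at least:
#
#   [global]
#   data = /path/to/corpus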
|
{
"content_hash": "5bbdc84a183415bf64c8bfee6782eae0",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 124,
"avg_line_length": 41.55652173913043,
"alnum_prop": 0.6631094371207366,
"repo_name": "aaiijmrtt/AUTOPOSTEDIT",
"id": "a36c7fc09fa44aee00e0309a5516e6327ac1d0aa",
"size": "4779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/process/preprocess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "168450"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 4e32e2d01c28
Revises:
Create Date: 2017-12-22 12:06:04.886300
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4e32e2d01c28'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('departments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('is_ignored', sa.Boolean(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('department_id', sa.Integer(), nullable=True),
sa.Column('is_super_admin', sa.Boolean(), nullable=True),
sa.ForeignKeyConstraint(['department_id'], ['departments.id'], ),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('reports',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('week_count', sa.Integer(), nullable=True),
sa.Column('year', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_reports_created_at'), 'reports', ['created_at'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_reports_created_at'), table_name='reports')
op.drop_table('reports')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
op.drop_table('departments')
# ### end Alembic commands ###
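# Invocation sketch (hedged: standard Alembic commands, not taken from this
# repository's tooling):
#
#   alembic upgrade head   # applies upgrade() above
#   alembic downgrade -1   # runs downgrade() to revert this revision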
|
{
"content_hash": "8c8636963a810d4dbe8b11f7384f05f4",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 91,
"avg_line_length": 37.625,
"alnum_prop": 0.6537467700258398,
"repo_name": "CodingCrush/WeeklyReport",
"id": "f16e2389758d06fcb96a33d9e8e4aff24162834c",
"size": "2709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deploy/migrations/versions/4e32e2d01c28_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13666"
},
{
"name": "Dockerfile",
"bytes": "1642"
},
{
"name": "HTML",
"bytes": "32219"
},
{
"name": "JavaScript",
"bytes": "54179"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "53878"
},
{
"name": "Shell",
"bytes": "548"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = "htmldom",
packages = ['htmldom'],
version = '2.0',
description = 'HTML parser which can be used for web-scraping applications',
    long_description = 'htmldom parses an HTML file and provides methods for iterating over, searching, and modifying the parse tree, in a similar way to jQuery',
author = 'Bhimsen.S.Kulkarni',
author_email = 'bhimsen.pes@gmail.com',
url = 'http://pypi.python.org/',
license = 'FreeBSD License',
platforms = 'Linux',
classifiers = [
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3.2',
'License :: OSI Approved :: BSD License ',
'Operating System :: POSIX :: Linux',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
    ]
)
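# Install sketch (hedged: assumes the package is published on PyPI under the
# name declared above):
#
#   pip install htmldom==2.0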
|
{
"content_hash": "8f6753851afac411b0e1ead855d4a060",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 156,
"avg_line_length": 43.95652173913044,
"alnum_prop": 0.5934718100890207,
"repo_name": "bhimsen92/htmldom",
"id": "807af24a9f3cd9eab876cc6c62c1fba83406c4d9",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "122765"
}
],
"symlink_target": ""
}
|
from ckan.plugins import toolkit
try:
from ckan.tests import helpers
from ckan.tests import factories
from ckan.tests.helpers import assert_in
except ImportError:
from ckan.new_tests import helpers
from ckan.new_tests import factories
from ckan.new_tests.helpers import assert_in
from ckanext.issues.tests import factories as issue_factories
from ckanext.issues.tests.helpers import (
ClearOnSetupClassMixin,
)
class TestIssuesShowController(helpers.FunctionalTestBase,
ClearOnSetupClassMixin):
def setup(self):
self.user = factories.User()
self.organization = factories.Organization(user=self.user)
self.dataset = factories.Dataset(user=self.user,
owner_org=self.organization['name'],
private=True)
self.issue = issue_factories.Issue(user=self.user,
user_id=self.user['id'],
dataset_id=self.dataset['id'])
def test_show_issue(self):
app = self._get_test_app()
env = {'REMOTE_USER': self.user['name'].encode('ascii')}
response = app.get(
url=toolkit.url_for('issues_show',
dataset_id=self.dataset['id'],
issue_number=self.issue['number']),
extra_environ=env,
)
assert_in(self.issue['title'], response)
assert_in(self.issue['description'], response)
|
{
"content_hash": "74042c5d9a00a2ef401ca5d1e2f9df28",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 77,
"avg_line_length": 37.11904761904762,
"alnum_prop": 0.5766516998075689,
"repo_name": "datagovuk/ckanext-issues",
"id": "9f928126edcb0ce4aa4ef8e8d5cdaa724daa75e1",
"size": "1559",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ckanext/issues/tests/controllers/test_show.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4500"
},
{
"name": "HTML",
"bytes": "38876"
},
{
"name": "JavaScript",
"bytes": "8945"
},
{
"name": "Python",
"bytes": "226935"
},
{
"name": "Shell",
"bytes": "1546"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateContactProfileEmailAddress(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the UpdateContactProfileEmailAddress Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(UpdateContactProfileEmailAddress, self).__init__(temboo_session, '/Library/SendGrid/WebAPI/Profile/UpdateContactProfileEmailAddress')
def new_input_set(self):
return UpdateContactProfileEmailAddressInputSet()
def _make_result_set(self, result, path):
return UpdateContactProfileEmailAddressResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateContactProfileEmailAddressChoreographyExecution(session, exec_id, path)
class UpdateContactProfileEmailAddressInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the UpdateContactProfileEmailAddress
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from SendGrid.)
"""
super(UpdateContactProfileEmailAddressInputSet, self)._set_input('APIKey', value)
def set_APIUser(self, value):
"""
Set the value of the APIUser input for this Choreo. ((required, string) The username registered with SendGrid.)
"""
super(UpdateContactProfileEmailAddressInputSet, self)._set_input('APIUser', value)
def set_NewEmailAddress(self, value):
"""
Set the value of the NewEmailAddress input for this Choreo. ((required, string) A valid email address, not exceeding 100 characters. This address will be used for all future communication with SendGrid. A confirmation email will be sent to validate the change of address.)
"""
super(UpdateContactProfileEmailAddressInputSet, self)._set_input('NewEmailAddress', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
super(UpdateContactProfileEmailAddressInputSet, self)._set_input('ResponseFormat', value)
class UpdateContactProfileEmailAddressResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateContactProfileEmailAddress Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
return self._output.get('Response', None)
class UpdateContactProfileEmailAddressChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateContactProfileEmailAddressResultSet(response, path)
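# Usage sketch (hedged: assumes a valid TembooSession; execute_with_results()
# is the standard Temboo SDK entry point, and the credential values below are
# placeholders):
#
# from temboo.core.session import TembooSession
#
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = UpdateContactProfileEmailAddress(session)
# inputs = choreo.new_input_set()
# inputs.set_APIUser('sendgrid_username')
# inputs.set_APIKey('sendgrid_api_key')
# inputs.set_NewEmailAddress('new.address@example.com')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())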
|
{
"content_hash": "55c37ba50c77cd632807abdbd9ad777b",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 280,
"avg_line_length": 47.75,
"alnum_prop": 0.7332751599767307,
"repo_name": "lupyuen/RaspberryPiImage",
"id": "2bdb9024497429d1d44390da75e2c7f91ecfd889",
"size": "4303",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "home/pi/GrovePi/Software/Python/others/temboo/Library/SendGrid/WebAPI/Profile/UpdateContactProfileEmailAddress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "82308"
},
{
"name": "C",
"bytes": "3197439"
},
{
"name": "C#",
"bytes": "33056"
},
{
"name": "C++",
"bytes": "1020255"
},
{
"name": "CSS",
"bytes": "208338"
},
{
"name": "CoffeeScript",
"bytes": "87200"
},
{
"name": "Eagle",
"bytes": "1632170"
},
{
"name": "Go",
"bytes": "3646"
},
{
"name": "Groff",
"bytes": "286691"
},
{
"name": "HTML",
"bytes": "41527"
},
{
"name": "JavaScript",
"bytes": "403603"
},
{
"name": "Makefile",
"bytes": "33808"
},
{
"name": "Objective-C",
"bytes": "69457"
},
{
"name": "Perl",
"bytes": "96047"
},
{
"name": "Processing",
"bytes": "1304"
},
{
"name": "Python",
"bytes": "13358098"
},
{
"name": "Shell",
"bytes": "68795"
},
{
"name": "TeX",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import yaml
from six import print_
from ccmlib import common
from . import TEST_DIR
from . import ccmtest
CLUSTER_PATH = TEST_DIR
class TestCCMCmd(ccmtest.Tester):
def __init__(self, *args, **kwargs):
ccmtest.Tester.__init__(self, *args, **kwargs)
class TestCCMCreate(TestCCMCmd):
def tearDown(self):
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def create_cmd(self, args=None, name='test', version='2.0.10'):
if args is None:
args = []
if version:
args = ['ccm', 'create', name, '-v', version] + args
else:
args = ['ccm', 'create', name] + args
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def validate_output(self, process):
stdout, stderr = process.communicate()
try:
print_("[OUT] {}".format(stdout))
self.assertEqual(len(stderr), 0)
except AssertionError:
print_("[ERROR] {}".format(stderr.strip()))
raise
def cluster_create_version_test(self):
self.validate_output(self.create_cmd())
def cluster_create_cassandra_dir_test(self):
c_dir = common.get_default_path()
c_dir = os.path.join(c_dir, 'repository')
c_dir = os.path.join(c_dir, os.listdir(c_dir)[0])
args = ['--install-dir', c_dir]
self.validate_output(self.create_cmd(args, version=None))
def cluster_create_populate_test(self):
args = ['-n', '3']
self.validate_output(self.create_cmd(args))
def cluster_create_no_switch_test(self):
self.create_cmd(args=None, name='not_test')
args = ['--no-switch']
self.validate_output(self.create_cmd(args))
self.assertEqual('not_test', common.current_cluster_name(common.get_default_path()))
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
p = subprocess.Popen(['ccm', 'switch', 'test'])
p.wait()
p = subprocess.Popen(['ccm', 'remove'])
p.wait()
def cluster_create_start_test(self):
args = ['-n', '1', '-s']
self.validate_output(self.create_cmd(args))
pidfile = os.path.join(common.get_default_path(), 'test', 'node1', 'cassandra.pid')
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
os.kill(pid, 0)
def cluster_create_debug_start_test(self):
args = ['-n', '1', '-s', '-d']
p = self.create_cmd(args)
stdout, stderr = p.communicate()
print_("[OUT] {}".format(stdout))
self.assertGreater(len(stdout), 18000)
self.assertEqual(len(stderr), 0)
def cluster_create_vnodes_test(self):
args = ['-n', '1', '--vnodes']
self.validate_output(self.create_cmd(args))
yaml_path = os.path.join(common.get_default_path(), 'test', 'node1', 'conf', 'cassandra.yaml')
with open(yaml_path, 'r') as f:
            data = yaml.safe_load(f)
self.assertEqual(256, data['num_tokens'])
|
{
"content_hash": "2e7460bb677a91bf05e9dc70116c3a4e",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 102,
"avg_line_length": 31.54639175257732,
"alnum_prop": 0.5748366013071895,
"repo_name": "thobbs/ccm",
"id": "caf9793d463d3a85b2f3e610789eda05c8fb026a",
"size": "3060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cmds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "301162"
},
{
"name": "Shell",
"bytes": "2634"
}
],
"symlink_target": ""
}
|