| repo_name (string, 5–100 chars) | path (string, 4–294 chars) | copies (990 classes) | size (string, 4–7 chars) | content (string, 666–1M chars) | license (15 classes) |
|---|---|---|---|---|---|
nishad89/newfies-dialer | newfies/dialer_contact/forms.py | 1 | 7725 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django import forms
from django.forms import ModelForm, Textarea
from django.forms.widgets import NumberInput
from django.utils.translation import ugettext_lazy as _
from dialer_contact.models import Phonebook, Contact
from dialer_contact.constants import STATUS_CHOICE
from dialer_campaign.function_def import get_phonebook_list
#from dialer_contact.constants import CHOICE_TYPE
from bootstrap3_datetime.widgets import DateTimePicker
from mod_utils.forms import SaveUserModelForm, common_submit_buttons
from crispy_forms.helper import FormHelper
from crispy_forms.bootstrap import TabHolder, Tab
from crispy_forms.layout import Layout, Fieldset, Div
class AdminSearchForm(forms.Form):
    """General search form with free-text From & To date fields.

    Dates are typed as plain text (max 10 chars, presumably
    "YYYY-MM-DD" — the SearchForm subclass enforces that format via a
    date-picker widget).
    """
    from_date = forms.CharField(label=_('from'), required=False, max_length=10)
    to_date = forms.CharField(label=_('to'), required=False, max_length=10)
class SearchForm(AdminSearchForm):
    """General search form with From & To dates rendered as date pickers.

    Overrides the parent's plain CharFields with a bootstrap3
    DateTimePicker (date-only: pickTime=False, format YYYY-MM-DD).
    """
    from_date = forms.CharField(label=_('from').capitalize(), required=False, max_length=10,
                                widget=DateTimePicker(options={"format": "YYYY-MM-DD", "pickTime": False}))
    to_date = forms.CharField(label=_('to').capitalize(), required=False, max_length=10,
                              widget=DateTimePicker(options={"format": "YYYY-MM-DD", "pickTime": False}))
class FileImport(forms.Form):
    """Generic form for uploading a pipe-delimited CSV file.

    Validation only checks the file extension; parsing of the content
    is done by the importing view.
    """
    csv_file = forms.FileField(
        label=_('Upload CSV file using the pipe "|" as the field delimiter, e.g. ' +
                '1234567890|surname|forename|email@somewhere.com|test-contact|1|' +
                'address|city|state|US|unit|{"age":"32","title":"doctor"}|'),
        required=True,
        error_messages={'required': 'please upload a CSV File'})

    def clean_csv_file(self):
        """Form validation: accept only files with a .csv or .txt extension.

        Raises:
            forms.ValidationError: if the filename has no extension or an
                unsupported one.
        """
        filename = self.cleaned_data["csv_file"]
        file_exts = ["csv", "txt"]
        # rsplit on the LAST dot: the original split(".")[1] raised
        # IndexError for dot-less names and picked the wrong token for
        # names like "my.data.csv".
        name_parts = str(filename).rsplit(".", 1)
        if len(name_parts) == 2 and name_parts[1].lower() in file_exts:
            return filename
        # Interpolate AFTER translation so the msgid stays stable.
        raise forms.ValidationError(
            _(u'document types accepted: %s') % ' '.join(file_exts))
class Contact_fileImport(FileImport):
    """Admin form: import a CSV file of contacts into a chosen phonebook."""
    phonebook = forms.ChoiceField(label=_("phonebook").capitalize(), required=False, help_text=_("select phonebook"))

    def __init__(self, user, *args, **kwargs):
        """Build the crispy-forms layout and fill the phonebook dropdown.

        :param user: logged-in user whose phonebooks populate the choices
        """
        super(Contact_fileImport, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'well'
        self.helper.layout = Layout(
            Div(
                Div(Fieldset('', 'phonebook', 'csv_file')),
            ),
        )
        # Append the standard "import" submit button row to the layout.
        common_submit_buttons(self.helper.layout, 'import')
        # To get user's phonebook list
        if user:  # and not user.is_superuser
            self.fields['phonebook'].choices = get_phonebook_list(user)
class PhonebookForm(SaveUserModelForm):
    """Phonebook ModelForm.

    The owning user is excluded from the form and filled in by
    SaveUserModelForm on save.
    """
    class Meta:
        model = Phonebook
        fields = ['name', 'description']
        exclude = ('user',)
        widgets = {
            'description': Textarea(attrs={'cols': 26, 'rows': 3}),
        }

    def __init__(self, *args, **kwargs):
        """Build the crispy-forms layout with the appropriate submit button."""
        super(PhonebookForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'well'
        self.helper.layout = Layout(
            Div(
                Div(Fieldset('', 'name', 'description', css_class='col-md-6')),
            ),
        )
        # Existing instance => "update" button, otherwise the default "add".
        if self.instance.id:
            common_submit_buttons(self.helper.layout, 'update')
        else:
            common_submit_buttons(self.helper.layout)
def phonebook_list(user):
    """Return (id, name) choice tuples for every phonebook of *user*.

    The list always starts with the placeholder entry ``(0, '---')``.
    """
    phonebooks = Phonebook.objects.filter(user=user).order_by('id')
    return [(0, '---')] + [(pb.id, pb.name) for pb in phonebooks]
class ContactForm(ModelForm):
    """Contact ModelForm rendered as a two-tab crispy-forms layout:
    "general" (identity fields) and "advanced data" (address & extras).
    """
    class Meta:
        model = Contact
        widgets = {
            'description': Textarea(attrs={'cols': 23, 'rows': 3}),
        }

    def __init__(self, user, *args, **kwargs):
        """Build the tabbed layout and fill the phonebook dropdown.

        :param user: logged-in user whose phonebooks populate the choices
        """
        super(ContactForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        # Existing instance => "update" button row, otherwise "add".
        if self.instance.id:
            form_action = common_submit_buttons(default_action='update')
        else:
            form_action = common_submit_buttons(default_action='add')
        css_class = 'col-md-6'
        self.helper.layout = Layout(
            TabHolder(
                Tab(_('general').capitalize(),
                    Div(
                        Div('phonebook', css_class=css_class),
                        Div('contact', css_class=css_class),
                        Div('last_name', css_class=css_class),
                        Div('first_name', css_class=css_class),
                        Div('status', css_class=css_class),
                        Div('email', css_class=css_class),
                        css_class='row'
                    ),
                    form_action,
                    css_class='well'
                ),
                Tab(_('advanced data').capitalize(),
                    Div(
                        Div('unit_number', css_class=css_class),
                        Div('address', css_class=css_class),
                        Div('city', css_class=css_class),
                        Div('state', css_class=css_class),
                        Div('country', css_class=css_class),
                        Div('description', css_class=css_class),
                        Div('additional_vars', css_class=css_class),
                        css_class='row'
                    ),
                    form_action,
                    css_class='well'
                ),
            ),
        )
        # To get user's phonebook list
        if user:
            self.fields['phonebook'].choices = phonebook_list(user)
class ContactSearchForm(forms.Form):
    """Search form shown above the contact list (number, name,
    phonebook and status filters)."""
    contact_no = forms.CharField(label=_('contact number').capitalize(), required=False, widget=NumberInput())
    contact_name = forms.CharField(label=_('contact name').capitalize(), required=False, widget=forms.TextInput(attrs={'size': 15}))
    phonebook = forms.ChoiceField(label=_('phonebook').capitalize(), required=False)
    contact_status = forms.TypedChoiceField(label=_('status').capitalize(), required=False, choices=list(STATUS_CHOICE),
                                            initial=STATUS_CHOICE.ALL)

    def __init__(self, user, *args, **kwargs):
        """Build a single-row crispy layout and fill the phonebook choices."""
        super(ContactSearchForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.form_class = 'well'
        css_class = 'col-md-3'
        self.helper.layout = Layout(
            Div(
                Div('contact_no', css_class=css_class),
                Div('contact_name', css_class=css_class),
                Div('phonebook', css_class=css_class),
                Div('contact_status', css_class=css_class),
                css_class='row'
            ),
        )
        common_submit_buttons(self.helper.layout, 'search')
        if user:
            self.fields['phonebook'].choices = phonebook_list(user)
| mpl-2.0 |
jaredly/pyjamas | examples/mail/AboutDialog.py | 7 | 2302 | from pyjamas.ui.Button import Button
from pyjamas.ui.DialogBox import DialogBox
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.Image import Image
from pyjamas.ui import KeyboardListener
from pyjamas.ui.Widget import Widget
from pyjamas.ui import HasAlignment
class AboutDialog(DialogBox):
    """Modal "About" dialog for the pyjamas Mail sample: logo on the
    left, descriptive HTML in the center, Close button at the bottom.

    Closes on button click, Enter or Escape.
    """
    LOGO_IMAGE = "http://trac.pyworks.org/pyjamas/chrome/site/pyjamas-logo-small.png"

    def __init__(self):
        DialogBox.__init__(self)
        # Use this opportunity to set the dialog's caption.
        self.setText("About the Mail Sample")
        # Create a DockPanel to contain the 'about' label and the 'OK' button.
        outer = DockPanel()
        outer.setSpacing(4)
        outer.add(Image(AboutDialog.LOGO_IMAGE), DockPanel.WEST)
        # Create the 'OK' button, along with a listener that hides the dialog
        # when the button is clicked. Adding it to the 'south' position within
        # the dock causes it to be placed at the bottom.
        buttonPanel = HorizontalPanel()
        buttonPanel.setHorizontalAlignment(HasAlignment.ALIGN_RIGHT)
        # 'self' is the click listener: Button clicks invoke self.onClick.
        buttonPanel.add(Button("Close", self))
        outer.add(buttonPanel, DockPanel.SOUTH)
        # Create the 'about' label. Placing it in the 'rest' position within the
        # dock causes it to take up any remaining space after the 'OK' button
        # has been laid out.
        textplain = "This sample application demonstrates the construction "
        textplain += "of a complex user interface using pyjamas' built-in widgets. Have a look "
        textplain += "at the code to see how easy it is to build your own apps!"
        text = HTML(textplain)
        text.setStyleName("mail-AboutText")
        outer.add(text, DockPanel.CENTER)
        # Add a bit of spacing and margin to the dock to keep the components from
        # being placed too closely together.
        outer.setSpacing(8)
        self.setWidget(outer)

    def onClick(self, sender):
        """Click listener for the Close button: dismiss the dialog."""
        self.hide()

    def onKeyDownPreview(self, key, modifiers):
        # Use the popup's key preview hooks to close the dialog when either
        # enter or escape is pressed.
        if (key == KeyboardListener.KEY_ESCAPE or key == KeyboardListener.KEY_ENTER):
            self.hide()
        # NOTE(review): returns True unconditionally (allow the event to
        # propagate) — presumably intentional; confirm against DialogBox API.
        return True
| apache-2.0 |
suto/infernal-twin | build/pip/pip/_vendor/requests/packages/urllib3/connection.py | 483 | 9011 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
    """Used to detect a failed ConnectionCls import."""
try:  # Compiled with SSL?
    # Default to the sentinel; replaced at the bottom of the module with
    # VerifiedHTTPSConnection when ssl imports successfully.
    HTTPSConnection = DummyConnection
    import ssl
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):  # Platform-specific: No SSL.
    ssl = None

    class BaseSSLError(BaseException):
        # Stand-in so `except BaseSSLError` elsewhere stays valid
        # even when the interpreter was built without SSL support.
        pass

try:  # Python 3:
    # Not a no-op, we're adding this to the namespace so it can be imported.
    ConnectionError = ConnectionError
except NameError:  # Python 2:
    class ConnectionError(Exception):
        # Backport of the Python 3 builtin for Python 2.
        pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.
    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:
    - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
    - ``source_address``: Set the source address for the current connection.
    .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
    - ``socket_options``: Set specific options on the underlying socket. If not specified, then
    defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
    Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
    For example, if you wish to enable TCP Keep Alive in addition to the defaults,
    you might pass::
    HTTPConnection.default_socket_options + [
    (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]
    Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """
    default_port = port_by_scheme['http']
    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        """Strip/stash urllib3-specific kwargs, then defer to httplib."""
        if six.PY3:  # Python 3
            # 'strict' was removed from http.client in Python 3.
            kw.pop('strict', None)
        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')
        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)
        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)
        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.
        :return: New socket connection.
        :raises ConnectTimeoutError: if the connect attempt times out.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address
        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options
        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)
        except SocketTimeout:
            # Re-raise as urllib3's richer timeout exception.
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))
        return conn

    def _prepare_conn(self, conn):
        """Attach *conn* as our socket and set up tunneling if requested."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        """Create a fresh socket and prepare it for use."""
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPS connection that wraps the socket with SSL but performs
    NO certificate verification (see VerifiedHTTPSConnection for that).
    """
    default_port = port_by_scheme['https']

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)
        self.key_file = key_file
        self.cert_file = cert_file
        # Required property for Google AppEngine 1.9.0 which otherwise causes
        # HTTPS requests to go out as HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        """Connect, then wrap the socket in (unverified) SSL."""
        conn = self._new_conn()
        self._prepare_conn(conn)
        self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    # Verification settings; configured via set_cert() before connect().
    cert_reqs = None
    ca_certs = None
    ssl_version = None
    assert_fingerprint = None

    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None):
        """Store client cert / verification parameters for connect()."""
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.ca_certs = ca_certs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint

    def connect(self):
        """Connect and wrap the socket with a *verified* SSL layer."""
        # Add certificate verification
        conn = self._new_conn()
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        # A clock set before RECENT_DATE will make valid certs look
        # not-yet-valid; warn so users can diagnose verification errors.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)
        if self.assert_fingerprint:
            # Fingerprint pinning replaces hostname matching entirely.
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif resolved_cert_reqs != ssl.CERT_NONE \
                and self.assert_hostname is not False:
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
                    'This feature is being removed by major browsers and deprecated by RFC 2818. '
                    '(See https://github.com/shazow/urllib3/issues/497 for details.)'),
                    SecurityWarning
                )
            match_hostname(cert, self.assert_hostname or hostname)
        # Verified iff certs were required, or a fingerprint was pinned.
        self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
                            or self.assert_fingerprint is not None)
# Pick the public HTTPSConnection implementation: the verified one when
# the ssl module is available, otherwise the DummyConnection sentinel.
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    HTTPSConnection = VerifiedHTTPSConnection
else:
    HTTPSConnection = DummyConnection
| gpl-3.0 |
Zeromixis/ZexGameEngine | External/freetype/src/tools/docmaker/formatter.py | 109 | 6000 | #
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2015 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This is the base Formatter class. Its purpose is to convert a content
# processor's data into specific documents (i.e., table of contents, global
# index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example, the
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
# to output HTML.
#
from sources import *
from content import *
from utils import *
################################################################
##
## FORMATTER CLASS
##
class Formatter:
    """Base documentation formatter (Python 2 code base).

    Converts a content processor's data into structured documents:
    table of contents, global index, and per-section API pages.
    Sub-classes override the enter/exit hooks to emit a concrete
    format (see `tohtml.py`).
    """

    def __init__( self, processor ):
        self.processor = processor
        # name -> content block, for cross-referencing and the index
        self.identifiers = {}
        self.chapters = processor.chapters
        self.sections = processor.sections.values()
        self.block_index = []
        # store all blocks in a dictionary
        self.blocks = []
        for section in self.sections:
            for block in section.blocks.values():
                self.add_identifier( block.name, block )
                # add enumeration values to the index, since this is useful
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            self.add_identifier( field.name, block )
        # Python 2: keys() returns a sortable list.
        self.block_index = self.identifiers.keys()
        self.block_index.sort( key = index_key )

    def add_identifier( self, name, block ):
        """Register *name* -> *block*; warn on duplicate definitions."""
        if name in self.identifiers:
            # duplicate name!
            sys.stderr.write( "WARNING: duplicate definition for"
                              + " '" + name + "' "
                              + "in " + block.location() + ", "
                              + "previous definition in "
                              + self.identifiers[name].location()
                              + "\n" )
        else:
            self.identifiers[name] = block

    #
    # formatting the table of contents
    #
    # Hooks below are no-ops; sub-classes override them to emit output.
    def toc_enter( self ):
        pass

    def toc_chapter_enter( self, chapter ):
        pass

    def toc_section_enter( self, section ):
        pass

    def toc_section_exit( self, section ):
        pass

    def toc_chapter_exit( self, chapter ):
        pass

    def toc_index( self, index_filename ):
        pass

    def toc_exit( self ):
        pass

    def toc_dump( self, toc_filename = None, index_filename = None ):
        """Walk chapters/sections, invoking the toc_* hooks in order."""
        output = None
        if toc_filename:
            output = open_output( toc_filename )
        self.toc_enter()
        for chap in self.processor.chapters:
            self.toc_chapter_enter( chap )
            for section in chap.sections:
                self.toc_section_enter( section )
                self.toc_section_exit( section )
            self.toc_chapter_exit( chap )
        self.toc_index( index_filename )
        self.toc_exit()
        if output:
            close_output( output )

    #
    # formatting the index
    #
    def index_enter( self ):
        pass

    def index_name_enter( self, name ):
        pass

    def index_name_exit( self, name ):
        pass

    def index_exit( self ):
        pass

    def index_dump( self, index_filename = None ):
        """Walk the sorted identifier index, invoking the index_* hooks."""
        output = None
        if index_filename:
            output = open_output( index_filename )
        self.index_enter()
        for name in self.block_index:
            self.index_name_enter( name )
            self.index_name_exit( name )
        self.index_exit()
        if output:
            close_output( output )

    #
    # formatting a section
    #
    def section_enter( self, section ):
        pass

    def block_enter( self, block ):
        pass

    def markup_enter( self, markup, block = None ):
        pass

    def field_enter( self, field, markup = None, block = None ):
        pass

    def field_exit( self, field, markup = None, block = None ):
        pass

    def markup_exit( self, markup, block = None ):
        pass

    def block_exit( self, block ):
        pass

    def section_exit( self, section ):
        pass

    def section_dump( self, section, section_filename = None ):
        """Emit one section: each named block with all of its markups."""
        output = None
        if section_filename:
            output = open_output( section_filename )
        self.section_enter( section )
        for name in section.block_names:
            skip_entry = 0
            try:
                block = self.identifiers[name]
                # `block_names' can contain field names also,
                # which we filter out
                for markup in block.markups:
                    if markup.tag == 'values':
                        for field in markup.fields:
                            if field.name == name:
                                skip_entry = 1
            except:
                skip_entry = 1   # this happens e.g. for `/empty/' entries
            if skip_entry:
                continue
            self.block_enter( block )
            for markup in block.markups[1:]:   # always ignore first markup!
                self.markup_enter( markup, block )
                for field in markup.fields:
                    self.field_enter( field, markup, block )
                    self.field_exit( field, markup, block )
                self.markup_exit( markup, block )
            self.block_exit( block )
        self.section_exit( section )
        if output:
            close_output( output )

    def section_dump_all( self ):
        """Emit every section via section_dump (default filenames)."""
        for section in self.sections:
            self.section_dump( section )
# eof
| gpl-2.0 |
samuelmaudo/yepes | yepes/test/runner.py | 1 | 1557 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import sys
from unittest.runner import _WritelnDecorator as WriteLnDecorator
from unittest.signals import registerResult
from yepes.test.result import TestResult
class TestRunner(object):
    """
    A test runner that prints formatted text results to a stream,
    delegating result collection to ``resultclass`` (``TestResult``
    by default) and supporting enabled/disabled plugins.
    """
    resultclass = TestResult

    def __init__(self, stream=None, verbosity=1, failfast=False,
                 resultclass=None, plugins=()):
        if stream is None:
            stream = sys.stderr
        # Ensure the stream supports writeln(), as unittest expects.
        if not isinstance(stream, WriteLnDecorator):
            stream = WriteLnDecorator(stream)
        self.stream = stream
        self.verbosity = verbosity
        self.failfast = failfast
        if resultclass is not None:
            self.resultclass = resultclass
        self.plugins = plugins

    def makeResult(self):
        """Instantiate the result class, passing only enabled plugins."""
        return self.resultclass(
            stream=self.stream,
            verbosity=self.verbosity,
            plugins=[
                plugin
                for plugin
                in self.plugins
                if plugin.enabled
            ],
        )

    def run(self, test):
        """
        Run the given test case or test suite.
        """
        result = self.makeResult()
        # Hook into unittest's signal handling (Ctrl-C support).
        registerResult(result)
        result.failfast = self.failfast
        result.startTestRun()
        try:
            test(result)
        finally:
            result.stopTestRun()
        result.report()
        return result
| bsd-3-clause |
longnow/plexmark | plexmark.py | 1 | 8758 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pickle, os, time, asyncio, concurrent, functools, bisect, random, shutil
from glob import glob
from itertools import accumulate
from collections import defaultdict
import unicodedataplus as unicodedata
import regex as re
import config
import cachetools
import aiopg
model_cache = cachetools.LFUCache(10)
BEGIN = "\u0002" # Start of Text
END = "\u0003" # End of Text
async def init():
    """Initialise module globals: the Postgres pool and the process pool.

    Must be awaited once before any other coroutine in this module.
    """
    global pool, executor
    pool = await aiopg.create_pool("dbname={} user={}".format(config.DB_NAME, config.DB_USER), minsize=config.DB_POOL_MIN, maxsize=config.DB_POOL_MAX)
    executor = concurrent.futures.ProcessPoolExecutor(max_workers=config.MAX_WORKERS)
async def run_in_process(*args, **kwargs):
    """Run ``args[0](*args[1:], **kwargs)`` in the process-pool executor
    (set up by init()) and await its result."""
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(executor, functools.partial(*args, **kwargs))
def _pickle_load(path):
return pickle.load(open(path, 'rb'))
async def pickle_load(*args):
    """Unpickle a file in a worker process; *args* is (path,)."""
    return await run_in_process(_pickle_load, *args)
def _pickle_dump(obj, path):
pickle.dump(obj, open(path, 'wb'), pickle.HIGHEST_PROTOCOL)
async def pickle_dump(*args):
    """Pickle an object in a worker process; *args* is (obj, path)."""
    return await run_in_process(_pickle_dump, *args)
class PLText:
    """Character-level Markov model over a language variety's expressions.

    Wraps a PLChain trained on (expression, score) pairs and offers
    generation of novel expressions plus probability scoring. All text
    is handled in NFD normal form.
    """

    def __init__(self, uid, state_size, expr_score_list, chain=None):
        """
        :param uid: language variety uid this model belongs to
        :param state_size: number of characters of context in the chain
        :param expr_score_list: iterable of (expression, score) pairs
        :param chain: optional pre-built PLChain (otherwise built here)
        """
        self.uid = uid
        self.state_size = state_size
        # Known real expressions, NFD-normalized, for novelty filtering.
        self.expr_set = {unicodedata.normalize("NFD", ex[0]) for ex in expr_score_list}
        self.chain = chain or PLChain(expr_score_list, state_size)

    def test_expr_output(self, expr):
        """Return True if *expr* is novel (not an attested expression)."""
        return expr not in self.expr_set

    def make_expr(self, init_state=None, tries=10, test_output=True, skip_re=r"", probability=False):
        """Generate one expression, retrying up to *tries* times.

        :param init_state: optional prefix to start generation from
        :param test_output: reject expressions already in the corpus
        :param skip_re: regex; matching outputs are rejected
        :param probability: if True, also return the walk probability
        :return: expr (or (expr, prob)), or None if no try succeeded
        """
        found = False
        for _ in range(tries):
            if init_state:
                init_state = unicodedata.normalize("NFD", init_state)
                prefix = init_state.strip(BEGIN)
                # Left-pad with BEGIN so the state is exactly state_size chars.
                init_state = init_state.rjust(self.state_size, BEGIN)[-self.state_size:]
            else:
                prefix = ''
            try:
                if probability:
                    expr, prob = self.chain.walk(init_state, probability)
                    expr = prefix + expr
                else:
                    expr = prefix + self.chain.walk(init_state, probability)
            except KeyError:
                # init_state never seen in training data.
                expr, prob = "", 0
            if test_output:
                if self.test_expr_output(expr):
                    if skip_re:
                        if not re.search(unicodedata.normalize("NFD", skip_re), expr):
                            found = True
                    else:
                        found = True
            else:
                found = True
            if found:
                if probability:
                    return expr, prob
                else:
                    return expr

    def expr_prob(self, expr):
        """Return the model's probability of generating *expr* exactly."""
        norm_expr = unicodedata.normalize("NFD", expr)
        prepped_expr = BEGIN * self.state_size + norm_expr + END
        output = 1
        # Iterate over the *normalized* length: NFD can change the number
        # of code points, and the original len(expr) mis-counted the
        # transitions (skipping or over-running the END symbol).
        for i in range(len(norm_expr) + 1):
            output *= self.chain.prob(prepped_expr[i:i+self.state_size], prepped_expr[i+self.state_size])
        return output
class PLChain:
    """Weighted character-level Markov chain.

    The model maps a state (string of ``state_size`` chars) to a dict of
    {next_char: cumulative score}. BEGIN padding marks the start of an
    expression and END its termination.
    """

    def __init__(self, corpus, state_size, model=None):
        """
        :param corpus: iterable of (text, score) pairs
        :param state_size: context length in characters
        :param model: optional pre-built transition table
        """
        self.state_size = state_size
        self.model = model or self.build(corpus, self.state_size)
        self.precompute_begin_state()

    def build(self, corpus, state_size):
        """Build the transition table from weighted corpus runs."""
        model = {}
        # (the empty dict above is immediately replaced by the defaultdict)
        model = defaultdict(lambda: defaultdict(int))
        for run, score in corpus:
            run_norm = unicodedata.normalize("NFD", run)
            # Wait — keep the original local name for byte-fidelity:
            norm_run = run_norm
            items = (BEGIN * state_size) + norm_run + END
            for i in range(len(norm_run) + 1):
                state = items[i:i+state_size]
                follow = items[i+state_size]
                # Accumulate the expression's score as the edge weight.
                model[state][follow] += score
        # Freeze to plain dicts (picklable, no factory closures).
        model = dict({k: dict(model[k]) for k in model})
        return model

    def precompute_begin_state(self):
        """Cache the choices/cumdist for the all-BEGIN start state,
        since every unseeded walk starts there."""
        begin_state = BEGIN * self.state_size
        choices, weights = zip(*self.model[begin_state].items())
        cumdist = list(accumulate(weights))
        self.begin_cumdist = cumdist
        self.begin_choices = choices
        self.begin_weights = weights

    def move(self, state, probability=False):
        """Sample the next character after *state* (weighted choice).

        If *probability* is True, also return that character's
        conditional probability.
        """
        if state == BEGIN * self.state_size:
            choices = self.begin_choices
            cumdist = self.begin_cumdist
            weights = self.begin_weights
        else:
            choices, weights = zip(*self.model[state].items())
            cumdist = list(accumulate(weights))
        # Weighted sampling via bisect on the cumulative distribution.
        r = random.random() * cumdist[-1]
        index = bisect.bisect(cumdist, r)
        selection = choices[index]
        if probability:
            prob = weights[index] / cumdist[-1]
            return selection, prob
        return selection

    def gen(self, init_state=None, probability=False):
        """Yield characters from *init_state* until END is sampled.

        NOTE(review): *probability* is accepted but unused here; walk()
        handles the probability-tracking path itself.
        """
        state = init_state or BEGIN * self.state_size
        while True:
            next_char = self.move(state)
            if next_char == END: break
            yield next_char
            # Slide the state window forward by one character.
            state = state[1:] + next_char

    def walk(self, init_state=None, probability=False):
        """Generate a full string; optionally also its walk probability."""
        if probability:
            state = init_state or BEGIN * self.state_size
            output = ''
            output_prob = 1
            while True:
                next_char, prob = self.move(state, probability)
                # Multiply in every step, including the final END move.
                output_prob *= prob
                if next_char == END: break
                output += next_char
                state = state[1:] + next_char
            return output, output_prob
        else:
            return ''.join(list(self.gen(init_state, probability)))

    def prob(self, state, char):
        """Return P(char | state), or 0.0 for an unseen state."""
        try:
            total_score = sum(self.model[state].values())
            return self.model[state][char] / total_score
        except KeyError:
            return 0.0
async def pull_expr(uid, cache=True):
    """Fetch (text, score) rows for every expression of language variety *uid*.

    :param cache: use the pre-aggregated ``exprx`` table; when False,
        aggregate quality scores from ``denotationx`` on the fly.
    :return: list of (txt, score) tuples
    """
    if cache:
        query = """
            SELECT txt, score
            FROM exprx
            WHERE langvar = uid_langvar(%s)
            """
    else:
        query = """
            SELECT expr.txt, grp_quality_score(array_agg(denotationx.grp), array_agg(denotationx.quality))
            FROM expr
            JOIN denotationx ON (denotationx.expr = expr.id)
            WHERE expr.langvar = uid_langvar(%s)
            GROUP BY expr.id
            """
    print('fetching expressions for {}'.format(uid))
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            # Parameterized query — uid is never interpolated directly.
            await cur.execute(query, (uid,), timeout=config.REQUEST_TIMEOUT)
            return await cur.fetchall()
async def generate_model(uid, state_size):
    """Fetch the expression corpus for *uid* and build a PLText model
    in a worker process (model construction is CPU-bound)."""
    expr_score_list = await pull_expr(uid)
    print('building model for {}, state size: {}'.format(uid, state_size))
    return await run_in_process(PLText, uid=uid, state_size=state_size, expr_score_list=expr_score_list)
async def pull_model(uid, state_size):
    """Return the PLText model for (uid, state_size).

    Lookup order: in-memory LFU cache -> on-disk pickle -> build from
    the database (and schedule a background pickle of the new model).
    """
    try:
        pltext = model_cache[(uid, state_size)]
    except KeyError:
        try:
            pltext = await pickle_load(os.path.join(config.DATA_DIR, uid, str(state_size) + '.pickle'))
        except (FileNotFoundError, EOFError):
            pltext = await generate_model(uid, state_size)
            # Persist asynchronously; don't block the caller on disk I/O.
            asyncio.ensure_future(pickle_model(uid, state_size, pltext))
        model_cache[(uid, state_size)] = pltext
    return pltext
async def pickle_model(uid, state_size, pltext):
    """Persist *pltext* to DATA_DIR/<uid>/<state_size>.pickle,
    creating the directory if needed."""
    os.makedirs(os.path.join(config.DATA_DIR, uid), exist_ok=True)
    await pickle_dump(pltext, os.path.join(config.DATA_DIR, uid, str(state_size) + '.pickle'))
def clear_uid_dir(uid):
    """Delete every cached model pickle under DATA_DIR/<uid>."""
    pattern = os.path.join(config.DATA_DIR, uid, '*.pickle')
    for pickle_path in glob(pattern):
        os.remove(pickle_path)
async def cleanup(max_age):
    """Remove per-uid model directories older than *max_age* seconds,
    evicting any corresponding in-memory cache entries.

    Age is taken from the directory's ctime.
    """
    uid_list = [os.path.basename(filename) for filename in glob(os.path.join(config.DATA_DIR, '*'))]
    now = time.time()
    for uid in uid_list:
        try:
            # file_age = now - os.path.getmtime(os.path.join(config.DATA_DIR, uid, 'expr_score_list.pickle'))
            file_age = now - os.path.getctime(os.path.join(config.DATA_DIR, uid))
            print("{} age is {}".format(uid, file_age))
            if file_age > max_age:
                print(uid + " is old. clearing...")
                # await run_in_process(clear_uid_dir, uid)
                shutil.rmtree(os.path.join(config.DATA_DIR, uid))
                # Drop stale in-memory models for this uid as well.
                for key in model_cache.keys():
                    if key[0] == uid:
                        del model_cache[key]
        except FileNotFoundError:
            # Directory vanished between glob and stat; nothing to do.
            pass
async def generate_words(uid, state_size, count, init_state=None, skip_re=r""):
    """Generate up to *count* novel expressions for language variety *uid*.

    Failed generation attempts (None results) are filtered out, so the
    returned list may be shorter than *count*.
    """
    model = await pull_model(uid, state_size)
    expr_list = [model.make_expr(init_state=init_state, tries=100, skip_re=skip_re) for _ in range(count)]
    return [expr for expr in expr_list if expr]
| mit |
ohsu-computational-biology/server | tests/unit/test_variant_annotations.py | 2 | 3956 | """
Unit tests for variant objects. This is used for all tests
that can be performed in isolation from input data.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import unittest
import ga4gh.protocol as protocol
import ga4gh.datarepo as datarepo
import ga4gh.datamodel.variants as variants
import ga4gh.datamodel.datasets as datasets
import tests.paths as paths
class TestHtslibVariantAnnotationSet(unittest.TestCase):
"""
Unit tests for the abstract variant set.
"""
def _createVariantAnnotationSet(self, vcfDir):
"""
Creates a VariantAnnotationSet from the specified directory of
VCF files.
"""
self._variantSetName = "testVariantSet"
self._repo = datarepo.SqlDataRepository(paths.testDataRepo)
self._repo.open(datarepo.MODE_READ)
self._dataset = datasets.Dataset("testDs")
self._variantSet = variants.HtslibVariantSet(
self._dataset, self._variantSetName)
self._variantSet.populateFromDirectory(vcfDir)
self._variantAnnotationSet = variants.HtslibVariantAnnotationSet(
self._variantSet, "testVAs")
self._variantAnnotationSet.setOntology(
self._repo.getOntologyByName(paths.ontologyName))
def setUp(self):
vcfDir = "tests/data/datasets/dataset1/variants/WASH7P_annotation"
self._createVariantAnnotationSet(vcfDir)
def testConvertLocation(self):
loc = protocol.AlleleLocation()
loc.start = 150
pos = "151/305"
testLoc = self._variantAnnotationSet.convertLocation(pos)
self.assertEqual(testLoc, loc)
def testThousandGenomesAnnotation(self):
vcfDir = "tests/data/datasets/dataset1/variants/1kg.3.annotations"
self._createVariantAnnotationSet(vcfDir)
self.assertTrue(self._variantSet.isAnnotated())
def testConvertLocationHgvsC(self):
loc = protocol.AlleleLocation()
loc.start = 430
loc.reference_sequence = "T"
loc.alternate_sequence = "A"
hgvsC = "NM_001005484.1:c.431T>A"
testLoc = self._variantAnnotationSet.convertLocationHgvsC(hgvsC)
self.assertEqual(testLoc, loc)
def testConvertLocationHgvsP(self):
loc = protocol.AlleleLocation()
loc.start = 143
loc.alternate_sequence = "Asn"
loc.reference_sequence = "Ile"
hgvsP = "NM_001005484.1:p.Ile144Asn"
testLoc = self._variantAnnotationSet.convertLocationHgvsP(hgvsP)
self.assertEqual(testLoc, loc)
def testAddLocations(self):
effect = protocol.TranscriptEffect()
effect.hgvs_annotation.protein = "NM_001005484.1:p.Ile144Asn"
effect.hgvs_annotation.transcript = "NM_001005484.1:c.431T>A"
effect.protein_location.alternate_sequence = "Asn"
effect.protein_location.reference_sequence = "Ile"
effect.protein_location.start = 143
effect.cds_location.alternate_sequence = "A"
effect.cds_location.reference_sequence = "T"
effect.cds_location.start = 430
effect.cdna_location.start = 430
protPos = "144/305"
cdnaPos = "431/918"
testEffect = self._variantAnnotationSet.addLocations(
effect, protPos, cdnaPos)
self.assertEqual(testEffect, effect)
def testHashVariantAnnotation(self):
annotation = protocol.VariantAnnotation()
variant = protocol.Variant()
expected = hashlib.md5('\t()\t[]\t').hexdigest()
hashed = self._variantAnnotationSet.hashVariantAnnotation(
variant, annotation)
self.assertEqual(hashed, expected)
def testGetTranscriptEffectId(self):
    """An empty transcript effect hashes to the md5 of just the field
    separators."""
    effect = protocol.TranscriptEffect()
    # hashlib.md5 requires bytes; a bytes literal behaves identically on
    # Python 2 and avoids a TypeError under Python 3.
    expected = hashlib.md5(b"\t\t[]\t").hexdigest()
    hashed = self._variantAnnotationSet.getTranscriptEffectId(effect)
    self.assertEqual(hashed, expected)
| apache-2.0 |
dragondjf/QMarkdowner | qframer/ffloatwidget.py | 1 | 6545 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from .qt.QtCore import *
from .qt.QtGui import *
from .fmoveablewidget import FMoveableWidget
from .ftitlebar import BaseToolButton, baseHeight
class FTitleBar(QFrame):
    """Title bar for the floating widget.

    Hosts the settings, pin, lock and close tool buttons and re-emits the
    user's actions as Qt signals so the embedding widget can react.
    """

    # Signals re-emitted to whoever embeds this title bar.
    settingMenuShowed = Signal()
    skinMenuShowed = Signal()
    modeed = Signal(bool)
    locked = Signal(bool)  # True when the widget becomes locked in place
    pined = Signal(bool)   # True when the widget becomes pinned on top

    closed = Signal()

    # Close button: transparent normally, red when hovered.
    closestyle = '''
        QToolButton#close{
            background-color: transparent;
            color: white;
        }
        QToolButton#close:hover{
            background-color: red;
            border: 1px;
        }
    '''

    # Logo button: always transparent (no hover effect).
    logostyle = '''
        QToolButton#logo{
            background-color: transparent;
        }
        QToolButton#logo:hover{
            background-color: transparent;
        }
    '''

    def __init__(self, parent=None):
        super(FTitleBar, self).__init__(parent)
        self.initData()
        self.initUI()

    def initData(self):
        """Load the icon resources and reset the toggle-state flags."""
        self.settingDownIcon = QIcon(":/icons/dark/appbar.control.down.png")
        self.clothesIcon = QIcon(":/icons/dark/appbar.clothes.shirt.png")
        self.lockIcon = QIcon(":/icons/dark/appbar.lock.png")
        self.unlockIcon = QIcon(":/icons/dark/appbar.unlock.keyhole.png")
        self.pinIcon = QIcon(":/icons/dark/appbar.pin.png")
        self.unPinIcon = QIcon(":/icons/dark/appbar.pin.remove.png")
        self.closeIcon = QIcon(":/icons/dark/appbar.close.png")
        # Toggle states: all off initially (unlocked, unpinned, not maximised).
        self.max_flag = False
        self.lock_flag = False
        self.pin_flag = False

    def initUI(self):
        """Create the buttons, lay them out right-aligned, and wire signals."""
        self.setFixedHeight(baseHeight)
        self.lockButton = BaseToolButton()
        self.lockButton.setIcon(self.unlockIcon)
        self.pinButton = BaseToolButton()
        self.pinButton.setIcon(self.unPinIcon)
        self.settingDownButton = BaseToolButton()
        self.settingDownButton.setIcon(self.settingDownIcon)
        self.closeButton = BaseToolButton()
        self.closeButton.setObjectName("close")
        self.closeButton.setStyleSheet(self.closestyle)
        self.closeButton.setIcon(self.closeIcon)
        # addStretch() first pushes all buttons to the right edge.
        mainLayout = QHBoxLayout()
        mainLayout.addStretch()
        mainLayout.addWidget(self.settingDownButton)
        mainLayout.addWidget(self.pinButton)
        mainLayout.addWidget(self.lockButton)
        mainLayout.addWidget(self.closeButton)
        mainLayout.setContentsMargins(0, 0, 5, 0)
        mainLayout.setSpacing(0)
        self.setLayout(mainLayout)
        self.settingDownButton.clicked.connect(self.settingMenuShowed)
        self.lockButton.clicked.connect(self.swithLockIcon)
        self.pinButton.clicked.connect(self.swithPinIcon)
        self.closeButton.clicked.connect(self.closed)

    def swithLockIcon(self):
        """Toggle the lock state, update the icon and emit ``locked``."""
        if self.lock_flag:
            self.lockButton.setIcon(self.unlockIcon)
        else:
            self.lockButton.setIcon(self.lockIcon)
        self.lock_flag = not self.lock_flag
        self.locked.emit(self.lock_flag)

    def swithPinIcon(self):
        """Toggle the pin state, update the icon and emit ``pined``."""
        if self.pin_flag:
            self.pinButton.setIcon(self.unPinIcon)
        else:
            self.pinButton.setIcon(self.pinIcon)
        self.pin_flag = not self.pin_flag
        self.pined.emit(self.pin_flag)

    def isPined(self):
        # True while the owning window should stay on top.
        return self.pin_flag

    def isLocked(self):
        # True while the owning window should refuse to be dragged.
        return self.lock_flag

    def isMax(self):
        # NOTE(review): max_flag is never set to True anywhere in this class;
        # presumably toggled by the embedding widget — confirm before relying
        # on it.
        return self.max_flag
class FFloatWidget(FMoveableWidget):
    """Frameless floating panel docked to the right edge of its parent
    window, with fade-in/out animations and a pin/lock title bar.
    """

    # Fixed width of the floating panel, in pixels.
    default_width = 300

    def __init__(self, parent=None):
        super(FFloatWidget, self).__init__()
        # NOTE(review): assigning to self.parent shadows QWidget.parent();
        # intentional here (used by startRect/endRect), but confirm nothing
        # else calls self.parent() on this widget.
        self.parent = parent
        self.setWindowFlags(
            Qt.WindowType_Mask | Qt.SubWindow | Qt.FramelessWindowHint)
        self._initShowAnimation()
        self._initHideAnimation()
        self._initUI()
        self._initConnect()

    def _initUI(self):
        """Create the title bar and stack it above a stretch filler."""
        self.setFixedWidth(self.default_width)
        self.titleBar = FTitleBar(self)
        mainLayout = QVBoxLayout()
        mainLayout.addWidget(self.titleBar)
        mainLayout.addStretch()
        mainLayout.setSpacing(0)
        mainLayout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(mainLayout)
        self.setGeometry(self.endRect)

    def _initConnect(self):
        # Close fades the widget out; pin toggles the stay-on-top flag.
        self.titleBar.closed.connect(self.animationHide)
        self.titleBar.pined.connect(self.setFlags)

    @property
    def startRect(self):
        """Geometry at the start of a slide: flush to the parent's right
        edge, below the parent's title bar.

        NOTE(review): currently identical to endRect, so no positional
        animation actually occurs — only the opacity animations run.
        """
        mainwindow = self.parent
        startRect = QRect(mainwindow.x() + mainwindow.width(),
                          mainwindow.y() + mainwindow.titleBar().height(),
                          self.w, mainwindow.height() -
                          mainwindow.titleBar().height())
        return startRect

    @property
    def endRect(self):
        """Target geometry: flush to the parent's right edge, spanning the
        parent's height below its title bar."""
        mainwindow = self.parent
        endRect = QRect(mainwindow.x() + mainwindow.width(),
                        mainwindow.y() + mainwindow.titleBar().height(),
                        self.w, mainwindow.height() -
                        mainwindow.titleBar().height())
        return endRect

    @property
    def h(self):
        # Current widget height.
        return self.height()

    @property
    def w(self):
        # Current widget width.
        return self.width()

    def _initShowAnimation(self):
        # Fade in: animate windowOpacity 0 -> 1 over one second.
        # (Property name must be bytes for the Qt binding in use.)
        self.showanimation = QPropertyAnimation(self, ('windowOpacity').encode('utf-8'))
        self.showanimation.setStartValue(0)
        self.showanimation.setEndValue(1)
        self.showanimation.setDuration(1000)
        self.showanimation.setEasingCurve(QEasingCurve.OutCubic)

    def _initHideAnimation(self):
        # Fade out: animate windowOpacity 1 -> 0, then hide the widget.
        self.hideanimation = QPropertyAnimation(self, ('windowOpacity').encode('utf-8'))
        self.hideanimation.setStartValue(1)
        self.hideanimation.setEndValue(0)
        self.hideanimation.setDuration(1000)
        self.hideanimation.setEasingCurve(QEasingCurve.OutCubic)
        self.hideanimation.finished.connect(self.hide)

    def animationShow(self):
        """Show the widget with a fade-in."""
        self.show()
        self.showanimation.start()

    def animationHide(self):
        """Fade the widget out; it is hidden when the animation finishes."""
        self.hideanimation.start()

    def setFlags(self, flag):
        """Toggle the stay-on-top window flag.

        Changing window flags hides the window, so show() is called again
        in both branches to keep the widget visible.
        """
        if flag:
            self.setWindowFlags(
                Qt.WindowType_Mask | Qt.SubWindow |
                Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
            self.show()
        else:
            self.setWindowFlags(
                Qt.WindowType_Mask | Qt.SubWindow | Qt.FramelessWindowHint)
            self.show()

    def mouseMoveEvent(self, event):
        # When locked, swallow drags; otherwise defer to the moveable base.
        if self.isLocked():
            pass
        else:
            super(FFloatWidget, self).mouseMoveEvent(event)

    def isLocked(self):
        # The title bar owns the lock state.
        return self.titleBar.isLocked()
| mit |
QQuick/Transcrypt | transcrypt/development/manual_tests/module_logging/ajaxlogclient.py | 1 | 1843 | # File: ajaxlogclient.py
# Author: Carl Allendorph
# Date: 29NOV2016
#
# Description:
# This file contains the implementation of a logging client
# to test the ajax logging handler
import logging
import logging.handlers as loghdlr
class HTMLHandler(logging.Handler):
    """ This handler is used to provide a view on the screen of the
    logging messages that have been sent over the AJAX handler. This
    is primarily for debugging purposes.
    """

    def __init__(self, elemId):
        """ Configure the HTML Handler
        @param elemId parent element where we will start pushing
        logging messages.
        """
        logging.Handler.__init__(self)
        # NOTE: `document` is the browser DOM global. This module is compiled
        # with Transcrypt and runs in the page, so no Python import exists
        # for it; the code is not runnable under CPython.
        self._elem = document.getElementById(elemId)

    def emit(self, record):
        """Append the formatted record as a new <LI> under the parent element."""
        msg = self.format(record)
        # Silently drop records if the configured element was not found.
        if self._elem:
            node = document.createElement("LI")
            content = document.createTextNode(msg)
            node.appendChild(content)
            self._elem.appendChild(node)
def setupLogger():
    """Configure the root logger with two handlers: an AJAXHandler that
    POSTs records to a local log server, and an HTMLHandler that mirrors
    them into the page for debugging.
    """
    root = logging.getLogger()
    root.setLevel(10)  # 10 == logging.DEBUG
    fmt = logging.Formatter("{levelname}[{asctime}]: {message}",
                            "%H:%M:%S", style="{")
    # Transcrypt's logging.handlers provides AJAXHandler (not in CPython).
    ahdlr = loghdlr.AJAXHandler("http://127.0.0.1:8081/log", "POST")
    ahdlr.setLevel(10)
    ahdlr.setFormatter(fmt)
    htmlHdlr = HTMLHandler("log-output")
    htmlHdlr.setLevel(10)
    htmlHdlr.setFormatter(fmt)
    root.addHandler(ahdlr)
    root.addHandler(htmlHdlr)
    logging.info("Started AJAX Logger")
# Configure logging as soon as the module loads in the page.
setupLogger()


def logPeriodic():
    """Emit one heartbeat record; scheduled every second below."""
    logging.info("Message on The Bus goes Round and Round")


# setInterval is the browser global (this runs under Transcrypt):
# fire logPeriodic every 1000 ms.
setInterval(logPeriodic, 1000)
| apache-2.0 |
jayflo/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# we create 20 points: the first 10 shifted by (1, 1) labelled +1,
# the last 10 centred at the origin labelled -1
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples (the +1 class)
sample_weight[:10] *= 10

# plot the weighted data points; marker size is proportional to weight
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
            cmap=plt.cm.bone)

## fit the unweighted model; solid line = its decision boundary
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])

## fit the weighted model; dashed line = its decision boundary
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])

plt.legend([no_weights.collections[0], samples_weights.collections[0]],
           ["no weights", "with weights"], loc="lower left")

plt.xticks(())
plt.yticks(())
plt.show()
birdsarah/bokeh | examples/app/stock_applet/stock_data.py | 49 | 2405 | import os
from six.moves import urllib
import zipfile
from bokeh.util.string import encode_utf8
def extract_hosted_zip(data_url, save_dir, exclude_term=None):
    """Download the zip at *data_url* into *save_dir*, extract it there,
    then remove the temporary archive.

    :param data_url: URL of the zip archive to fetch.
    :param save_dir: directory the archive is downloaded and extracted into.
    :param exclude_term: optional path fragment stripped from member names.
    :raises IOError: if the download fails.
    """
    archive_path = os.path.join(save_dir, 'temp.zip')
    # Fetch the archive, surfacing download failures to the caller.
    try:
        print('Downloading %r to %r' % (data_url, archive_path))
        archive_path, hdrs = urllib.request.urlretrieve(url=data_url,
                                                        filename=archive_path)
        print('Download successfully completed')
    except IOError as e:
        print("Could not successfully retrieve %r" % data_url)
        raise e
    # Unpack next to the download, then drop the temporary file.
    extract_zip(zip_name=archive_path, exclude_term=exclude_term)
    os.unlink(archive_path)
    print("Extraction Complete")
def extract_zip(zip_name, exclude_term=None):
    """Extracts a zip file to its containing directory.

    :param zip_name: path to the ``.zip`` archive; members are written into
        the directory that contains it.
    :param exclude_term: optional path fragment removed from each member
        name before writing (e.g. ``'toplevel/'`` to flatten the archive).
    :raises zipfile.error: if the archive is corrupt.
    :raises ValueError: if a member name would escape the extraction
        directory (zip-slip protection).
    """
    zip_dir = os.path.dirname(os.path.abspath(zip_name))
    try:
        with zipfile.ZipFile(zip_name) as z:
            # write each zipped file out if it isn't a directory
            files = [zip_file for zip_file in z.namelist() if not zip_file.endswith('/')]
            print('Extracting %i files from %r.' % (len(files), zip_name))
            for zip_file in files:
                # remove any provided extra directory term from zip file
                if exclude_term:
                    dest_file = zip_file.replace(exclude_term, '')
                else:
                    dest_file = zip_file
                dest_file = os.path.normpath(os.path.join(zip_dir, dest_file))
                # Guard against "zip slip": a member name containing ".."
                # must not be allowed to write outside zip_dir.
                if not dest_file.startswith(os.path.join(zip_dir, '')):
                    raise ValueError("unsafe path in zip member: %r" % zip_file)
                dest_dir = os.path.dirname(dest_file)
                # make directory if it does not exist
                if not os.path.isdir(dest_dir):
                    os.makedirs(dest_dir)
                # read file from zip, then write to new directory;
                # ZipFile.read always returns bytes, so it can be written
                # verbatim (the former encode_utf8 call was a no-op).
                data = z.read(zip_file)
                with open(dest_file, 'wb') as f:
                    f.write(data)
    except zipfile.error as e:
        print("Bad zipfile (%r): %s" % (zip_name, e))
        raise e
if __name__ == '__main__':
    # info for retrieving and extracting the zip file
    this_dir = os.path.dirname(os.path.realpath(__file__))
    zip_file = 'http://quantquote.com/files/quantquote_daily_sp500_83986.zip'
    # top-level folder inside the archive, stripped so files land in this_dir
    zip_dir = 'quantquote_daily_sp500_83986/'
extract_hosted_zip(data_url=zip_file, save_dir=this_dir, exclude_term=zip_dir) | bsd-3-clause |
simbha/mAngE-Gin | lib/Django 1.7/django/db/migrations/optimizer.py | 33 | 13947 | from __future__ import unicode_literals
from django.db import migrations
from django.utils import six
class MigrationOptimizer(object):
    """
    Powers the optimization process, where you provide a list of Operations
    and you are returned a list of equal or shorter length - operations
    are merged into one if possible.

    For example, a CreateModel and an AddField can be optimized into a
    new CreateModel, and CreateModel and DeleteModel can be optimized into
    nothing.
    """

    def optimize(self, operations, app_label=None):
        """
        Main optimization entry point. Pass in a list of Operation instances,
        get out a new list of Operation instances.

        Unfortunately, due to the scope of the optimization (two combinable
        operations might be separated by several hundred others), this can't be
        done as a peephole optimization with checks/output implemented on
        the Operations themselves; instead, the optimizer looks at each
        individual operation and scans forwards in the list to see if there
        are any matches, stopping at boundaries - operations which can't
        be optimized over (RunSQL, operations on the same field/model, etc.)

        The inner loop is run until the starting list is the same as the result
        list, and then the result is returned. This means that operation
        optimization must be stable and always return an equal or shorter list.

        The app_label argument is optional, but if you pass it you'll get more
        efficient optimization.
        """
        # Internal tracking variable for test assertions about # of loops
        self._iterations = 0
        # Iterate to a fixed point: each pass may enable further merges.
        while True:
            result = self.optimize_inner(operations, app_label)
            self._iterations += 1
            if result == operations:
                return result
            operations = result

    def optimize_inner(self, operations, app_label=None):
        """
        Inner optimization loop.

        Performs a single pass: for each operation, scan forward for a
        partner it can merge with, stopping at the first operation it
        cannot safely be reordered past.
        """
        new_operations = []
        for i, operation in enumerate(operations):
            # Compare it to each operation after it
            for j, other in enumerate(operations[i + 1:]):
                # in_between is every operation strictly between the pair;
                # a reduction is only valid if it is safe across all of them.
                result = self.reduce(operation, other, operations[i + 1:i + j + 1])
                if result is not None:
                    # Optimize! Add result, then remaining others, then return
                    new_operations.extend(result)
                    new_operations.extend(operations[i + 1:i + 1 + j])
                    new_operations.extend(operations[i + j + 2:])
                    return new_operations
                if not self.can_optimize_through(operation, other, app_label):
                    new_operations.append(operation)
                    break
            else:
                new_operations.append(operation)
        return new_operations

    #### REDUCTION ####

    def reduce(self, operation, other, in_between=None):
        """
        Either returns a list of zero, one or two operations,
        or None, meaning this pair cannot be optimized.
        """
        # Dispatch table: (type of first op, type of second op, reducer).
        # The first matching row wins.
        submethods = [
            (
                migrations.CreateModel,
                migrations.DeleteModel,
                self.reduce_model_create_delete,
            ),
            (
                migrations.AlterModelTable,
                migrations.DeleteModel,
                self.reduce_model_alter_delete,
            ),
            (
                migrations.AlterUniqueTogether,
                migrations.DeleteModel,
                self.reduce_model_alter_delete,
            ),
            (
                migrations.AlterIndexTogether,
                migrations.DeleteModel,
                self.reduce_model_alter_delete,
            ),
            (
                migrations.CreateModel,
                migrations.RenameModel,
                self.reduce_model_create_rename,
            ),
            (
                migrations.RenameModel,
                migrations.RenameModel,
                self.reduce_model_rename_self,
            ),
            (
                migrations.CreateModel,
                migrations.AddField,
                self.reduce_create_model_add_field,
            ),
            (
                migrations.CreateModel,
                migrations.AlterField,
                self.reduce_create_model_alter_field,
            ),
            (
                migrations.CreateModel,
                migrations.RemoveField,
                self.reduce_create_model_remove_field,
            ),
            (
                migrations.AddField,
                migrations.AlterField,
                self.reduce_add_field_alter_field,
            ),
            (
                migrations.AddField,
                migrations.RemoveField,
                self.reduce_add_field_delete_field,
            ),
            (
                migrations.AlterField,
                migrations.RemoveField,
                self.reduce_alter_field_delete_field,
            ),
            (
                migrations.AddField,
                migrations.RenameField,
                self.reduce_add_field_rename_field,
            ),
            (
                migrations.AlterField,
                migrations.RenameField,
                self.reduce_alter_field_rename_field,
            ),
            (
                migrations.CreateModel,
                migrations.RenameField,
                self.reduce_create_model_rename_field,
            ),
            (
                migrations.RenameField,
                migrations.RenameField,
                self.reduce_rename_field_self,
            ),
        ]
        for ia, ib, om in submethods:
            if isinstance(operation, ia) and isinstance(other, ib):
                return om(operation, other, in_between or [])
        return None

    def model_to_key(self, model):
        """
        Takes either a model class or a "appname.ModelName" string
        and returns (appname, modelname)
        """
        if isinstance(model, six.string_types):
            return model.split(".", 1)
        else:
            return (
                model._meta.app_label,
                model._meta.object_name,
            )

    def reduce_model_create_delete(self, operation, other, in_between):
        """
        Folds a CreateModel and a DeleteModel into nothing.
        """
        # Proxy models are excluded: deleting a proxy must not erase the
        # creation of the concrete model it proxies.
        if (operation.name.lower() == other.name.lower() and
                not operation.options.get("proxy", False)):
            return []

    def reduce_model_alter_delete(self, operation, other, in_between):
        """
        Folds an AlterModelSomething and a DeleteModel into just delete.
        """
        if operation.name.lower() == other.name.lower():
            return [other]

    def reduce_model_create_rename(self, operation, other, in_between):
        """
        Folds a model rename into its create
        """
        if operation.name.lower() == other.old_name.lower():
            return [
                migrations.CreateModel(
                    other.new_name,
                    fields=operation.fields,
                    options=operation.options,
                    bases=operation.bases,
                )
            ]

    def reduce_model_rename_self(self, operation, other, in_between):
        """
        Folds a model rename into another one
        """
        if operation.new_name.lower() == other.old_name.lower():
            return [
                migrations.RenameModel(
                    operation.old_name,
                    other.new_name,
                )
            ]

    def reduce_create_model_add_field(self, operation, other, in_between):
        # Folds an AddField into the CreateModel of the same model.
        if operation.name.lower() == other.model_name.lower():
            # Don't allow optimisations of FKs through models they reference
            if hasattr(other.field, "rel") and other.field.rel:
                for between in in_between:
                    # Check that it doesn't point to the model
                    app_label, object_name = self.model_to_key(other.field.rel.to)
                    if between.references_model(object_name, app_label):
                        return None
                    # Check that it's not through the model
                    if getattr(other.field.rel, "through", None):
                        app_label, object_name = self.model_to_key(other.field.rel.through)
                        if between.references_model(object_name, app_label):
                            return None
            # OK, that's fine
            return [
                migrations.CreateModel(
                    operation.name,
                    fields=operation.fields + [(other.name, other.field)],
                    options=operation.options,
                    bases=operation.bases,
                )
            ]

    def reduce_create_model_alter_field(self, operation, other, in_between):
        # Folds an AlterField into the CreateModel of the same model by
        # swapping in the altered field definition.
        if operation.name.lower() == other.model_name.lower():
            return [
                migrations.CreateModel(
                    operation.name,
                    fields=[
                        (n, other.field if n == other.name else v)
                        for n, v in operation.fields
                    ],
                    options=operation.options,
                    bases=operation.bases,
                )
            ]

    def reduce_create_model_rename_field(self, operation, other, in_between):
        # Folds a RenameField into the CreateModel of the same model.
        if operation.name.lower() == other.model_name.lower():
            return [
                migrations.CreateModel(
                    operation.name,
                    fields=[
                        (other.new_name if n == other.old_name else n, v)
                        for n, v in operation.fields
                    ],
                    options=operation.options,
                    bases=operation.bases,
                )
            ]

    def reduce_create_model_remove_field(self, operation, other, in_between):
        # Folds a RemoveField into the CreateModel of the same model by
        # dropping the field from the created definition.
        if operation.name.lower() == other.model_name.lower():
            return [
                migrations.CreateModel(
                    operation.name,
                    fields=[
                        (n, v)
                        for n, v in operation.fields
                        if n.lower() != other.name.lower()
                    ],
                    options=operation.options,
                    bases=operation.bases,
                )
            ]

    def reduce_add_field_alter_field(self, operation, other, in_between):
        # AddField then AlterField on the same field: add the altered field.
        if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
            return [
                migrations.AddField(
                    model_name=operation.model_name,
                    name=operation.name,
                    field=other.field,
                )
            ]

    def reduce_add_field_delete_field(self, operation, other, in_between):
        # AddField then RemoveField on the same field cancel out.
        if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
            return []

    def reduce_alter_field_delete_field(self, operation, other, in_between):
        # AlterField then RemoveField: the removal alone suffices.
        if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.name.lower():
            return [other]

    def reduce_add_field_rename_field(self, operation, other, in_between):
        # AddField then RenameField: add under the new name directly.
        if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.old_name.lower():
            return [
                migrations.AddField(
                    model_name=operation.model_name,
                    name=other.new_name,
                    field=operation.field,
                )
            ]

    def reduce_alter_field_rename_field(self, operation, other, in_between):
        # AlterField then RenameField: rename first, then alter under the
        # new name (two operations, but now adjacent and further reducible).
        if operation.model_name.lower() == other.model_name.lower() and operation.name.lower() == other.old_name.lower():
            return [
                other,
                migrations.AlterField(
                    model_name=operation.model_name,
                    name=other.new_name,
                    field=operation.field,
                ),
            ]

    def reduce_rename_field_self(self, operation, other, in_between):
        # Two chained field renames collapse into one old->newest rename.
        if operation.model_name.lower() == other.model_name.lower() and operation.new_name.lower() == other.old_name.lower():
            return [
                migrations.RenameField(
                    operation.model_name,
                    operation.old_name,
                    other.new_name,
                ),
            ]

    #### THROUGH CHECKS ####

    def can_optimize_through(self, operation, other, app_label=None):
        """
        Returns True if it's possible to optimize 'operation' with something
        the other side of 'other'. This is possible if, for example, they
        affect different models.
        """
        MODEL_LEVEL_OPERATIONS = (
            migrations.CreateModel,
            migrations.AlterModelTable,
            migrations.AlterUniqueTogether,
            migrations.AlterIndexTogether,
        )
        FIELD_LEVEL_OPERATIONS = (
            migrations.AddField,
            migrations.AlterField,
        )
        # If it's a model level operation, let it through if there's
        # nothing that looks like a reference to us in 'other'.
        if isinstance(operation, MODEL_LEVEL_OPERATIONS):
            if not other.references_model(operation.name, app_label):
                return True
        # If it's field level, only let it through things that don't reference
        # the field (which includes not referencing the model)
        if isinstance(operation, FIELD_LEVEL_OPERATIONS):
            if not other.references_field(operation.model_name, operation.name, app_label):
                return True
        return False
| mit |
promptworks/keystone | keystone/common/sql/migrate_repo/versions/050_fk_consistent_indexes.py | 11 | 2112 | # Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
def upgrade(migrate_engine):
    """Recreate MySQL FK-backing indexes under consistent names.

    No-op on other backends: only MySQL auto-creates (and version-dependently
    names) indexes for referencing columns.
    """
    if migrate_engine.name == 'mysql':
        meta = sa.MetaData(bind=migrate_engine)
        endpoint = sa.Table('endpoint', meta, autoload=True)
        # NOTE(i159): MySQL requires indexes on referencing columns, and those
        # indexes create automatically. That those indexes will have different
        # names, depending on version of MySQL used. We should make this naming
        # consistent, by reverting index name to a consistent condition.
        if any(i for i in endpoint.indexes if
               i.columns.keys() == ['service_id'] and i.name != 'service_id'):
            # NOTE(i159): by this action will be made re-creation of an index
            # with the new name. This can be considered as renaming under the
            # MySQL rules.
            sa.Index('service_id', endpoint.c.service_id).create()
        user_group_membership = sa.Table('user_group_membership',
                                         meta, autoload=True)
        # Same renaming treatment for the group_id FK index.
        if any(i for i in user_group_membership.indexes if
               i.columns.keys() == ['group_id'] and i.name != 'group_id'):
            sa.Index('group_id', user_group_membership.c.group_id).create()
def downgrade(migrate_engine):
    """Intentionally a no-op; see note below."""
    # NOTE(i159): index exists only in MySQL schemas, and got an inconsistent
    # name only when MySQL 5.5 renamed it after re-creation
    # (during migrations). So we just fixed inconsistency, there is no
    # necessity to revert it.
    pass
| apache-2.0 |
MonamAgarwal/final | GTG/gtk/backends_dialog/parameters_ui/textui.py | 4 | 3037 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from gi.repository import Gtk
class TextUI(Gtk.Box):
    '''A widget to display a simple textbox and a label to describe its content
    '''

    def __init__(self, req, backend, width, description, parameter_name):
        '''
        Creates the textbox and the related label. Loads the current
        content.

        @param req: a Requester
        @param backend: a backend object
        @param width: the width of the Gtk.Label object
        @param description: label text shown next to the textbox
        @param parameter_name: name of the backend parameter being edited
        '''
        super(TextUI, self).__init__()
        self.backend = backend
        self.req = req
        self.parameter_name = parameter_name
        self.description = description
        self._populate_gtk(width)

    def _populate_gtk(self, width):
        '''Creates the gtk widgets

        @param width: the width of the Gtk.Label object
        '''
        label = Gtk.Label(label="%s:" % self.description)
        label.set_line_wrap(True)
        label.set_alignment(xalign=0, yalign=0.5)
        label.set_size_request(width=width, height=-1)
        self.pack_start(label, False, True, 0)
        # Left-pad the textbox by 10px via an alignment container.
        align = Gtk.Alignment.new(0, 0.5, 1, 0)
        align.set_padding(0, 0, 10, 0)
        self.pack_start(align, True, True, 0)
        self.textbox = Gtk.Entry()
        # Pre-fill with the backend's current value for this parameter.
        backend_parameters = self.backend.get_parameters()[self.parameter_name]
        self.textbox.set_text(backend_parameters)
        self.textbox.connect('changed', self.on_text_modified)
        align.add(self.textbox)

    def commit_changes(self):
        '''Saves the changes to the backend parameter'''
        self.backend.set_parameter(self.parameter_name,
                                   self.textbox.get_text())

    def on_text_modified(self, sender):
        ''' Signal callback, executed when the user changes the text.
        Disables the backend. The user will re-enable it to confirm the changes
        (s)he made.

        @param sender: not used, only here for signal compatibility
        '''
        if self.backend.is_enabled() and not self.backend.is_default():
            self.req.set_backend_enabled(self.backend.get_id(), False)
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py | 1 | 19262 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import MatchConditions
from azure.core.credentials import AzureKeyCredential
from azure.core.tracing.decorator import distributed_trace
from azure.core.paging import ItemPaged
from .._api_versions import DEFAULT_VERSION
from ._generated import SearchClient as _SearchServiceClient
from ._utils import (
get_access_conditions,
normalize_endpoint,
)
from .._headers_mixin import HeadersMixin
from .._utils import get_authentication_policy
from .._version import SDK_MONIKER
from .._search_client import SearchClient
from .models import SearchIndex, SynonymMap
if TYPE_CHECKING:
# pylint:disable=unused-import,ungrouped-imports
from .models._models import AnalyzeTextOptions
from typing import Any, Dict, List, Sequence, Union, Optional
from azure.core.credentials import TokenCredential
class SearchIndexClient(HeadersMixin):
"""A client to interact with Azure search service index.
:param endpoint: The URL endpoint of an Azure search service
:type endpoint: str
:param credential: A credential to authorize search client requests
:type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential
:keyword str api_version: The Search API version to use for requests.
"""
_ODATA_ACCEPT = "application/json;odata.metadata=minimal" # type: str
def __init__(self, endpoint, credential, **kwargs):
    # type: (str, Union[AzureKeyCredential, TokenCredential], **Any) -> None
    """Create the client, selecting key- or token-based auth from the
    credential type."""
    self._api_version = kwargs.pop("api_version", DEFAULT_VERSION)
    self._endpoint = normalize_endpoint(endpoint)  # type: str
    self._credential = credential
    if isinstance(credential, AzureKeyCredential):
        # Shared admin-key auth: the generated client adds the api-key header.
        self._aad = False
        self._client = _SearchServiceClient(
            endpoint=endpoint,
            sdk_moniker=SDK_MONIKER,
            api_version=self._api_version,
            **kwargs
        )  # type: _SearchServiceClient
    else:
        # AAD token credential: install a bearer-token authentication policy.
        self._aad = True
        authentication_policy = get_authentication_policy(credential)
        self._client = _SearchServiceClient(
            endpoint=endpoint,
            authentication_policy=authentication_policy,
            sdk_moniker=SDK_MONIKER,
            api_version=self._api_version,
            **kwargs
        )  # type: _SearchServiceClient
def __enter__(self):
    # type: () -> SearchIndexClient
    """Open the underlying transport and return self as a context manager."""
    self._client.__enter__()  # pylint:disable=no-member
    return self
def __exit__(self, *args):
    # type: (*Any) -> None
    """Close the underlying transport when leaving the context."""
    return self._client.__exit__(*args)  # pylint:disable=no-member
def close(self):
    # type: () -> None
    """Close the :class:`~azure.search.documents.indexes.SearchIndexClient` session.
    """
    # Shuts down the generated client's pipeline/transport.
    return self._client.close()
def get_search_client(self, index_name, **kwargs):
    # type: (str, dict) -> SearchClient
    """Return a client to perform operations on Search

    :param index_name: The name of the Search Index
    :type index_name: str
    :rtype: ~azure.search.documents.SearchClient
    """
    # Reuse this client's endpoint and credential for the document client.
    return SearchClient(self._endpoint, index_name, self._credential, **kwargs)
@distributed_trace
def list_indexes(self, **kwargs):
    # type: (**Any) -> ItemPaged[SearchIndex]
    """List the indexes in an Azure Search service.

    :return: List of indexes
    :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.SearchIndex]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # Map each generated-model index to the public SearchIndex type.
    # pylint:disable=protected-access
    return self._client.indexes.list(cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs)
@distributed_trace
def list_index_names(self, **kwargs):
    # type: (**Any) -> ItemPaged[str]
    """List the index names in an Azure Search service.

    :return: List of index names
    :rtype: ~azure.core.paging.ItemPaged[str]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # Project only the name out of each returned index.
    return self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs)
@distributed_trace
def get_index(self, name, **kwargs):
    # type: (str, **Any) -> SearchIndex
    """Retrieve a named index definition.

    :param name: The name of the index to retrieve.
    :type name: str
    :return: SearchIndex object
    :rtype: ~azure.search.documents.indexes.models.SearchIndex
    :raises: ~azure.core.exceptions.HttpResponseError

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_index_crud_operations.py
            :start-after: [START get_index]
            :end-before: [END get_index]
            :language: python
            :dedent: 4
            :caption: Get an index.
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    result = self._client.indexes.get(name, **kwargs)
    return SearchIndex._from_generated(result)  # pylint:disable=protected-access
@distributed_trace
def get_index_statistics(self, index_name, **kwargs):
    # type: (str, **Any) -> dict
    """Returns statistics for the given index, including a document count
    and storage usage.

    :param index_name: The name of the index to retrieve.
    :type index_name: str
    :return: Statistics for the given index, including a document count and storage usage.
    :rtype: dict
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    result = self._client.indexes.get_statistics(index_name, **kwargs)
    # Flatten the generated model into a plain dict for the caller.
    return result.as_dict()
@distributed_trace
def delete_index(self, index, **kwargs):
    # type: (Union[str, SearchIndex], **Any) -> None
    """Deletes a search index and all the documents it contains. The model must be
    provided instead of the name to use the access conditions.

    :param index: The index to delete.
    :type index: str or ~azure.search.documents.indexes.models.SearchIndex
    :keyword match_condition: The match condition to use upon the etag
    :type match_condition: ~azure.core.MatchConditions
    :raises: ~azure.core.exceptions.HttpResponseError

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_index_crud_operations.py
            :start-after: [START delete_index]
            :end-before: [END delete_index]
            :language: python
            :dedent: 4
            :caption: Delete an index.
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # Translate the model's etag + match_condition into request headers.
    error_map, access_condition = get_access_conditions(
        index, kwargs.pop("match_condition", MatchConditions.Unconditionally)
    )
    kwargs.update(access_condition)
    # Accept either a SearchIndex model or a plain index name string.
    try:
        index_name = index.name
    except AttributeError:
        index_name = index
    self._client.indexes.delete(
        index_name=index_name, error_map=error_map, **kwargs
    )
@distributed_trace
def create_index(self, index, **kwargs):
    # type: (SearchIndex, **Any) -> SearchIndex
    """Creates a new search index.

    :param index: The index object.
    :type index: ~azure.search.documents.indexes.models.SearchIndex
    :return: The index created
    :rtype: ~azure.search.documents.indexes.models.SearchIndex
    :raises: ~azure.core.exceptions.HttpResponseError

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_index_crud_operations.py
            :start-after: [START create_index]
            :end-before: [END create_index]
            :language: python
            :dedent: 4
            :caption: Creating a new index.
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # Convert the public model to the generated wire model and back.
    patched_index = index._to_generated()  # pylint:disable=protected-access
    result = self._client.indexes.create(patched_index, **kwargs)
    return SearchIndex._from_generated(result)  # pylint:disable=protected-access
@distributed_trace
def create_or_update_index(
    self, index, allow_index_downtime=None, **kwargs
):
    # type: (SearchIndex, bool, **Any) -> SearchIndex
    """Create a new search index, or update it if it already exists.

    :param index: The index definition.
    :type index: ~azure.search.documents.indexes.models.SearchIndex
    :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char
        filters to be added to an index by taking the index offline for at least a few
        seconds. This temporarily causes indexing and query requests to fail. Performance
        and write availability of the index can be impaired for several minutes after the
        index is updated, or longer for very large indexes.
    :type allow_index_downtime: bool
    :keyword match_condition: The match condition to use upon the etag
    :type match_condition: ~azure.core.MatchConditions
    :return: The index that was created or updated.
    :rtype: :class:`~azure.search.documents.indexes.models.SearchIndex`
    :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`, \
        :class:`~azure.core.exceptions.ResourceModifiedError`, \
        :class:`~azure.core.exceptions.ResourceNotModifiedError`, \
        :class:`~azure.core.exceptions.ResourceExistsError`

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_index_crud_operations.py
            :start-after: [START update_index]
            :end-before: [END update_index]
            :language: python
            :dedent: 4
            :caption: Update an index.
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
    error_map, access_condition = get_access_conditions(index, match_condition)
    kwargs.update(access_condition)
    # pylint:disable=protected-access
    updated = self._client.indexes.create_or_update(
        index_name=index.name,
        index=index._to_generated(),
        allow_index_downtime=allow_index_downtime,
        error_map=error_map,
        **kwargs
    )
    return SearchIndex._from_generated(updated)
@distributed_trace
def analyze_text(self, index_name, analyze_request, **kwargs):
    # type: (str, AnalyzeTextOptions, **Any) -> AnalyzeResult
    """Show how an analyzer breaks text into tokens.

    :param index_name: The name of the index for which to test an analyzer.
    :type index_name: str
    :param analyze_request: The text and analyzer or analysis components to test.
    :type analyze_request: ~azure.search.documents.indexes.models.AnalyzeTextOptions
    :return: The analysis result.
    :rtype: ~azure.search.documents.indexes.models.AnalyzeResult
    :raises: ~azure.core.exceptions.HttpResponseError

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_analyze_text.py
            :start-after: [START simple_analyze_text]
            :end-before: [END simple_analyze_text]
            :language: python
            :dedent: 4
            :caption: Analyze text
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # pylint:disable=protected-access
    request = analyze_request._to_analyze_request()
    return self._client.indexes.analyze(
        index_name=index_name, request=request, **kwargs
    )
@distributed_trace
def get_synonym_maps(self, **kwargs):
    # type: (**Any) -> List[SynonymMap]
    """List every Synonym Map defined on the search service.

    :return: List of synonym maps.
    :rtype: list[~azure.search.documents.indexes.models.SynonymMap]
    :raises: ~azure.core.exceptions.HttpResponseError

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_synonym_map_operations.py
            :start-after: [START get_synonym_maps]
            :end-before: [END get_synonym_maps]
            :language: python
            :dedent: 4
            :caption: List Synonym Maps
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    listed = self._client.synonym_maps.list(**kwargs)
    # pylint:disable=protected-access
    return [SynonymMap._from_generated(item) for item in listed.synonym_maps]
@distributed_trace
def get_synonym_map_names(self, **kwargs):
    # type: (**Any) -> List[str]
    """List the names of the Synonym Maps defined on the search service.

    :return: List of synonym map names.
    :rtype: list[str]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    listed = self._client.synonym_maps.list(**kwargs)
    return [synonym_map.name for synonym_map in listed.synonym_maps]
@distributed_trace
def get_synonym_map(self, name, **kwargs):
    # type: (str, **Any) -> SynonymMap
    """Retrieve a named Synonym Map from the search service.

    :param name: The name of the Synonym Map to get
    :type name: str
    :return: The retrieved Synonym Map
    :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap`
    :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_synonym_map_operations.py
            :start-after: [START get_synonym_map]
            :end-before: [END get_synonym_map]
            :language: python
            :dedent: 4
            :caption: Get a Synonym Map
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    fetched = self._client.synonym_maps.get(name, **kwargs)
    # pylint:disable=protected-access
    return SynonymMap._from_generated(fetched)
@distributed_trace
def delete_synonym_map(self, synonym_map, **kwargs):
    # type: (Union[str, SynonymMap], **Any) -> None
    """Delete a named Synonym Map in an Azure Search service.

    Providing just the name deletes unconditionally; to use access
    conditions, the SynonymMap model must be provided instead of the name.

    :param synonym_map: The Synonym Map (or its name) to delete
    :type synonym_map: str or ~azure.search.documents.indexes.models.SynonymMap
    :keyword match_condition: The match condition to use upon the etag
    :type match_condition: ~azure.core.MatchConditions
    :return: None
    :rtype: None

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_synonym_map_operations.py
            :start-after: [START delete_synonym_map]
            :end-before: [END delete_synonym_map]
            :language: python
            :dedent: 4
            :caption: Delete a Synonym Map
    """
    # Fix: the docstring previously documented a non-existent ``name``
    # parameter; the actual parameter is ``synonym_map``.
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    error_map, access_condition = get_access_conditions(
        synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally)
    )
    kwargs.update(access_condition)
    # Accept either a SynonymMap model or a plain name string.
    try:
        name = synonym_map.name
    except AttributeError:
        name = synonym_map
    self._client.synonym_maps.delete(
        synonym_map_name=name, error_map=error_map, **kwargs
    )
@distributed_trace
def create_synonym_map(self, synonym_map, **kwargs):
    # type: (SynonymMap, **Any) -> SynonymMap
    """Create a new Synonym Map on the search service.

    :param synonym_map: The Synonym Map object
    :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap
    :return: The created Synonym Map
    :rtype: ~azure.search.documents.indexes.models.SynonymMap

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_synonym_map_operations.py
            :start-after: [START create_synonym_map]
            :end-before: [END create_synonym_map]
            :language: python
            :dedent: 4
            :caption: Create a Synonym Map
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    # pylint:disable=protected-access
    created = self._client.synonym_maps.create(
        synonym_map._to_generated(), **kwargs
    )
    return SynonymMap._from_generated(created)
@distributed_trace
def create_or_update_synonym_map(self, synonym_map, **kwargs):
    # type: (SynonymMap, **Any) -> SynonymMap
    """Create a new Synonym Map on the search service, or update an
    existing one.

    :param synonym_map: The Synonym Map object
    :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap
    :keyword match_condition: The match condition to use upon the etag
    :type match_condition: ~azure.core.MatchConditions
    :return: The created or updated Synonym Map
    :rtype: ~azure.search.documents.indexes.models.SynonymMap
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    match_condition = kwargs.pop("match_condition", MatchConditions.Unconditionally)
    error_map, access_condition = get_access_conditions(synonym_map, match_condition)
    kwargs.update(access_condition)
    # pylint:disable=protected-access
    updated = self._client.synonym_maps.create_or_update(
        synonym_map_name=synonym_map.name,
        synonym_map=synonym_map._to_generated(),
        error_map=error_map,
        **kwargs
    )
    return SynonymMap._from_generated(updated)
@distributed_trace
def get_service_statistics(self, **kwargs):
    # type: (**Any) -> dict
    """Get service-level statistics for the search service.

    :return: Service statistics as a plain dictionary.
    :rtype: dict
    """
    kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
    stats = self._client.get_service_statistics(**kwargs)
    return stats.as_dict()
| mit |
tectronics/browserscope | test/test_result.py | 9 | 5867 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Models Unit Tests."""
__author__ = 'elsigh@google.com (Lindsey Simon)'
import new
import unittest
from google.appengine.ext import db
from categories import all_test_sets
from categories import test_set_params
from base import manage_dirty
from models import result_stats
from models.result import ResultParent
from models.result import ResultTime
import mock_data
def AddAdjustResults(test_set):
    """Monkey-patch an AdjustResults method onto *test_set*.

    The patched method halves each raw_score (rounded to nearest int) and
    stashes the original value under the 'expando' key so tests can verify
    expando attribute handling on ResultParent.
    """
    def AdjustResults(self, results):
        for values in results.values():
            # Add the raw value to be expando'd and store a munged value in score.
            values['expando'] = values['raw_score']
            values['raw_score'] = int(round(values['raw_score'] / 2.0))
        return results
    # NOTE: new.instancemethod is Python 2 only (this file targets 2.5);
    # it binds AdjustResults to this particular test_set instance.
    test_set.AdjustResults = new.instancemethod(
        AdjustResults, test_set, test_set.__class__)
class ResultTest(unittest.TestCase):
    """Tests for ResultParent.AddResult and ranker median bookkeeping."""

    def setUp(self):
        # Register a fresh mock test set so each test starts with empty rankers.
        self.test_set = mock_data.MockTestSet()
        all_test_sets.AddTestSet(self.test_set)

    def tearDown(self):
        all_test_sets.RemoveTestSet(self.test_set)

    def testGetMedianAndNumScores(self):
        # Five results per test key; expected medians are apple=0 (0,1,0,1,0),
        # banana=2 (0..4) and coconut=300 (500,200,300,100,400).
        for scores in ((0, 0, 500), (1, 1, 200),
                       (0, 2, 300), (1, 3, 100), (0, 4, 400)):
            parent = ResultParent.AddResult(
                self.test_set,
                '12.2.2.25',
                mock_data.GetUserAgentString('Firefox 3.5'),
                'apple=%s,banana=%s,coconut=%s' % scores)
        rankers = self.test_set.GetRankers('Firefox 3')
        self.assertEqual([(0, 5), (2, 5), (300, 5)],
                         [ranker.GetMedianAndNumScores() for ranker in rankers])

    def testGetMedianAndNumScoresWithParams(self):
        # Same as above but results are stored under an explicit params set.
        params = test_set_params.Params('w-params', 'a=b', 'c=d', 'e=f')
        self.test_set.default_params = params
        for scores in ((1, 0, 2), (1, 1, 1), (0, 2, 200)):
            parent = ResultParent.AddResult(
                self.test_set,
                '12.2.2.25',
                mock_data.GetUserAgentString('Firefox 3.5'),
                'apple=%s,banana=%s,coconut=%s' % scores,
                params_str=str(params))
        ranker = self.test_set.GetTest('coconut').GetRanker('Firefox 3')
        self.assertEqual((2, 3), ranker.GetMedianAndNumScores())

    def testAddResult(self):
        # Without AdjustResults the scores are stored verbatim.
        parent = ResultParent.AddResult(
            self.test_set, '12.2.2.25', mock_data.GetUserAgentString('Firefox 3.5'),
            'apple=1,banana=11,coconut=111')
        expected_results = {
            'apple': 1,
            'banana': 11,
            'coconut': 111,
        }
        self.assertEqual(expected_results, parent.GetResults())

    def testAddResultForTestSetWithAdjustResults(self):
        # AdjustResults halves raw scores; the raw values are kept as
        # expando attributes directly on the parent entity.
        AddAdjustResults(self.test_set)
        parent = ResultParent.AddResult(
            self.test_set, '12.2.2.25', mock_data.GetUserAgentString('Firefox 3.5'),
            'apple=0,banana=80,coconut=200')
        self.assertEqual(0, parent.apple)
        self.assertEqual(80, parent.banana)
        self.assertEqual(200, parent.coconut)
        expected_results = {
            'apple': 0,
            'banana': 40,
            'coconut': 100,
        }
        self.assertEqual(expected_results, parent.GetResults())

    def testAddResultWithExpando(self):
        # Odd raw scores exercise the int(round(x / 2.0)) path in AdjustResults.
        AddAdjustResults(self.test_set)
        parent = ResultParent.AddResult(
            self.test_set, '12.2.2.25', mock_data.GetUserAgentString('Firefox 3.5'),
            'apple=1,banana=49,coconut=499')
        self.assertEqual(1, parent.apple)
        self.assertEqual(49, parent.banana)
        self.assertEqual(499, parent.coconut)
        expected_results = {
            'apple': 1,
            'banana': 25,
            'coconut': 250,
        }
        self.assertEqual(expected_results, parent.GetResults())

    def testAddResultWithParamsLiteralNoneRaises(self):
        # A params_str with a None value is fine, but not a 'None' string
        ua_string = (
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 6.0; Trident/4.0; '
            'chromeframe; SLCC1; .NET CLR 2.0.5077; 3.0.30729),gzip(gfe),gzip(gfe)')
        self.assertRaises(
            ValueError,
            ResultParent.AddResult, self.test_set, '12.1.1.1', ua_string,
            'testDisplay=500,testVisibility=200', params_str='None')

    def testGetStatsDataWithParamsEmptyStringRaises(self):
        # An empty params_str must also be rejected.
        ua_string = (
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 6.0; Trident/4.0; '
            'chromeframe; SLCC1; .NET CLR 2.0.5077; 3.0.30729),gzip(gfe),gzip(gfe)')
        self.assertRaises(
            ValueError,
            ResultParent.AddResult, self.test_set, '12.1.1.1', ua_string,
            'testDisplay=500,testVisibility=200', params_str='')
class ChromeFrameAddResultTest(unittest.TestCase):
    """Verifies that js_user_agent_string is preserved for Chrome Frame."""

    def setUp(self):
        self.test_set = mock_data.MockTestSet()
        all_test_sets.AddTestSet(self.test_set)

    def tearDown(self):
        all_test_sets.RemoveTestSet(self.test_set)

    def testAddResult(self):
        # Chrome Frame sends an MSIE user agent over HTTP while the in-page
        # JavaScript reports the embedded Chrome's user agent string.
        chrome_ua_string = ('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) '
                            'AppleWebKit/530.1 (KHTML, like Gecko) '
                            'Chrome/2.0.169.1 Safari/530.1')
        ua_string = (
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 6.0; Trident/4.0; '
            'chromeframe; SLCC1; .NET CLR 2.0.5077; 3.0.30729),gzip(gfe),gzip(gfe)')
        parent = ResultParent.AddResult(
            self.test_set, '12.2.2.25', ua_string, 'apple=1,banana=3,coconut=500',
            js_user_agent_string=chrome_ua_string)
        self.assertEqual(chrome_ua_string,
                         parent.user_agent.js_user_agent_string)
| apache-2.0 |
j00bar/ansible | lib/ansible/modules/network/panos/panos_admpwd.py | 78 | 5971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_admpwd
short_description: change admin password of PAN-OS device using SSH with SSH key
description:
- Change the admin password of PAN-OS via SSH using a SSH key for authentication.
- Useful for AWS instances where the first login should be done via SSH.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- paramiko
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
username:
description:
- username for initial authentication
required: false
default: "admin"
key_filename:
description:
- filename of the SSH Key to use for authentication
required: true
newpassword:
description:
- password to configure for admin on the PAN-OS device
required: true
'''
EXAMPLES = '''
# Tries for 10 times to set the admin password of 192.168.1.1 to "badpassword"
# via SSH, authenticating using key /tmp/ssh.key
- name: set admin password
panos_admpwd:
ip_address: "192.168.1.1"
username: "admin"
key_filename: "/tmp/ssh.key"
newpassword: "badpassword"
register: result
until: not result|failed
retries: 10
delay: 30
'''
RETURN = '''
status:
description: success status
returned: success
type: string
sample: "Last login: Fri Sep 16 11:09:20 2016 from 10.35.34.56.....Configuration committed successfully"
'''
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
import time
import sys
try:
import paramiko
HAS_LIB=True
except ImportError:
HAS_LIB=False
# Read buffer size for each recv() from the SSH channel.
_PROMPTBUFF = 4096


def wait_with_timeout(module, shell, prompt, timeout=60):
    """Read from *shell* until the accumulated output ends with *prompt*.

    :param module: AnsibleModule used to report a fatal timeout
    :param shell: paramiko channel (or any object with recv_ready()/recv())
    :param prompt: character expected at the end of the device output
    :param timeout: seconds to wait before failing the module
    :return: everything read from the channel, prompt included
    """
    deadline = time.time() + timeout
    result = ""
    while True:
        if shell.recv_ready():
            result += shell.recv(_PROMPTBUFF)
            endresult = result.strip()
            if len(endresult) != 0 and endresult[-1] == prompt:
                break
        else:
            # Fix: the original looped without yielding, busy-waiting at
            # 100% CPU while the device was still producing output.
            time.sleep(0.05)
        if time.time() > deadline:
            module.fail_json(msg="Timeout waiting for prompt")
    return result
def set_panwfw_password(module, ip_address, key_filename, newpassword, username):
    """Change *username*'s password on a PAN-OS device over SSH.

    Authenticates with the given SSH private key, enters configuration
    mode, sets the password interactively and commits the change. In
    check mode the connection is only tested and the password is left
    untouched.

    :param module: AnsibleModule (check mode flag and error reporting)
    :param ip_address: address or hostname of the PAN-OS device
    :param key_filename: path to the SSH private key used to authenticate
    :param newpassword: password to configure for *username*
    :param username: admin account to update (also used for the SSH login)
    :return: tuple (changed, stdout)
    """
    stdout = ""
    ssh = paramiko.SSHClient()
    # add policy to accept all host keys, I haven't found
    # a way to retrieve the instance SSH key fingerprint from AWS
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip_address, username=username, key_filename=key_filename)
    shell = ssh.invoke_shell()
    # wait for the shell to start
    buff = wait_with_timeout(module, shell, ">")
    stdout += buff
    # step into config mode
    shell.send('configure\n')
    # wait for the config prompt
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff
    if module.check_mode:
        # exit and close connection
        shell.send('exit\n')
        ssh.close()
        return False, 'Connection test successful. Password left intact.'
    # set admin password
    shell.send('set mgt-config users ' + username + ' password\n')
    # wait for the password prompt
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff
    # enter password for the first time
    shell.send(newpassword+'\n')
    # wait for the password prompt
    buff = wait_with_timeout(module, shell, ":")
    stdout += buff
    # enter password for the second time
    shell.send(newpassword+'\n')
    # wait for the config mode prompt
    buff = wait_with_timeout(module, shell, "#")
    stdout += buff
    # commit !
    shell.send('commit\n')
    # wait for the prompt; commits can take a while, hence the longer timeout
    buff = wait_with_timeout(module, shell, "#", 120)
    stdout += buff
    if 'success' not in buff:
        module.fail_json(msg="Error setting " + username + " password: " + stdout)
    # exit
    shell.send('exit\n')
    ssh.close()
    return True, stdout
def main():
    """Module entry point: validate parameters and drive the password change."""
    argument_spec = dict(
        ip_address=dict(required=True),
        username=dict(default='admin'),
        key_filename=dict(required=True),
        newpassword=dict(no_log=True, required=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    if not HAS_LIB:
        module.fail_json(msg='paramiko is required for this module')
    ip_address = module.params["ip_address"]
    if not ip_address:
        module.fail_json(msg="ip_address should be specified")
    key_filename = module.params["key_filename"]
    if not key_filename:
        module.fail_json(msg="key_filename should be specified")
    newpassword = module.params["newpassword"]
    if not newpassword:
        module.fail_json(msg="newpassword is required")
    username = module.params['username']
    try:
        changed, stdout = set_panwfw_password(module, ip_address, key_filename, newpassword, username)
        module.exit_json(changed=changed, stdout=stdout)
    except Exception as exc:
        # Fix: the original passed the raw exception object as msg (via
        # sys.exc_info()), which is not a serializable message; report its
        # string form instead.
        module.fail_json(msg=str(exc))


if __name__ == '__main__':
    main()
| gpl-3.0 |
malderete/django-jsonvalidator | jsonvalidator/tests.py | 1 | 5657 | import json
from django.http import HttpResponse
from django.test import RequestFactory, TestCase
from .decorators import json_validator
# JSON Schema describing an order payload: a flat set of required string
# fields plus a "products" array of {price, product_distributor_id, quantity}
# objects. Every scalar value is transported as a string.
JSON_SCHEMA = {
    "type" : "object",
    "required": [
        'status', 'card_type', 'request_type', 'products',
        'payment_type', 'total_value', 'distributor_id', 'address_id',
        'cash_value'
    ],
    "properties" : {
        "status" : {"type" : "string"},
        "card_type" : {"type" : "string"},
        "request_type" : {"type" : "string"},
        "payment_type" : {"type" : "string"},
        "total_value": {"type" : "string"},
        "address_id": {"type" : "string"},
        "distributor_id": {"type" : "string"},
        "cash_value": {"type" : "string"},
        "products": {
            "type" : "array",
            "items" : {
                "type" : "object",
                "required": [
                    "price","product_distributor_id", "quantity"
                ],
                "properties" : {
                    "price" : {"type" : "string"},
                    "product_distributor_id" : {"type" : "string"},
                    "quantity" : {"type" : "string"},
                }
            }
        },
    }
}
# Utils: extractors that tell json_validator where the JSON payload lives.
# (Rewritten from assigned lambdas to named functions, per PEP 8 E731 --
# named defs give useful tracebacks and can carry docstrings.)
def get_from_body(r):
    """Return the raw JSON payload from the request body."""
    return r.body


def get_from_form_field(r):
    """Return the JSON payload submitted in the 'c_data' form field."""
    return r.POST.get('c_data')
# Fixture views: each one wraps json_validator with a different configuration
# so the tests below can exercise every decorator code path.

@json_validator(JSON_SCHEMA, get_from_body)
def test_for_post_view(request):
    # Payload arrives in the raw request body.
    return HttpResponse('OK')


@json_validator(JSON_SCHEMA, get_from_form_field)
def test_for_post_form_view(request):
    # Payload arrives in the 'c_data' POST field.
    return HttpResponse('OK')


@json_validator(JSON_SCHEMA, get_from_form_field,
                attach_to_request=True)
def test_for_post_form_view_attach_to(request):
    # With attach_to_request=True the decorator exposes the parsed JSON
    # on request.json_valid; echo it back for assertions.
    data = json.dumps(request.json_valid)
    return HttpResponse(data, status=200, content_type='application/json')


def on_error_callback(request, err):
    # Invoked when the payload cannot be parsed as JSON at all.
    return HttpResponse("Invalid JSON", status=400)


def on_invalid_callback(request, errors):
    # Invoked when the JSON parses but violates the schema.
    data = json.dumps(dict(errors=errors))
    return HttpResponse(data, status=200, content_type='application/json')


@json_validator(JSON_SCHEMA, get_from_form_field,
                on_error_callback=on_error_callback)
def test_for_on_error_callback(request):
    return "Never get here"


@json_validator(JSON_SCHEMA, get_from_form_field,
                on_invalid_callback=on_invalid_callback)
def test_for_on_invalid_callback(request):
    return "Never get here"
class JsonSchemaTests(TestCase):
    """End-to-end tests for the json_validator decorator."""

    def setUp(self):
        self.content_type="application/json"
        # Every test needs access to the request factory.
        self.factory = RequestFactory()
        # Data to test
        self.valid_data = {
            "status": "A",
            "card_type": "A",
            "request_type": "C",
            "products": [
                {
                    "price": "1.50",
                    "product_distributor_id": "449",
                    "quantity": "1"
                }
            ],
            "payment_type": "D",
            "total_value": "1000",
            "address_id": "624",
            "distributor_id": "107",
            "cash_value": "100"
        }
        # Invalid payload: omits the required 'status' and 'request_type'
        # fields, so schema validation reports exactly two errors.
        self.invalid_data = {
            "card_type": "A",
            "products": [
                {
                    "price": "1.50",
                    "product_distributor_id": "449",
                    "quantity": "1"
                }
            ],
            "payment_type": "D",
            "total_value": "1000",
            "address_id": "624",
            "distributor_id": "107",
            "cash_value": "100"
        }

    def test_body_post_valid_schema(self):
        # Valid JSON in the request body passes validation.
        data = json.dumps(self.valid_data)
        request = self.factory.post('/', data, content_type=self.content_type)
        response = test_for_post_view(request)
        self.assertEquals(response.status_code, 200)

    def test_body_post_invalid_schema(self):
        # Schema violations in the body default to a 400 response.
        data = json.dumps(self.invalid_data)
        request = self.factory.post('/', data, content_type=self.content_type)
        response = test_for_post_view(request)
        self.assertEquals(response.status_code, 400)

    def test_form_post_valid_schema(self):
        data = json.dumps(self.valid_data)
        request = self.factory.post('/', dict(c_data=data))
        response = test_for_post_form_view(request)
        self.assertEquals(response.status_code, 200)

    def test_form_post_invalid_schema(self):
        data = json.dumps(self.invalid_data)
        request = self.factory.post('/', dict(c_data=data))
        response = test_for_post_form_view(request)
        self.assertEquals(response.status_code, 400)

    def test_custom_on_invalid_callback(self):
        # The custom callback receives the two schema errors and may choose
        # its own status code (here 200 with an error list).
        data = json.dumps(self.invalid_data)
        request = self.factory.post('/', dict(c_data=data))
        response = test_for_on_invalid_callback(request)
        response_json = json.loads(response.content)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(len(response_json['errors']), 2)

    def test_custom_on_error_callback(self):
        # Unparseable JSON goes through the on_error callback.
        data = "[{}, 23234345]#123234fsdgdg45]"
        request = self.factory.post('/', dict(c_data=data))
        response = test_for_on_error_callback(request)
        self.assertEquals(response.status_code, 400)
        self.assertEquals(response.content, "Invalid JSON")

    def test_attach_to_request(self):
        # attach_to_request=True makes the parsed payload available to the
        # view as request.json_valid.
        data = json.dumps(self.valid_data)
        request = self.factory.post('/', dict(c_data=data))
        response = test_for_post_form_view_attach_to(request)
        response_json = json.loads(response.content)
        self.assertEquals(response.status_code, 200)
        self.assertEquals(isinstance(response_json, dict), True)
| mit |
rezoo/chainer | chainermn/communicators/hierarchical_communicator.py | 3 | 2965 | import chainer.cuda
import math
from chainermn.communicators import _communication_utility
from chainermn.communicators import _memory_utility
from chainermn.communicators import mpi_communicator_base
from chainermn import nccl
class HierarchicalCommunicator(mpi_communicator_base.MpiCommunicatorBase):
    """Communicator that reduces gradients intra-node over NCCL and
    inter-node over MPI.

    ``allreduce_grad`` runs in three phases: an intra-node NCCL reduce onto
    each node's local rank 0, an inter-node MPI allreduce among those node
    leaders, and an intra-node NCCL broadcast of the reduced result.
    """

    def __init__(self, mpi_comm):
        super(HierarchicalCommunicator, self).__init__(mpi_comm)
        if not nccl._available:
            raise RuntimeError(
                'NCCL is not available. '
                'Please confirm that NCCL is enabled in CuPy.'
            )

        # We have to delay the initialization of communicators. This is because
        # NCCL's communicators use the current CUDA devices at the time of
        # initialization. Therefore, we have to initialize NCCL communicators
        # after users set the devices to use.
        self.inter_mpi_comm = None
        self.intra_nccl_comm = None

        self.gpu_buffer_a = _memory_utility.DeviceMemory()
        self.gpu_buffer_b = _memory_utility.DeviceMemory()

    def _init_comms(self):
        """Lazily create the intra-node NCCL and inter-node MPI communicators
        on first use (see the note in __init__)."""
        if self.inter_mpi_comm is not None:
            assert self.intra_nccl_comm is not None
            return
        intra_mpi_comm = _communication_utility.init_intra_mpi_comm(
            self.mpi_comm, self.intra_rank, self.inter_rank)
        self.inter_mpi_comm = _communication_utility.init_inter_mpi_comm(
            self.mpi_comm, self.intra_rank, self.inter_rank)
        self.intra_nccl_comm = _communication_utility.init_nccl_comm(
            intra_mpi_comm)

    def allreduce_grad(self, model):
        """All-reduce the gradients of *model* across all workers in place."""
        self._init_comms()
        stream = chainer.cuda.Stream.null

        params = _memory_utility.extract_params_set_grad(model)
        itemsize = 4  # gradients are packed as 32-bit floats
        n_elems_total = sum(param.grad.size for param in params)
        # Exact ceiling division. The previous
        # ``int(math.ceil(n_elems_total / self.inter_size))`` silently
        # truncated under Python 2 (integer ``/`` floors before ceil is
        # applied), under-sizing the buffers whenever the element count is
        # not a multiple of the node count.
        n_elems_per_node = -(-n_elems_total // self.inter_size)
        n_bytes_per_node = n_elems_per_node * itemsize
        n_bytes_buffer = n_bytes_per_node * self.inter_size

        self.gpu_buffer_a.assign(n_bytes_buffer)
        self.gpu_buffer_b.assign(n_bytes_buffer)
        _memory_utility.pack_params(
            params, itemsize, 'grad', self.gpu_buffer_a)

        # Intra-node reduce
        self.intra_nccl_comm.reduce(
            self.gpu_buffer_a.ptr(), self.gpu_buffer_b.ptr(), n_elems_total,
            nccl.NCCL_FLOAT, nccl.NCCL_SUM, 0, stream.ptr)

        # Inter-node allreduce
        if self.intra_rank == 0:
            _communication_utility.inter_allreduce_gpu(
                self.inter_mpi_comm, self.size,
                self.gpu_buffer_a, self.gpu_buffer_b,
                n_bytes_buffer, n_elems_per_node, n_bytes_per_node, stream)

        # Intra-node bcast
        self.intra_nccl_comm.bcast(
            self.gpu_buffer_b.ptr(), n_elems_total, nccl.NCCL_FLOAT, 0,
            stream.ptr)

        _memory_utility.unpack_params(
            params, itemsize, 'grad', self.gpu_buffer_b)
| mit |
jagg81/translate-toolkit | build/lib.linux-x86_64-2.6/translate/lang/te.py | 4 | 1091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""This module represents Telugu language.
For more information, see U{http://en.wikipedia.org/wiki/Telugu_language}
"""
from translate.lang import common
class te(common.Common):
    """This class represents Telugu."""

    # Telugu script has no upper/lower case distinction, so skip the
    # capitalisation checks that assume Latin-style casing.
    ignoretests = ["startcaps", "simplecaps"]
| gpl-2.0 |
opnsense/core | src/opnsense/scripts/netflow/lib/aggregate.py | 1 | 4679 | """
Copyright (c) 2016-2018 Ad Schellevis <ad@opnsense.org>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
aggregate flow data (format in parse.py) into sqlite structured container per type/resolution.
Implementations are collected in lib\aggregates\
"""
import os
import datetime
import sqlite3
def convert_timestamp(val):
    """sqlite3 converter: parse a stored timestamp into a naive datetime.

    Accepts either a formatted date/time (bytes, e.g.
    b"2018-01-02 03:04:05.5" or just b"2018-01-02") or a numeric
    seconds-since-epoch value; epoch values are interpreted as UTC.

    :param val: raw column value as bytes
    :return: datetime.datetime
    """
    if val.find(b'-') > -1:
        # formatted date/time
        if val.find(b" ") > -1:
            datepart, timepart = val.split(b" ")
        else:
            # Date-only value: default to midnight. Fix: the previous
            # default b"0:0:0,0" crashed with ValueError because the
            # trailing ",0" ended up inside int() when parsing seconds.
            datepart = val
            timepart = b"0:0:0"
        year, month, day = list(map(int, datepart.split(b"-")))
        timepart_full = timepart.split(b".")
        hours, minutes, seconds = list(map(int, timepart_full[0].split(b":")))
        if len(timepart_full) == 2:
            # Left-align and zero-pad the fraction to exactly 6 digits
            # so e.g. ".5" becomes 500000 microseconds.
            microseconds = int('{:0<6.6}'.format(timepart_full[1].decode()))
        else:
            microseconds = 0
        val = datetime.datetime(year, month, day, hours, minutes, seconds, microseconds)
    else:
        # timestamp stored as seconds since epoch, convert to utc
        val = datetime.datetime.utcfromtimestamp(float(val))
    return val


sqlite3.register_converter('timestamp', convert_timestamp)
class AggMetadata(object):
    """Track flow-parse progress (last sync timestamp) in a small sqlite db.

    The database lives at <database_dir>/metadata.sqlite and currently holds
    a single table, sync_timestamp, with the high-water-mark mtime.
    """

    def __init__(self, database_dir='/var/netflow'):
        """Open (creating directory and file as needed) the metadata db."""
        self._filename = '%s/metadata.sqlite' % database_dir
        # make sure the target directory exists; exist_ok avoids the
        # check-then-create race of the previous isdir()/makedirs() pair
        target_path = os.path.dirname(self._filename)
        os.makedirs(target_path, exist_ok=True)
        # open sqlite database and cursor
        self._db_connection = sqlite3.connect(self._filename, timeout=60,
                                              detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
        self._db_cursor = self._db_connection.cursor()
        # known tables
        self._tables = list()
        # cache known tables
        self._update_known_tables()

    def __del__(self):
        """ close database on destruct
        :return: None
        """
        # getattr guard: __del__ may run even when __init__ raised before
        # the connection attribute was assigned.
        if getattr(self, '_db_connection', None) is not None:
            self._db_connection.close()

    def _update_known_tables(self):
        """ refresh the cached list of table names from sqlite_master
        """
        self._db_cursor.execute('SELECT name FROM sqlite_master')
        for record in self._db_cursor.fetchall():
            self._tables.append(record[0])

    def update_sync_time(self, timestamp):
        """ record *timestamp* as the last sync time, only moving forward
        """
        if 'sync_timestamp' not in self._tables:
            self._db_cursor.execute('create table sync_timestamp(mtime timestamp)')
            self._db_cursor.execute('insert into sync_timestamp(mtime) values(0)')
            self._db_connection.commit()
            self._update_known_tables()
        # update last sync timestamp, if this date > timestamp
        self._db_cursor.execute('update sync_timestamp set mtime = :mtime where mtime < :mtime', {'mtime': timestamp})
        self._db_connection.commit()

    def last_sync(self):
        """ return the most recent sync timestamp, or 0.0 when none recorded
        """
        if 'sync_timestamp' not in self._tables:
            return 0.0
        else:
            self._db_cursor.execute('select max(mtime) from sync_timestamp')
            return self._db_cursor.fetchall()[0][0]
| bsd-2-clause |
letouriste001/SmartForest_2.0 | python3.4Smartforest/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/contrib/appengine.py | 154 | 7938 | from __future__ import absolute_import
import logging
import os
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    """Warning issued when urllib3 falls back to the URLFetch API on App Engine."""
    pass
class AppEnginePlatformError(HTTPError):
    """Raised when a request cannot be serviced within URLFetch's limitations."""
    pass
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.
    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation here:
    https://cloud.google.com/appengine/docs/python/urlfetch
    Notably it will raise an AppEnginePlatformError if:
    * URLFetch is not available.
    * If you attempt to use this on GAEv2 (Managed VMs), as full socket
    support is available.
    * If a request size is more than 10 megabytes.
    * If a response size is more than 32 megabytes.
    * If you use an unsupported request method such as OPTIONS.
    Beyond those cases, it will raise normal urllib3 errors.
    """
    def __init__(self, headers=None, retries=None, validate_certificate=True):
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")
        if is_prod_appengine_mvms():
            # BUG FIX: the adjacent string literals previously lacked a
            # separating space, producing "...AppEngineManageron Managed VMs...".
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager "
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")
        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.org/en/latest/contrib.html.",
            AppEnginePlatformWarning)
        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.retries = retries or Retry.DEFAULT
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False
    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """
        Issue a request via urlfetch.fetch(), translating URLFetch exceptions
        into their urllib3 equivalents and honoring the retry policy
        (including forced retries and redirect handling).
        """
        retries = self._get_retries(retries, redirect)
        try:
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=(
                    redirect and
                    retries.redirect != 0 and
                    retries.total),
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)
        except urlfetch.InvalidURLError as e:
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)
        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)
        except urlfetch.ResponseTooLargeError as e:
            # BUG FIX: missing space between the concatenated literals
            # (message previously read "...only supportsresponses...").
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports "
                "responses up to 32mb in size.", e)
        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)
        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)
        http_response = self._urlfetch_response_to_http_response(
            response, **response_kw)
        # Check for redirect response
        if (http_response.get_redirect_location() and
                retries.raise_on_redirect and redirect):
            raise MaxRetryError(self, url, "too many redirects")
        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=http_response.status):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.info("Forced retry: %s", url)
            retries.sleep()
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)
        return http_response
    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Convert a urlfetch response object into an urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')
            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']
        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )
    def _get_absolute_timeout(self, timeout):
        """Reduce a urllib3 Timeout (or raw value) to the single deadline URLFetch accepts."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return 5  # 5s is the default timeout for URLFetch.
        if isinstance(timeout, Timeout):
            if timeout._read is not timeout._connect:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total timeout.", AppEnginePlatformWarning)
            return timeout.total
        return timeout
    def _get_retries(self, retries, redirect):
        """Normalize `retries` into a Retry object, warning about options URLFetch cannot honor."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)
        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)
        return retries
def is_appengine():
    """Return True when running under any recognized App Engine environment."""
    # Short-circuit order preserved: local dev server, then prod sandbox,
    # then Managed VMs.
    return (is_local_appengine()
            or is_prod_appengine()
            or is_prod_appengine_mvms())
def is_appengine_sandbox():
    """Return True for the sandboxed (non-Managed-VM) App Engine runtime."""
    on_appengine = is_appengine()
    return on_appengine and not is_prod_appengine_mvms()
def is_local_appengine():
    """
    Return True when running under the App Engine development server.
    SERVER_SOFTWARE is read with .get() so a missing variable yields False
    instead of raising KeyError when APPENGINE_RUNTIME happens to be set.
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Development/' in os.environ.get('SERVER_SOFTWARE', ''))
def is_prod_appengine():
    """
    Return True on the production App Engine sandbox (not on Managed VMs).
    SERVER_SOFTWARE is read with .get() so a missing variable yields False
    instead of raising KeyError when APPENGINE_RUNTIME happens to be set.
    """
    return ('APPENGINE_RUNTIME' in os.environ and
            'Google App Engine/' in os.environ.get('SERVER_SOFTWARE', '') and
            not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
    """Return True when running on an App Engine Managed VM (GAE_VM == 'true')."""
    gae_vm = os.environ.get('GAE_VM', False)
    return gae_vm == 'true'
| mit |
s-hertel/ansible | test/lib/ansible_test/_internal/coverage/analyze/targets/combine.py | 31 | 2393 | """Combine integration test target code coverage reports."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from .... import types as t
from . import (
CoverageAnalyzeTargetsConfig,
get_target_index,
make_report,
read_report,
write_report,
)
if t.TYPE_CHECKING:
from . import (
Arcs,
IndexedPoints,
Lines,
TargetIndexes,
)
class CoverageAnalyzeTargetsCombineConfig(CoverageAnalyzeTargetsConfig):
    """Configuration for the `coverage analyze targets combine` command."""
    def __init__(self, args):  # type: (t.Any) -> None
        super(CoverageAnalyzeTargetsCombineConfig, self).__init__(args)
        # Paths of the per-run coverage reports to merge.
        self.input_files = args.input_file  # type: t.List[str]
        # Path where the combined report will be written.
        self.output_file = args.output_file  # type: str
def command_coverage_analyze_targets_combine(args):  # type: (CoverageAnalyzeTargetsCombineConfig) -> None
    """Combine integration test target code coverage reports."""
    target_indexes = {}  # type: TargetIndexes
    merged_arcs = {}  # type: Arcs
    merged_lines = {}  # type: Lines
    # Fold each input report into the shared accumulators, arcs then lines.
    for input_path in args.input_files:
        targets, arcs, lines = read_report(input_path)
        merge_indexes(arcs, targets, merged_arcs, target_indexes)
        merge_indexes(lines, targets, merged_lines, target_indexes)
    combined_report = make_report(target_indexes, merged_arcs, merged_lines)
    write_report(args, combined_report, args.output_file)
def merge_indexes(
        source_data,  # type: IndexedPoints
        source_index,  # type: t.List[str]
        combined_data,  # type: IndexedPoints
        combined_index,  # type: TargetIndexes
):  # type: (...) -> None
    """Merge indexes from the source into the combined data set (arcs or lines)."""
    for path, points in source_data.items():
        merged_points = combined_data.setdefault(path, {})
        for point, target_indexes in points.items():
            merged_targets = merged_points.setdefault(point, set())
            # Re-map each source-local target index into the combined index space.
            merged_targets.update(
                get_target_index(source_index[index], combined_index)
                for index in target_indexes
            )
| gpl-3.0 |
kntem/webdeposit | modules/webjournal/lib/elements/bfe_webjournal_main_navigation.py | 25 | 3935 | # -*- coding: utf-8 -*-
## $Id: bfe_webjournal_CERNBulletinMainNavigation.py,v 1.10 2008/06/03 09:52:11 jerome Exp $
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal element - Prints main (horizontal) navigation menu
"""
from invenio.webjournal_utils import \
parse_url_string, \
make_journal_url, \
get_journal_categories
from invenio.urlutils import create_html_link
from invenio.messages import gettext_set_language
def format_element(bfo, category_prefix, category_suffix, separator=" | ",
                   display_all_categories='no'):
    """
    Creates the main navigation menu of the journal
    @param bfo: BibFormat object carrying the request context (URI, language, user)
    @param category_prefix: value printed before each category
    @param category_suffix: value printed after each category
    @param separator: value printed between each category
    @param display_all_categories: if 'yes', show categories even when there is no corresponding article
    """
    # Retrieve context (journal, issue and category) from URI
    args = parse_url_string(bfo.user_info['uri'])
    journal_name = args["journal_name"]
    selected_category = args["category"]
    this_issue_number = args["issue"]
    ln = bfo.lang
    _ = gettext_set_language(ln)
    # Retrieve categories for this journal and issue
    # (passing None instead of the issue number returns every category)
    journal_categories = get_journal_categories(journal_name,
                                                display_all_categories.lower() != 'yes' and \
                                                this_issue_number or None)
    # Build the links to categories
    categories_links = []
    for category in journal_categories:
        # Create URL
        category_url = make_journal_url(bfo.user_info['uri'],
                                        {'category': category,
                                         'recid': '',
                                         'ln': bfo.lang})
        # Create HTML link
        linkattrd = {}
        if category.lower() == selected_category.lower():
            linkattrd = {'class':'selectedNavigationPage'}
        # Special case: the CERN Bulletin shows a shortened label for this
        # category (translated by hand for French).
        if journal_name == 'CERNBulletin' and \
           category == 'Training and Development':
            category = 'Training'
            if ln == 'fr':
                category = 'Formations'
        category_link = create_html_link(category_url, {},
                                         _(category),
                                         linkattrd=linkattrd)
        # Append to list of links
        categories_links.append(category_link)
    navigation = '<div id="navigationMenu">'
    navigation += separator.join([category_prefix + \
                                  category_link + \
                                  category_suffix for category_link \
                                  in categories_links])
    navigation += '</div>'
    return navigation
def escape_values(bfo):
    """Tell BibFormat whether this element's output must be escaped (0 = no escaping)."""
    return 0
# The assignments below exist only so that translation-extraction tools
# (gettext) register these dynamically-looked-up category names; the
# resulting values are intentionally discarded.
_ = gettext_set_language('en')
dummy = _("News Articles")
dummy = _("Official News")
dummy = _("Training and Development")
dummy = _("General Information")
dummy = _("Announcements")
dummy = _("Training")
dummy = _("Events")
dummy = _("Staff Association")
| gpl-2.0 |
dav1x/ansible | lib/ansible/modules/windows/win_nssm.py | 26 | 5852 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Heyo
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_nssm
version_added: "2.0"
short_description: NSSM - the Non-Sucking Service Manager
description:
- nssm is a service helper which doesn't suck. See U(https://nssm.cc/) for more information.
requirements:
- "nssm >= 2.24.0 # (install via win_chocolatey) win_chocolatey: name=nssm"
options:
name:
description:
- Name of the service to operate on
required: true
state:
description:
- State of the service on the system
- Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of ansible, so these should be implemented via the
ansible command module
choices:
- present
- started
- stopped
- restarted
- absent
default: started
application:
description:
- The application binary to run as a service
- "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
- "Note that the application name must look like the following, if the directory includes spaces:"
- 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
- >
See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info:
U(https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)
stdout_file:
description:
- Path to receive output
stderr_file:
description:
- Path to receive error output
app_parameters:
description:
- Parameters to be passed to the application when it starts.
- Use either this or C(app_parameters_free_form), not both
app_parameters_free_form:
version_added: "2.3.0"
description:
- Single string of parameters to be passed to the service.
- Use either this or C(app_parameters), not both
dependencies:
description:
- Service dependencies that has to be started to trigger startup, separated by comma.
user:
description:
- User to be used for service startup
password:
description:
- Password to be used for service startup
start_mode:
description:
- If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it.
C(disabled) means that the service will stay off, regardless if it is needed or not.
default: auto
choices:
- auto
- manual
- disabled
author:
- "Adam Keech (@smadam813)"
- "George Frank (@georgefrank)"
- "Hans-Joachim Kliemeck (@h0nIg)"
'''
EXAMPLES = r'''
# Install and start the foo service
- win_nssm:
name: foo
application: C:\windows\foo.exe
# Install and start the foo service with a key-value pair argument
# This will yield the following command: C:\windows\foo.exe bar "true"
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
bar: true
# Install and start the foo service with a key-value pair argument, where the argument needs to start with a dash
# This will yield the following command: C:\windows\\foo.exe -bar "true"
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
"-bar": true
# Install and start the foo service with a single parameter
# This will yield the following command: C:\windows\\foo.exe bar
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
_: bar
# Install and start the foo service with a mix of single params, and key value pairs
# This will yield the following command: C:\windows\\foo.exe bar -file output.bat
- win_nssm:
name: foo
application: C:\windows\foo.exe
app_parameters:
_: bar
"-file": "output.bat"
# Use the single line parameters option to specify an arbitrary string of parameters
# for the service executable
- name: Make sure the Consul service runs
win_nssm:
name: consul
application: C:\consul\consul.exe
app_parameters_free_form: agent -config-dir=C:\consul\config
stdout_file: C:\consul\log.txt
stderr_file: C:\consul\error.txt
# Install and start the foo service, redirecting stdout and stderr to the same file
- win_nssm:
name: foo
application: C:\windows\foo.exe
stdout_file: C:\windows\foo.log
stderr_file: C:\windows\foo.log
# Install and start the foo service, but wait for dependencies tcpip and adf
- win_nssm:
name: foo
application: C:\windows\foo.exe
dependencies: 'adf,tcpip'
# Install and start the foo service with dedicated user
- win_nssm:
name: foo
application: C:\windows\foo.exe
user: foouser
password: secret
# Install the foo service but do not start it automatically
- win_nssm:
name: foo
application: C:\windows\foo.exe
state: present
start_mode: manual
# Remove the foo service
- win_nssm:
name: foo
state: absent
'''
| gpl-3.0 |
nikolas/edx-platform | lms/djangoapps/django_comment_client/utils.py | 43 | 28663 | from collections import defaultdict
from datetime import datetime
import json
import logging
import pytz
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import connection
from django.http import HttpResponse
from django.utils.timezone import UTC
import pystache_custom as pystache
from opaque_keys.edx.locations import i4xEncoder
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_client.permissions import check_permissions_by_view, has_permission, get_team
from django_comment_client.settings import MAX_COMMENT_DEPTH
from edxmako import lookup_template
from courseware import courses
from courseware.access import has_access
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from openedx.core.djangoapps.course_groups.cohorts import (
get_course_cohort_settings, get_cohort_by_id, get_cohort_id, is_course_cohorted
)
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
log = logging.getLogger(__name__)
def extract(dic, keys):
    """Return a new dict holding only `keys`; keys missing from `dic` map to None."""
    subset = {}
    for key in keys:
        subset[key] = dic.get(key)
    return subset
def strip_none(dic):
    """Return a copy of `dic` without entries whose value is None.

    Uses dict.items() instead of the Python-2-only iteritems(), so the
    helper works unchanged on both Python 2.7 and Python 3.
    """
    return {key: value for key, value in dic.items() if value is not None}
def strip_blank(dic):
    """Return a copy of `dic` without entries whose value is a blank string.

    Uses dict.items() instead of the Python-2-only iteritems(), so the
    helper works unchanged on both Python 2.7 and Python 3.
    """
    def _is_blank(value):
        # NOTE(review): only `str` is checked; on Python 2 a blank `unicode`
        # value passes through unfiltered — confirm whether that is intended.
        return isinstance(value, str) and len(value.strip()) == 0
    return {key: value for key, value in dic.items() if not _is_blank(value)}
def merge_dict(dic1, dic2):
    """Return a new dict combining `dic1` and `dic2`.

    On key collisions dic2's value wins (answering the old TODO about keys
    present in both with different values). The previous implementation,
    ``dict(dic1.items() + dic2.items())``, built two throwaway lists on
    Python 2 and raises TypeError on Python 3 (dict views do not support +);
    copy-then-update is equivalent and portable.
    """
    merged = dict(dic1)
    merged.update(dic2)
    return merged
def get_role_ids(course_id):
    """Return {role_name: [user_id, ...]} for every forum role in the course except the base Student role."""
    roles = Role.objects.filter(course_id=course_id).exclude(name=FORUM_ROLE_STUDENT)
    return dict([(role.name, list(role.users.values_list('id', flat=True))) for role in roles])
def has_discussion_privileges(user, course_id):
    """Returns True if the user is privileged in teams discussions for
    this course. The user must be one of Discussion Admin, Moderator,
    or Community TA.
    Args:
        user (User): The user to check privileges for.
        course_id (CourseKey): A key for the course to check privileges for.
    Returns:
        bool
    """
    # get_role_ids already excludes the base Student role, so membership in
    # any remaining role (Admin, Moderator, Community TA) implies privilege.
    role_member_ids = get_role_ids(course_id)
    return any(user.id in member_ids for member_ids in role_member_ids.values())
def has_forum_access(uname, course_id, rolename):
    """Return True iff the user named `uname` holds forum role `rolename` in the course; False when the role does not exist or the user is not in it."""
    try:
        role = Role.objects.get(name=rolename, course_id=course_id)
    except Role.DoesNotExist:
        return False
    return role.users.filter(username=uname).exists()
def has_required_keys(module):
    """Returns True iff module has the proper attributes for generating metadata with get_discussion_id_map_entry()"""
    required_attrs = ('discussion_id', 'discussion_category', 'discussion_target')
    for attr in required_attrs:
        if getattr(module, attr, None) is None:
            # Log and reject: a module missing any attribute is excluded
            # from the category map.
            log.debug("Required key '%s' not in discussion %s, leaving out of category map", attr, module.location)
            return False
    return True
def get_accessible_discussion_modules(course, user, include_all=False):  # pylint: disable=invalid-name
    """
    Return a list of all valid discussion modules in this course that
    are accessible to the given user.
    Passing include_all=True skips the per-user 'load' access check (modules
    still must have the required discussion attributes).
    """
    all_modules = modulestore().get_items(course.id, qualifiers={'category': 'discussion'})
    return [
        module for module in all_modules
        if has_required_keys(module) and (include_all or has_access(user, 'load', module, course.id))
    ]
def get_discussion_id_map_entry(module):
    """
    Returns a tuple of (discussion_id, metadata) suitable for inclusion in the results of get_discussion_id_map().
    """
    # Title combines the last path segment of the category with the target.
    category_leaf = module.discussion_category.split("/")[-1].strip()
    metadata = {
        "location": module.location,
        "title": category_leaf + " / " + module.discussion_target,
    }
    return (module.discussion_id, metadata)
class DiscussionIdMapIsNotCached(Exception):
    """Thrown when the discussion id map is not cached for this course, but an attempt was made to access it."""
    # Raised by get_cached_discussion_key(); callers fall back to rebuilding
    # the map from the modulestore.
    pass
def get_cached_discussion_key(course, discussion_id):
    """
    Returns the usage key of the discussion module associated with discussion_id if it is cached. If the discussion id
    map is cached but does not contain discussion_id, returns None. If the discussion id map is not cached for course,
    raises a DiscussionIdMapIsNotCached exception.
    """
    try:
        cached_mapping = CourseStructure.objects.get(course_id=course.id).discussion_id_map
        if not cached_mapping:
            # An empty cached map is treated the same as a missing one.
            raise DiscussionIdMapIsNotCached()
        return cached_mapping.get(discussion_id)
    except CourseStructure.DoesNotExist:
        raise DiscussionIdMapIsNotCached()
def get_cached_discussion_id_map(course, discussion_ids, user):
    """
    Returns a dict mapping discussion_ids to respective discussion module metadata if it is cached and visible to the
    user. If not, returns the result of get_discussion_id_map
    """
    try:
        entries = []
        for discussion_id in discussion_ids:
            key = get_cached_discussion_key(course, discussion_id)
            if not key:
                # Unknown id: skip it rather than failing the whole lookup.
                continue
            module = modulestore().get_item(key)
            if not (has_required_keys(module) and has_access(user, 'load', module, course.id)):
                # Not a valid discussion module, or not visible to this user.
                continue
            entries.append(get_discussion_id_map_entry(module))
        return dict(entries)
    except DiscussionIdMapIsNotCached:
        # Cache miss anywhere above: fall back to building the full map.
        return get_discussion_id_map(course, user)
def get_discussion_id_map(course, user):
    """
    Transform the list of this course's discussion modules (visible to a given user) into a dictionary of metadata keyed
    by discussion_id.
    """
    accessible_modules = get_accessible_discussion_modules(course, user)
    return dict(get_discussion_id_map_entry(module) for module in accessible_modules)
def _filter_unstarted_categories(category_map):
    """Return a copy of `category_map` with not-yet-started entries and subcategories
    removed, and the bookkeeping "start_date" keys stripped from surviving entries."""
    now = datetime.now(UTC())
    result_map = {}
    # Traverse the unfiltered tree and the result tree in lockstep using two
    # parallel queues (iterative instead of recursive).
    unfiltered_queue = [category_map]
    filtered_queue = [result_map]
    while unfiltered_queue:
        unfiltered_map = unfiltered_queue.pop()
        filtered_map = filtered_queue.pop()
        filtered_map["children"] = []
        filtered_map["entries"] = {}
        filtered_map["subcategories"] = {}
        for child in unfiltered_map["children"]:
            if child in unfiltered_map["entries"]:
                if unfiltered_map["entries"][child]["start_date"] <= now:
                    filtered_map["children"].append(child)
                    filtered_map["entries"][child] = {}
                    # Copy everything except the internal "start_date" marker.
                    for key in unfiltered_map["entries"][child]:
                        if key != "start_date":
                            filtered_map["entries"][child][key] = unfiltered_map["entries"][child][key]
                else:
                    log.debug(u"Filtering out:%s with start_date: %s", child, unfiltered_map["entries"][child]["start_date"])
            else:
                # NOTE(review): subcategories use strict `<` while entries above
                # use `<=` — confirm whether this asymmetry is intentional.
                if unfiltered_map["subcategories"][child]["start_date"] < now:
                    filtered_map["children"].append(child)
                    filtered_map["subcategories"][child] = {}
                    unfiltered_queue.append(unfiltered_map["subcategories"][child])
                    filtered_queue.append(filtered_map["subcategories"][child])
    return result_map
def _sort_map_entries(category_map, sort_alpha):
things = []
for title, entry in category_map["entries"].items():
if entry["sort_key"] is None and sort_alpha:
entry["sort_key"] = title
things.append((title, entry))
for title, category in category_map["subcategories"].items():
things.append((title, category))
_sort_map_entries(category_map["subcategories"][title], sort_alpha)
category_map["children"] = [x[0] for x in sorted(things, key=lambda x: x[1]["sort_key"])]
def get_discussion_category_map(course, user, cohorted_if_in_list=False, exclude_unstarted=True):
    """
    Transform the list of this course's discussion modules into a recursive dictionary structure. This is used
    to render the discussion category map in the discussion tab sidebar for a given user.
    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        cohorted_if_in_list (bool): If True, inline topics are marked is_cohorted only if they are
            in course_cohort_settings.discussion_topics.
        exclude_unstarted (bool): If True (default), entries whose start date is in the future are
            filtered out of the returned map.
    Example:
        >>> example = {
        >>>     "entries": {
        >>>         "General": {
        >>>             "sort_key": "General",
        >>>             "is_cohorted": True,
        >>>             "id": "i4x-edx-eiorguegnru-course-foobarbaz"
        >>>         }
        >>>     },
        >>>     "children": ["General", "Getting Started"],
        >>>     "subcategories": {
        >>>         "Getting Started": {
        >>>             "subcategories": {},
        >>>             "children": [
        >>>                 "Working with Videos",
        >>>                 "Videos on edX"
        >>>             ],
        >>>             "entries": {
        >>>                 "Working with Videos": {
        >>>                     "sort_key": None,
        >>>                     "is_cohorted": False,
        >>>                     "id": "d9f970a42067413cbb633f81cfb12604"
        >>>                 },
        >>>                 "Videos on edX": {
        >>>                     "sort_key": None,
        >>>                     "is_cohorted": False,
        >>>                     "id": "98d8feb5971041a085512ae22b398613"
        >>>                 }
        >>>             }
        >>>         }
        >>>     }
        >>> }
    """
    # First pass: bucket every accessible discussion module by its
    # "A / B / C" category path string.
    unexpanded_category_map = defaultdict(list)
    modules = get_accessible_discussion_modules(course, user)
    course_cohort_settings = get_course_cohort_settings(course.id)
    for module in modules:
        # NOTE: `id` shadows the builtin within this loop body.
        id = module.discussion_id
        title = module.discussion_target
        sort_key = module.sort_key
        category = " / ".join([x.strip() for x in module.discussion_category.split("/")])
        # Handle case where module.start is None
        entry_start_date = module.start if module.start else datetime.max.replace(tzinfo=pytz.UTC)
        unexpanded_category_map[category].append({"title": title, "id": id, "sort_key": sort_key, "start_date": entry_start_date})
    # Second pass: expand each flat category path into the nested
    # entries/subcategories tree, tracking the earliest start date per node.
    category_map = {"entries": defaultdict(dict), "subcategories": defaultdict(dict)}
    for category_path, entries in unexpanded_category_map.items():
        node = category_map["subcategories"]
        path = [x.strip() for x in category_path.split("/")]
        # Find the earliest start date for the entries in this category
        category_start_date = None
        for entry in entries:
            if category_start_date is None or entry["start_date"] < category_start_date:
                category_start_date = entry["start_date"]
        for level in path[:-1]:
            if level not in node:
                node[level] = {"subcategories": defaultdict(dict),
                               "entries": defaultdict(dict),
                               "sort_key": level,
                               "start_date": category_start_date}
            else:
                if node[level]["start_date"] > category_start_date:
                    node[level]["start_date"] = category_start_date
            node = node[level]["subcategories"]
        level = path[-1]
        if level not in node:
            node[level] = {"subcategories": defaultdict(dict),
                           "entries": defaultdict(dict),
                           "sort_key": level,
                           "start_date": category_start_date}
        else:
            if node[level]["start_date"] > category_start_date:
                node[level]["start_date"] = category_start_date
        always_cohort_inline_discussions = (  # pylint: disable=invalid-name
            not cohorted_if_in_list and course_cohort_settings.always_cohort_inline_discussions
        )
        dupe_counters = defaultdict(lambda: 0)  # counts the number of times we see each title
        for entry in entries:
            is_entry_cohorted = (
                course_cohort_settings.is_cohorted and (
                    always_cohort_inline_discussions or entry["id"] in course_cohort_settings.cohorted_discussions
                )
            )
            title = entry["title"]
            # "entries" is a defaultdict(dict): probing `title` inserts an empty
            # (falsy) dict on first sight, so this is True only for duplicates.
            if node[level]["entries"][title]:
                # If we've already seen this title, append an incrementing number to disambiguate
                # the category from other categories sharing the same title in the course discussion UI.
                dupe_counters[title] += 1
                title = u"{title} ({counter})".format(title=title, counter=dupe_counters[title])
            node[level]["entries"][title] = {"id": entry["id"],
                                             "sort_key": entry["sort_key"],
                                             "start_date": entry["start_date"],
                                             "is_cohorted": is_entry_cohorted}
    # TODO. BUG! : course location is not unique across multiple course runs!
    # (I think Kevin already noticed this) Need to send course_id with requests, store it
    # in the backend.
    # Third pass: add the course-level (top-level) discussion topics, which are
    # always considered started as of "now".
    for topic, entry in course.discussion_topics.items():
        category_map['entries'][topic] = {
            "id": entry["id"],
            "sort_key": entry.get("sort_key", topic),
            "start_date": datetime.now(UTC()),
            "is_cohorted": (course_cohort_settings.is_cohorted and
                            entry["id"] in course_cohort_settings.cohorted_discussions)
        }
    _sort_map_entries(category_map, course.discussion_sort_alpha)
    return _filter_unstarted_categories(category_map) if exclude_unstarted else category_map
def discussion_category_id_access(course, user, discussion_id):
    """
    Returns True iff the given discussion_id is accessible for user in course.
    Assumes that the commentable identified by discussion_id has a null or 'course' context.
    Uses the discussion id cache if available, falling back to
    get_discussion_categories_ids if there is no cache.
    """
    # Course-wide (top-level) topics are visible to everyone in the course.
    if discussion_id in course.top_level_discussion_topic_ids:
        return True
    try:
        key = get_cached_discussion_key(course, discussion_id)
        if not key:
            return False
        module = modulestore().get_item(key)
        return has_required_keys(module) and has_access(user, 'load', module, course.id)
    except DiscussionIdMapIsNotCached:
        return discussion_id in get_discussion_categories_ids(course, user)
def get_discussion_categories_ids(course, user, include_all=False):
    """
    Returns a list of available ids of categories for the course that
    are accessible to the given user.
    Args:
        course: Course for which to get the ids.
        user: User to check for access.
        include_all (bool): If True, return all ids. Used by configuration views.
    """
    accessible_modules = get_accessible_discussion_modules(course, user, include_all=include_all)
    accessible_discussion_ids = [module.discussion_id for module in accessible_modules]
    return course.top_level_discussion_topic_ids + accessible_discussion_ids
class JsonResponse(HttpResponse):
    """HTTP response whose body is `data` serialized to JSON, using i4xEncoder so opaque-key objects (locations, course keys) serialize cleanly."""
    def __init__(self, data=None):
        content = json.dumps(data, cls=i4xEncoder)
        super(JsonResponse, self).__init__(content,
                                           mimetype='application/json; charset=utf-8')
class JsonError(HttpResponse):
    """HTTP error response carrying a JSON body of the form {"errors": [...]}."""
    def __init__(self, error_messages=None, status=400):
        """Accept a single message or a list of messages; defaults to an empty list.

        The previous signature used a mutable default argument (``=[]``);
        a None sentinel is the safe, behavior-compatible replacement.
        """
        if error_messages is None:
            error_messages = []
        elif isinstance(error_messages, basestring):
            error_messages = [error_messages]
        content = json.dumps({'errors': error_messages}, indent=2, ensure_ascii=False)
        super(JsonError, self).__init__(content,
                                        mimetype='application/json; charset=utf-8', status=status)
class HtmlResponse(HttpResponse):
    """Response wrapper returning the given markup with a text/plain content type."""
    def __init__(self, html=''):
        super(HtmlResponse, self).__init__(html, content_type='text/plain')
class ViewNameMiddleware(object):
    """Middleware that stamps the resolved view function's name onto the request as `request.view_name` before the view runs."""
    def process_view(self, request, view_func, view_args, view_kwargs):
        request.view_name = view_func.__name__
class QueryCountDebugMiddleware(object):
    """
    This middleware will log the number of queries run
    and the total time taken for each request (with a
    status code of 200). It does not currently support
    multi-db setups.
    """
    # NOTE(review): relies on connection.queries being populated, which Django
    # only does when query logging is enabled (DEBUG) — confirm deployment use.
    def process_response(self, request, response):
        if response.status_code == 200:
            total_time = 0
            for query in connection.queries:
                query_time = query.get('time')
                if query_time is None:
                    # django-debug-toolbar monkeypatches the connection
                    # cursor wrapper and adds extra information in each
                    # item in connection.queries. The query time is stored
                    # under the key "duration" rather than "time" and is
                    # in milliseconds, not seconds.
                    query_time = query.get('duration', 0) / 1000
                total_time += float(query_time)
            log.info(u'%s queries run, total %s seconds', len(connection.queries), total_time)
        return response
def get_ability(course_id, content, user):
    """Return the requesting user's permission flags for a thread or comment."""
    is_thread = content['type'] == 'thread'

    def can(view_name):
        # Delegate each per-action check to the shared permission helper.
        return check_permissions_by_view(user, course_id, content, view_name)

    return {
        'editable': can('update_thread' if is_thread else 'update_comment'),
        'can_reply': can('create_comment' if is_thread else 'create_sub_comment'),
        'can_delete': can('delete_thread' if is_thread else 'delete_comment'),
        # Only threads can be opened/closed; comments never can.
        'can_openclose': can('openclose_thread') if is_thread else False,
        'can_vote': can('vote_for_thread' if is_thread else 'vote_for_comment'),
    }
# TODO: RENAME
def get_annotated_content_info(course_id, content, user, user_info):
    """
    Get metadata for an individual content (thread or comment)
    """
    content_id = content['id']
    # Vote direction for this user: 'up', 'down', or '' when no vote was cast.
    if content_id in user_info['upvoted_ids']:
        vote_direction = 'up'
    elif content_id in user_info['downvoted_ids']:
        vote_direction = 'down'
    else:
        vote_direction = ''
    return {
        'voted': vote_direction,
        'subscribed': content_id in user_info['subscribed_thread_ids'],
        'ability': get_ability(course_id, content, user),
    }
# TODO: RENAME
def get_annotated_content_infos(course_id, thread, user, user_info):
    """
    Get metadata for a thread and its children
    """
    collected = {}

    def walk(node):
        # Record this node's metadata, then descend into every kind of child list.
        collected[str(node['id'])] = get_annotated_content_info(course_id, node, user, user_info)
        children = (
            node.get('children', []) +
            node.get('endorsed_responses', []) +
            node.get('non_endorsed_responses', [])
        )
        for child in children:
            walk(child)

    walk(thread)
    return collected
def get_metadata_for_threads(course_id, threads, user, user_info):
    """Merge the per-thread annotation metadata of ``threads`` into one dict."""
    merged = {}
    # Same fold as reduce(merge_dict, ..., {}): accumulator is the left argument.
    for thread in threads:
        merged = merge_dict(merged, get_annotated_content_infos(course_id, thread, user, user_info))
    return merged
# put this method in utils.py to avoid circular import dependency between helpers and mustache_helpers
def render_mustache(template_name, dictionary, *args, **kwargs):
    """Render the named mustache template from the 'main' lookup with ``dictionary``.

    NOTE(review): extra ``*args``/``**kwargs`` are accepted but ignored --
    presumably kept for helper-signature compatibility; confirm before removing.
    """
    template = lookup_template('main', template_name).source
    return pystache.render(template, dictionary)
def permalink(content):
    """Build the forum URL for a thread or comment dict."""
    course_id = content['course_id']
    # Normalize CourseKey objects to their deprecated string form for URLs.
    if isinstance(course_id, CourseKey):
        course_id = course_id.to_deprecated_string()
    if content['type'] == 'thread':
        return reverse('django_comment_client.forum.views.single_thread',
                       args=[course_id, content['commentable_id'], content['id']])
    # Comments link to their parent thread with a fragment pointing at the comment.
    thread_url = reverse('django_comment_client.forum.views.single_thread',
                         args=[course_id, content['commentable_id'], content['thread_id']])
    return thread_url + '#' + content['id']
def extend_content(content):
    """Return ``content`` merged with display, permalink, role, and updated metadata."""
    roles = {}
    user_id = content.get('user_id')
    if user_id:
        try:
            author = User.objects.get(pk=user_id)
            # NOTE(review): every pair uses the literal key 'name', so this dict
            # collapses to at most {'name': <last role>}; preserved as-is --
            # confirm whether a list of role names was intended.
            roles = dict(('name', role.name.lower()) for role in author.roles.filter(course_id=content['course_id']))
        except User.DoesNotExist:
            log.error(
                'User ID %s in comment content %s but not in our DB.',
                content.get('user_id'),
                content.get('id')
            )
    extra = {
        # Prefer search-highlighted variants when present.
        'displayed_title': content.get('highlighted_title') or content.get('title', ''),
        'displayed_body': content.get('highlighted_body') or content.get('body', ''),
        'permalink': permalink(content),
        'roles': roles,
        'updated': content['created_at'] != content['updated_at'],
    }
    return merge_dict(content, extra)
def add_courseware_context(content_list, course, user, id_map=None):
    """
    Decorates `content_list` with courseware metadata using the discussion id map cache if available.
    """
    if id_map is None:
        commentable_ids = [item['commentable_id'] for item in content_list]
        id_map = get_cached_discussion_id_map(course, commentable_ids, user)
    for item in content_list:
        cid = item['commentable_id']
        if cid not in id_map:
            # No courseware mapping for this commentable; leave the item untouched.
            continue
        entry = id_map[cid]
        jump_url = reverse('jump_to', kwargs={"course_id": course.id.to_deprecated_string(),
                                              "location": entry["location"].to_deprecated_string()})
        item.update({"courseware_url": jump_url, "courseware_title": entry["title"]})
def prepare_content(content, course_key, is_staff=False, course_is_cohorted=None):
    """
    This function is used to pre-process thread and comment models in various
    ways before adding them to the HTTP response. This includes fixing empty
    attribute fields, enforcing author anonymity, and enriching metadata around
    group ownership and response endorsement.
    @TODO: not all response pre-processing steps are currently integrated into
    this function.
    Arguments:
        content (dict): A thread or comment.
        course_key (CourseKey): The course key of the course.
        is_staff (bool): Whether the user is a staff member.
        course_is_cohorted (bool): Whether the course is cohorted.
    """
    # Whitelist of keys allowed through to the response; everything else is dropped.
    fields = [
        'id', 'title', 'body', 'course_id', 'anonymous', 'anonymous_to_peers',
        'endorsed', 'parent_id', 'thread_id', 'votes', 'closed', 'created_at',
        'updated_at', 'depth', 'type', 'commentable_id', 'comments_count',
        'at_position_list', 'children', 'highlighted_title', 'highlighted_body',
        'courseware_title', 'courseware_url', 'unread_comments_count',
        'read', 'group_id', 'group_name', 'pinned', 'abuse_flaggers',
        'stats', 'resp_skip', 'resp_limit', 'resp_total', 'thread_type',
        'endorsed_responses', 'non_endorsed_responses', 'non_endorsed_resp_total',
        'endorsement', 'context'
    ]
    # Author identity is only exposed when the post is fully non-anonymous,
    # or anonymous only to peers while the requester is staff.
    if (content.get('anonymous') is False) and ((content.get('anonymous_to_peers') is False) or is_staff):
        fields += ['username', 'user_id']
    content = strip_none(extract(content, fields))
    if content.get("endorsement"):
        endorsement = content["endorsement"]
        endorser = None
        if endorsement["user_id"]:
            try:
                endorser = User.objects.get(pk=endorsement["user_id"])
            except User.DoesNotExist:
                log.error(
                    "User ID %s in endorsement for comment %s but not in our DB.",
                    content.get('user_id'),
                    content.get('id')
                )
        # Only reveal endorser if requester can see author or if endorser is staff
        if (
            endorser and
            ("username" in fields or has_permission(endorser, "endorse_comment", course_key))
        ):
            endorsement["username"] = endorser.username
        else:
            # Hide the endorser's identity from this requester.
            del endorsement["user_id"]
    if course_is_cohorted is None:
        course_is_cohorted = is_course_cohorted(course_key)
    # Recursively pre-process nested responses/comments, passing the cohort
    # flag down to avoid repeating the lookup for every child.
    for child_content_key in ["children", "endorsed_responses", "non_endorsed_responses"]:
        if child_content_key in content:
            children = [
                prepare_content(child, course_key, is_staff, course_is_cohorted=course_is_cohorted)
                for child in content[child_content_key]
            ]
            content[child_content_key] = children
    if course_is_cohorted:
        # Augment the specified thread info to include the group name if a group id is present.
        if content.get('group_id') is not None:
            content['group_name'] = get_cohort_by_id(course_key, content.get('group_id')).name
    else:
        # Remove any cohort information that might remain if the course had previously been cohorted.
        content.pop('group_id', None)
    return content
def get_group_id_for_comments_service(request, course_key, commentable_id=None):
    """
    Given a user requesting content within a `commentable_id`, determine the
    group_id which should be passed to the comments service.
    Returns:
        int: the group_id to pass to the comments service or None if nothing
        should be passed
    Raises:
        ValueError if the requested group_id is invalid
    """
    if commentable_id is None or is_commentable_cohorted(course_key, commentable_id):
        # Fix: previously requested_group_id was only bound for GET/POST, so
        # any other request method raised UnboundLocalError below.
        requested_group_id = None
        if request.method == "GET":
            requested_group_id = request.GET.get('group_id')
        elif request.method == "POST":
            requested_group_id = request.POST.get('group_id')
        if has_permission(request.user, "see_all_cohorts", course_key):
            # Privileged users may query any cohort, or all of them (None).
            if not requested_group_id:
                return None
            try:
                group_id = int(requested_group_id)
                # Validate the cohort exists; a missing cohort is a bad request.
                group_id = int(requested_group_id)
                get_cohort_by_id(course_key, group_id)
            except CourseUserGroup.DoesNotExist:
                raise ValueError
        else:
            # regular users always query with their own id.
            group_id = get_cohort_id(request.user, course_key)
        return group_id
    else:
        # Never pass a group_id to the comments service for a non-cohorted
        # commentable
        return None
def is_comment_too_deep(parent):
    """
    Determine whether a comment with the given parent violates MAX_COMMENT_DEPTH
    parent can be None to determine whether root comments are allowed
    """
    if MAX_COMMENT_DEPTH is None:
        # No depth limit configured: nothing is ever too deep.
        return False
    # A negative limit forbids all comments; otherwise compare the parent's depth.
    return MAX_COMMENT_DEPTH < 0 or (parent and parent["depth"] >= MAX_COMMENT_DEPTH)
def is_commentable_cohorted(course_key, commentable_id):
    """
    Args:
        course_key: CourseKey
        commentable_id: string
    Returns:
        Bool: is this commentable cohorted?
    Raises:
        Http404 if the course doesn't exist.
    """
    course = courses.get_course_by_id(course_key)
    cohort_settings = get_course_cohort_settings(course_key)
    if not cohort_settings.is_cohorted or get_team(commentable_id):
        # Uncohorted course, or a team discussion: never cohorted.
        result = False
    elif (
        commentable_id in course.top_level_discussion_topic_ids or
        cohort_settings.always_cohort_inline_discussions is False
    ):
        # top level discussions have to be manually configured as cohorted
        # (default is not).
        # Same thing for inline discussions if the default is explicitly set to False in settings
        result = commentable_id in cohort_settings.cohorted_discussions
    else:
        # inline discussions are cohorted by default
        result = True
    log.debug(u"is_commentable_cohorted(%s, %s) = {%s}", course_key, commentable_id, result)
    return result
| agpl-3.0 |
cchurch/ansible | lib/ansible/modules/monitoring/circonus_annotation.py | 47 | 7258 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014-2015, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: circonus_annotation
short_description: create an annotation in circonus
description:
- Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
author: "Nick Harring (@NickatEpic)"
version_added: 2.0
requirements:
- requests (either >= 2.0.0 for Python 3, or >= 1.0.0 for Python 2)
notes:
- Check mode isn't supported.
options:
api_key:
description:
- Circonus API key
required: true
category:
description:
- Annotation Category
required: true
description:
description:
- Description of annotation
required: true
title:
description:
- Title of annotation
required: true
start:
description:
- Unix timestamp of event start
default: I(now)
stop:
description:
- Unix timestamp of event end
default: I(now) + I(duration)
duration:
description:
- Duration in seconds of annotation
default: 0
'''
EXAMPLES = '''
# Create a simple annotation event with a source, defaults to start and end time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
# Create an annotation with a duration of 5 minutes and a default start time of now
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
duration: 300
# Create an annotation with a start_time and end_time
- circonus_annotation:
api_key: XXXXXXXXXXXXXXXXX
title: App Config Change
description: This is a detailed description of the config change
category: This category groups like annotations
start_time: 1395940006
end_time: 1395954407
'''
# Fix: the `start`/`stop` entries had copy-pasted sample text ("Host is down.")
# and `stop` was typed `str`; both are Unix timestamps (int).
RETURN = '''
annotation:
    description: details about the created annotation
    returned: success
    type: complex
    contains:
        _cid:
            description: annotation identifier
            returned: success
            type: str
            sample: /annotation/100000
        _created:
            description: creation timestamp
            returned: success
            type: int
            sample: 1502236928
        _last_modified:
            description: last modification timestamp
            returned: success
            type: int
            sample: 1502236928
        _last_modified_by:
            description: last modified by
            returned: success
            type: str
            sample: /user/1000
        category:
            description: category of the created annotation
            returned: success
            type: str
            sample: alerts
        title:
            description: title of the created annotation
            returned: success
            type: str
            sample: WARNING
        description:
            description: description of the created annotation
            returned: success
            type: str
            sample: Host is down.
        start:
            description: timestamp, since annotation applies
            returned: success
            type: int
            sample: 1395940006
        stop:
            description: timestamp, since annotation ends
            returned: success
            type: int
            sample: 1395954407
        rel_metrics:
            description: Array of metrics related to this annotation, each metrics is a string.
            returned: success
            type: list
            sample:
                - 54321_kbps
'''
import json
import time
import traceback
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
HAS_REQUESTS = True
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
HAS_REQUESTS = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_native
def check_requests_dep(module):
    """Check if an adequate requests version is available"""
    if not HAS_REQUESTS:
        # requests never imported: fail with the standard missing-library message.
        module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
    else:
        # Python 3 needs requests >= 2.0.0; Python 2 is fine from 1.0.0.
        minimum = '2.0.0' if PY3 else '1.0.0'
        if LooseVersion(requests.__version__) < LooseVersion(minimum):
            module.fail_json(msg="'requests' library version should be >= %s, found: %s." % (minimum, requests.__version__))
def post_annotation(annotation, api_key):
    ''' Takes annotation dict and api_key string'''
    # POST the JSON-serialized annotation to the Circonus v2 annotation endpoint.
    url = 'https://api.circonus.com/v2' + '/annotation'
    response = requests.post(url,
                             headers=build_headers(api_key),
                             data=json.dumps(annotation))
    # Surface HTTP errors as requests exceptions for the caller to handle.
    response.raise_for_status()
    return response
def create_annotation(module):
    ''' Takes ansible module object '''
    params = module.params
    # Default the start time to "now" when not supplied.
    start = params['start']
    if start is None:
        start = int(time.time())
    # Default the stop time to "now + duration" when not supplied.
    stop = params['stop']
    if stop is None:
        stop = int(time.time()) + params['duration']
    return {
        'start': start,
        'stop': stop,
        'category': params['category'],
        'description': params['description'],
        'title': params['title'],
    }
def build_headers(api_token):
    '''Takes api token, returns headers with it included.'''
    # Circonus authenticates via the X-Circonus-Auth-Token header.
    return {
        'X-Circonus-App-Name': 'ansible',
        'Host': 'api.circonus.com',
        'X-Circonus-Auth-Token': api_token,
        'Accept': 'application/json',
    }
def main():
    '''Main function, dispatches logic'''
    module = AnsibleModule(
        argument_spec=dict(
            start=dict(type='int'),
            stop=dict(type='int'),
            category=dict(required=True),
            title=dict(required=True),
            description=dict(required=True),
            duration=dict(default=0, type='int'),
            api_key=dict(required=True, no_log=True)
        )
    )
    # Abort early with a helpful message if `requests` is missing or too old.
    check_requests_dep(module)
    # Build the annotation payload from the module parameters, then POST it.
    annotation = create_annotation(module)
    try:
        resp = post_annotation(annotation, module.params['api_key'])
    except requests.exceptions.RequestException as e:
        module.fail_json(msg='Request Failed', reason=to_native(e), exception=traceback.format_exc())
    module.exit_json(changed=True, annotation=resp.json())
if __name__ == '__main__':
main()
| gpl-3.0 |
ibinti/intellij-community | python/helpers/profiler/thriftpy/transport/buffered/__init__.py | 25 | 1561 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from io import BytesIO
from thriftpy._compat import CYTHON
from .. import TTransportBase
class TBufferedTransport(TTransportBase):
    """Class that wraps another transport and buffers its I/O.

    The implementation uses a (configurable) fixed-size read buffer
    but buffers all writes until a flush is performed.
    """
    DEFAULT_BUFFER = 4096

    def __init__(self, trans, buf_size=DEFAULT_BUFFER):
        self._trans = trans
        self._wbuf = BytesIO()
        self._rbuf = BytesIO(b"")
        self._buf_size = buf_size

    def is_open(self):
        # Open-state is delegated to the wrapped transport.
        return self._trans.is_open()

    def open(self):
        return self._trans.open()

    def close(self):
        return self._trans.close()

    def _read(self, sz):
        data = self._rbuf.read(sz)
        if data:
            # Serve (possibly partial) data straight from the current buffer.
            return data
        # Buffer exhausted: refill with at least sz bytes when available.
        self._rbuf = BytesIO(self._trans.read(max(sz, self._buf_size)))
        return self._rbuf.read(sz)

    def write(self, buf):
        # Writes accumulate until flush().
        self._wbuf.write(buf)

    def flush(self):
        pending = self._wbuf.getvalue()
        # reset wbuf before write/flush to preserve state on underlying failure
        self._wbuf = BytesIO()
        self._trans.write(pending)
        self._trans.flush()

    def getvalue(self):
        return self._trans.getvalue()
class TBufferedTransportFactory(object):
    """Factory that wraps raw transports in TBufferedTransport."""
    def get_transport(self, trans):
        return TBufferedTransport(trans)
if CYTHON:
from .cybuffered import TCyBufferedTransport, TCyBufferedTransportFactory # noqa
| apache-2.0 |
trondhindenes/ansible-modules-extras | monitoring/monit.py | 53 | 6888 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Darryl Stoflet <stoflet@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import time
# Fix: typo in the timeout description ("verify the the" -> "verify that the").
DOCUMENTATION = '''
---
module: monit
short_description: Manage the state of a program monitored via Monit
description:
     - Manage the state of a program monitored via I(Monit)
version_added: "1.2"
options:
  name:
    description:
      - The name of the I(monit) program/process to manage
    required: true
    default: null
  state:
    description:
      - The state of service
    required: true
    default: null
    choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ]
  timeout:
    description:
      - If there are pending actions for the service monitored by monit, then Ansible will check
        for up to this many seconds to verify that the requested action has been performed.
        Ansible will sleep for five seconds between each check.
    required: false
    default: 300
    version_added: "2.1"
requirements: [ ]
author: "Darryl Stoflet (@dstoflet)"
'''
EXAMPLES = '''
# Manage the state of program "httpd" to be in "started" state.
- monit: name=httpd state=started
'''
def main():
    """Drive a monit-managed process to the requested state and report the result."""
    arg_spec = dict(
        name=dict(required=True),
        timeout=dict(default=300, type='int'),
        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded'])
    )
    module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
    name = module.params['name']
    state = module.params['state']
    timeout = module.params['timeout']
    # Resolve the monit binary; fails the run if monit is not installed.
    MONIT = module.get_bin_path('monit', True)

    def status():
        """Return the status of the process in monit, or the empty string if not present."""
        rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True)
        for line in out.split('\n'):
            # Sample output lines:
            # Process 'name'            Running
            # Process 'name'            Running - restart pending
            parts = line.split()
            if len(parts) > 2 and parts[0].lower() == 'process' and parts[1] == "'%s'" % name:
                return ' '.join(parts[2:]).lower()
        else:
            # for/else: no matching line found -> process unknown to monit.
            return ''

    def run_command(command):
        """Runs a monit command, and returns the new status."""
        module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True)
        return status()

    def wait_for_monit_to_stop_pending():
        """Fails this run if there is no status or it's pending/initializing for timeout"""
        timeout_time = time.time() + timeout
        sleep_time = 5
        running_status = status()
        # Poll until monit reports a settled (non-pending) state or we time out.
        while running_status == '' or 'pending' in running_status or 'initializing' in running_status:
            if time.time() >= timeout_time:
                module.fail_json(
                    msg='waited too long for "pending", or "initiating" status to go away ({0})'.format(
                        running_status
                    ),
                    state=state
                )
            time.sleep(sleep_time)
            running_status = status()

    if state == 'reloaded':
        if module.check_mode:
            module.exit_json(changed=True)
        rc, out, err = module.run_command('%s reload' % MONIT)
        if rc != 0:
            module.fail_json(msg='monit reload failed', stdout=out, stderr=err)
        wait_for_monit_to_stop_pending()
        module.exit_json(changed=True, name=name, state=state)
    present = status() != ''
    if not present and not state == 'present':
        module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state)
    if state == 'present':
        if not present:
            if module.check_mode:
                module.exit_json(changed=True)
            # NOTE(review): this assignment shadows the nested status() function;
            # safe only because every path below exits via exit_json/fail_json
            # before status() would be called again.
            status = run_command('reload')
            if status == '':
                wait_for_monit_to_stop_pending()
            module.exit_json(changed=True, name=name, state=state)
        module.exit_json(changed=False, name=name, state=state)
    wait_for_monit_to_stop_pending()
    running = 'running' in status()
    if running and state in ['started', 'monitored']:
        # Already in the desired state: nothing to do.
        module.exit_json(changed=False, name=name, state=state)
    if running and state == 'stopped':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('stop')
        if status in ['not monitored'] or 'stop pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not stopped' % name, status=status)
    if running and state == 'unmonitored':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('unmonitor')
        if status in ['not monitored'] or 'unmonitor pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not unmonitored' % name, status=status)
    elif state == 'restarted':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('restart')
        if status in ['initializing', 'running'] or 'restart pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not restarted' % name, status=status)
    elif not running and state == 'started':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('start')
        if status in ['initializing', 'running'] or 'start pending' in status:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not started' % name, status=status)
    elif not running and state == 'monitored':
        if module.check_mode:
            module.exit_json(changed=True)
        status = run_command('monitor')
        if status not in ['not monitored']:
            module.exit_json(changed=True, name=name, state=state)
        module.fail_json(msg='%s process not monitored' % name, status=status)
    # Fall-through: no action was required.
    module.exit_json(changed=False, name=name, state=state)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/212_test_unicodedata.py | 15 | 3864 | """ Test script for the unicodedata module.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
from test_support import verify, verbose
import sha
encoding = 'utf-8'
def test_methods():
    # Fold the results of every unicode string method over the whole BMP into
    # one SHA digest, so a regression in any method shows up as a changed hash.
    h = sha.sha()
    for i in range(65536):
        char = unichr(i)
        data = [
            # Predicates (single char)
            char.isalnum() and u'1' or u'0',
            char.isalpha() and u'1' or u'0',
            char.isdecimal() and u'1' or u'0',
            char.isdigit() and u'1' or u'0',
            char.islower() and u'1' or u'0',
            char.isnumeric() and u'1' or u'0',
            char.isspace() and u'1' or u'0',
            char.istitle() and u'1' or u'0',
            char.isupper() and u'1' or u'0',
            # Predicates (multiple chars)
            (char + u'abc').isalnum() and u'1' or u'0',
            (char + u'abc').isalpha() and u'1' or u'0',
            (char + u'123').isdecimal() and u'1' or u'0',
            (char + u'123').isdigit() and u'1' or u'0',
            (char + u'abc').islower() and u'1' or u'0',
            (char + u'123').isnumeric() and u'1' or u'0',
            (char + u' \t').isspace() and u'1' or u'0',
            (char + u'abc').istitle() and u'1' or u'0',
            (char + u'ABC').isupper() and u'1' or u'0',
            # Mappings (single char)
            char.lower(),
            char.upper(),
            char.title(),
            # Mappings (multiple chars)
            (char + u'abc').lower(),
            (char + u'ABC').upper(),
            (char + u'abc').title(),
            (char + u'ABC').title(),
            ]
        h.update(u''.join(data).encode(encoding))
    return h.hexdigest()
def test_unicodedata():
    # Hash the core unicodedata properties of every BMP code point; the digest
    # pins the content of the bundled Unicode database.
    h = sha.sha()
    for i in range(65536):
        char = unichr(i)
        data = [
            # Properties
            str(unicodedata.digit(char, -1)),
            str(unicodedata.numeric(char, -1)),
            str(unicodedata.decimal(char, -1)),
            unicodedata.category(char),
            unicodedata.bidirectional(char),
            unicodedata.decomposition(char),
            str(unicodedata.mirrored(char)),
            str(unicodedata.combining(char)),
            ]
        h.update(''.join(data))
    return h.hexdigest()
### Run tests
print 'Testing Unicode Database...'
print 'Methods:',
print test_methods()
# In case unicodedata is not available, this will raise an ImportError,
# but still test the above cases...
import unicodedata
print 'Functions:',
print test_unicodedata()
# Some additional checks of the API:
print 'API:',
# digit/numeric/decimal lookups, with and without a default value.
verify(unicodedata.digit(u'A',None) is None)
verify(unicodedata.digit(u'9') == 9)
verify(unicodedata.digit(u'\u215b',None) is None)
verify(unicodedata.digit(u'\u2468') == 9)
verify(unicodedata.numeric(u'A',None) is None)
verify(unicodedata.numeric(u'9') == 9)
verify(unicodedata.numeric(u'\u215b') == 0.125)
verify(unicodedata.numeric(u'\u2468') == 9.0)
verify(unicodedata.decimal(u'A',None) is None)
verify(unicodedata.decimal(u'9') == 9)
verify(unicodedata.decimal(u'\u215b',None) is None)
verify(unicodedata.decimal(u'\u2468',None) is None)
# Category, bidirectional class, decomposition, mirrored, combining class.
verify(unicodedata.category(u'\uFFFE') == 'Cn')
verify(unicodedata.category(u'a') == 'Ll')
verify(unicodedata.category(u'A') == 'Lu')
verify(unicodedata.bidirectional(u'\uFFFE') == '')
verify(unicodedata.bidirectional(u' ') == 'WS')
verify(unicodedata.bidirectional(u'A') == 'L')
verify(unicodedata.decomposition(u'\uFFFE') == '')
verify(unicodedata.decomposition(u'\u00bc') == '<fraction> 0031 2044 0034')
verify(unicodedata.mirrored(u'\uFFFE') == 0)
verify(unicodedata.mirrored(u'a') == 0)
verify(unicodedata.mirrored(u'\u2201') == 1)
verify(unicodedata.combining(u'\uFFFE') == 0)
verify(unicodedata.combining(u'a') == 0)
verify(unicodedata.combining(u'\u20e1') == 230)
print 'ok'
| gpl-3.0 |
mikelikespie/bazel | third_party/protobuf/3.0.0/python/google/protobuf/internal/_parameterized.py | 88 | 15458 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Adds support for parameterized tests to Python's unittest TestCase class.
A parameterized test is a method in a test case that is invoked with different
argument tuples.
A simple example:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
(1, 2, 3),
(4, 5, 9),
(1, 1, 3))
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Each invocation is a separate test case and properly isolated just
like a normal test method, with its own setUp/tearDown cycle. In the
example above, there are three separate testcases, one of which will
fail due to an assertion error (1 + 1 != 3).
Parameters for individual test cases can be tuples (with positional parameters)
or dictionaries (with named parameters):
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
{'op1': 1, 'op2': 2, 'result': 3},
{'op1': 4, 'op2': 5, 'result': 9},
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
If a parameterized test fails, the error message will show the
original test name (which is modified internally) and the arguments
for the specific invocation, which are part of the string returned by
the shortDescription() method on test cases.
The id method of the test, used internally by the unittest framework,
is also modified to show the arguments. To make sure that test names
stay the same across several invocations, object representations like
>>> class Foo(object):
... pass
>>> repr(Foo())
'<__main__.Foo object at 0x23d8610>'
are turned into '<__main__.Foo>'. For even more descriptive names,
especially in test logs, you can use the NamedParameters decorator. In
this case, only tuples are supported, and the first parameters has to
be a string (or an object that returns an apt name when converted via
str()):
class NamedExample(parameterized.ParameterizedTestCase):
@parameterized.NamedParameters(
('Normal', 'aa', 'aaa', True),
('EmptyPrefix', '', 'abc', True),
('BothEmpty', '', '', True))
def testStartsWith(self, prefix, string, result):
self.assertEqual(result, strings.startswith(prefix))
Named tests also have the benefit that they can be run individually
from the command line:
$ testmodule.py NamedExample.testStartsWithNormal
.
--------------------------------------------------------------------
Ran 1 test in 0.000s
OK
Parameterized Classes
=====================
If invocation arguments are shared across test methods in a single
ParameterizedTestCase class, instead of decorating all test methods
individually, the class itself can be decorated:
@parameterized.Parameters(
(1, 2, 3)
(4, 5, 9))
class ArithmeticTest(parameterized.ParameterizedTestCase):
def testAdd(self, arg1, arg2, result):
self.assertEqual(arg1 + arg2, result)
def testSubtract(self, arg2, arg2, result):
self.assertEqual(result - arg1, arg2)
Inputs from Iterables
=====================
If parameters should be shared across several test cases, or are dynamically
created from other sources, a single non-tuple iterable can be passed into
the decorator. This iterable will be used to obtain the test cases:
class AdditionExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
c.op1, c.op2, c.result for c in testcases
)
def testAddition(self, op1, op2, result):
self.assertEqual(result, op1 + op2)
Single-Argument Test Methods
============================
If a test method takes only one argument, the single argument does not need to
be wrapped into a tuple:
class NegativeNumberExample(parameterized.ParameterizedTestCase):
@parameterized.Parameters(
-1, -3, -4, -5
)
def testIsNegative(self, arg):
self.assertTrue(IsNegative(arg))
"""
__author__ = 'tmarek@google.com (Torsten Marek)'
import collections
import functools
import re
import types
try:
import unittest2 as unittest
except ImportError:
import unittest
import uuid
import six
ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
_SEPARATOR = uuid.uuid1().hex
_FIRST_ARG = object()
_ARGUMENT_REPR = object()
def _CleanRepr(obj):
    # Strip the "at 0x..." address suffix ("<Foo object at 0x...>" -> "<Foo>")
    # so generated test names stay stable across interpreter runs.
    return ADDR_RE.sub(r'<\1>', repr(obj))
# Helper function formerly from the unittest module, removed from it in
# Python 2.7.
def _StrClass(cls):
return '%s.%s' % (cls.__module__, cls.__name__)
def _NonStringIterable(obj):
    # True for iterables that are not strings (via six.string_types), so a
    # single string parameter is not mistaken for a sequence of parameters.
    return (isinstance(obj, collections.Iterable) and not
            isinstance(obj, six.string_types))
def _FormatParameterList(testcase_params):
if isinstance(testcase_params, collections.Mapping):
return ', '.join('%s=%s' % (argname, _CleanRepr(value))
for argname, value in testcase_params.items())
elif _NonStringIterable(testcase_params):
return ', '.join(map(_CleanRepr, testcase_params))
else:
return _FormatParameterList((testcase_params,))
class _ParameterizedTestIter(object):
"""Callable and iterable class for producing new test cases."""
def __init__(self, test_method, testcases, naming_type):
"""Returns concrete test functions for a test and a list of parameters.
The naming_type is used to determine the name of the concrete
functions as reported by the unittest framework. If naming_type is
_FIRST_ARG, the testcases must be tuples, and the first element must
have a string representation that is a valid Python identifier.
Args:
test_method: The decorated test method.
testcases: (list of tuple/dict) A list of parameter
tuples/dicts for individual test invocations.
naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
"""
self._test_method = test_method
self.testcases = testcases
self._naming_type = naming_type
def __call__(self, *args, **kwargs):
raise RuntimeError('You appear to be running a parameterized test case '
'without having inherited from parameterized.'
'ParameterizedTestCase. This is bad because none of '
'your test cases are actually being run.')
def __iter__(self):
test_method = self._test_method
naming_type = self._naming_type
def MakeBoundParamTest(testcase_params):
@functools.wraps(test_method)
def BoundParamTest(self):
if isinstance(testcase_params, collections.Mapping):
test_method(self, **testcase_params)
elif _NonStringIterable(testcase_params):
test_method(self, *testcase_params)
else:
test_method(self, testcase_params)
if naming_type is _FIRST_ARG:
# Signal the metaclass that the name of the test function is unique
# and descriptive.
BoundParamTest.__x_use_name__ = True
BoundParamTest.__name__ += str(testcase_params[0])
testcase_params = testcase_params[1:]
elif naming_type is _ARGUMENT_REPR:
# __x_extra_id__ is used to pass naming information to the __new__
# method of TestGeneratorMetaclass.
# The metaclass will make sure to create a unique, but nondescriptive
# name for this test.
BoundParamTest.__x_extra_id__ = '(%s)' % (
_FormatParameterList(testcase_params),)
else:
raise RuntimeError('%s is not a valid naming type.' % (naming_type,))
BoundParamTest.__doc__ = '%s(%s)' % (
BoundParamTest.__name__, _FormatParameterList(testcase_params))
if test_method.__doc__:
BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,)
return BoundParamTest
return (MakeBoundParamTest(c) for c in self.testcases)
def _IsSingletonList(testcases):
"""True iff testcases contains only a single non-tuple element."""
return len(testcases) == 1 and not isinstance(testcases[0], tuple)
def _ModifyClass(class_object, testcases, naming_type):
assert not getattr(class_object, '_id_suffix', None), (
'Cannot add parameters to %s,'
' which already has parameterized methods.' % (class_object,))
class_object._id_suffix = id_suffix = {}
# We change the size of __dict__ while we iterate over it,
# which Python 3.x will complain about, so use copy().
for name, obj in class_object.__dict__.copy().items():
if (name.startswith(unittest.TestLoader.testMethodPrefix)
and isinstance(obj, types.FunctionType)):
delattr(class_object, name)
methods = {}
_UpdateClassDictForParamTestCase(
methods, id_suffix, name,
_ParameterizedTestIter(obj, testcases, naming_type))
for name, meth in methods.items():
setattr(class_object, name, meth)
def _ParameterDecorator(naming_type, testcases):
"""Implementation of the parameterization decorators.
Args:
naming_type: The naming type.
testcases: Testcase parameters.
Returns:
A function for modifying the decorated object.
"""
def _Apply(obj):
if isinstance(obj, type):
_ModifyClass(
obj,
list(testcases) if not isinstance(testcases, collections.Sequence)
else testcases,
naming_type)
return obj
else:
return _ParameterizedTestIter(obj, testcases, naming_type)
if _IsSingletonList(testcases):
assert _NonStringIterable(testcases[0]), (
'Single parameter argument must be a non-string iterable')
testcases = testcases[0]
return _Apply
def Parameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example.
Args:
*testcases: Parameters for the decorated method, either a single
iterable, or a list of tuples/dicts/objects (for tests
with only one argument).
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _ParameterDecorator(_ARGUMENT_REPR, testcases)
def NamedParameters(*testcases):
"""A decorator for creating parameterized tests.
See the module docstring for a usage example. The first element of
each parameter tuple should be a string and will be appended to the
name of the test method.
Args:
*testcases: Parameters for the decorated method, either a single
iterable, or a list of tuples.
Returns:
A test generator to be handled by TestGeneratorMetaclass.
"""
return _ParameterDecorator(_FIRST_ARG, testcases)
class TestGeneratorMetaclass(type):
"""Metaclass for test cases with test generators.
A test generator is an iterable in a testcase that produces callables. These
callables must be single-argument methods. These methods are injected into
the class namespace and the original iterable is removed. If the name of the
iterable conforms to the test pattern, the injected methods will be picked
up as tests by the unittest framework.
In general, it is supposed to be used in conjunction with the
Parameters decorator.
"""
def __new__(mcs, class_name, bases, dct):
dct['_id_suffix'] = id_suffix = {}
for name, obj in dct.items():
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
_NonStringIterable(obj)):
iterator = iter(obj)
dct.pop(name)
_UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator)
return type.__new__(mcs, class_name, bases, dct)
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):
"""Adds individual test cases to a dictionary.
Args:
dct: The target dictionary.
id_suffix: The dictionary for mapping names to test IDs.
name: The original name of the test case.
iterator: The iterator generating the individual test cases.
"""
for idx, func in enumerate(iterator):
assert callable(func), 'Test generators must yield callables, got %r' % (
func,)
if getattr(func, '__x_use_name__', False):
new_name = func.__name__
else:
new_name = '%s%s%d' % (name, _SEPARATOR, idx)
assert new_name not in dct, (
'Name of parameterized test case "%s" not unique' % (new_name,))
dct[new_name] = func
id_suffix[new_name] = getattr(func, '__x_extra_id__', '')
class ParameterizedTestCase(unittest.TestCase):
"""Base class for test cases using the Parameters decorator."""
__metaclass__ = TestGeneratorMetaclass
def _OriginalName(self):
return self._testMethodName.split(_SEPARATOR)[0]
def __str__(self):
return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__))
def id(self): # pylint: disable=invalid-name
"""Returns the descriptive ID of the test.
This is used internally by the unittesting framework to get a name
for the test to be used in reports.
Returns:
The test id.
"""
return '%s.%s%s' % (_StrClass(self.__class__),
self._OriginalName(),
self._id_suffix.get(self._testMethodName, ''))
def CoopParameterizedTestCase(other_base_class):
"""Returns a new base class with a cooperative metaclass base.
This enables the ParameterizedTestCase to be used in combination
with other base classes that have custom metaclasses, such as
mox.MoxTestBase.
Only works with metaclasses that do not override type.__new__.
Example:
import google3
import mox
from google3.testing.pybase import parameterized
class ExampleTest(parameterized.CoopParameterizedTestCase(mox.MoxTestBase)):
...
Args:
other_base_class: (class) A test case base class.
Returns:
A new class object.
"""
metaclass = type(
'CoopMetaclass',
(other_base_class.__metaclass__,
TestGeneratorMetaclass), {})
return metaclass(
'CoopParameterizedTestCase',
(other_base_class, ParameterizedTestCase), {})
| apache-2.0 |
BlackPole/bp-dvbapp | Navigation.py | 3 | 4881 | from enigma import eServiceCenter, eServiceReference, eTimer, pNavigation, getBestPlayableServiceReference, iPlayableService
from Components.ParentalControl import parentalControl
from Tools.BoundFunction import boundFunction
from Tools.DreamboxHardware import setFPWakeuptime, getFPWakeuptime, getFPWasTimerWakeup
from time import time
import RecordTimer
import SleepTimer
import Screens.Standby
import NavigationInstance
import ServiceReference
# TODO: remove pNavgation, eNavigation and rewrite this stuff in python.
class Navigation:
def __init__(self, nextRecordTimerAfterEventActionAuto=False):
if NavigationInstance.instance is not None:
raise NavigationInstance.instance
NavigationInstance.instance = self
self.ServiceHandler = eServiceCenter.getInstance()
import Navigation as Nav
Nav.navcore = self
self.pnav = pNavigation()
self.pnav.m_event.get().append(self.dispatchEvent)
self.pnav.m_record_event.get().append(self.dispatchRecordEvent)
self.event = [ ]
self.record_event = [ ]
self.currentlyPlayingServiceReference = None
self.currentlyPlayingService = None
self.RecordTimer = RecordTimer.RecordTimer()
if getFPWasTimerWakeup():
if nextRecordTimerAfterEventActionAuto:
# We need to give the systemclock the chance to sync with the transponder time,
# before we will make the decision about whether or not we need to shutdown
# after the upcoming recording has completed
self.recordshutdowntimer = eTimer()
self.recordshutdowntimer.callback.append(self.checkShutdownAfterRecording)
self.recordshutdowntimer.start(30000, True)
self.SleepTimer = SleepTimer.SleepTimer()
def checkShutdownAfterRecording(self):
if len(self.getRecordings()) or abs(self.RecordTimer.getNextRecordingTime() - time()) <= 360:
if not Screens.Standby.inTryQuitMainloop: # not a shutdown messagebox is open
RecordTimer.RecordTimerEntry.TryQuitMainloop(False) # start shutdown handling
def dispatchEvent(self, i):
for x in self.event:
x(i)
if i == iPlayableService.evEnd:
self.currentlyPlayingServiceReference = None
self.currentlyPlayingService = None
def dispatchRecordEvent(self, rec_service, event):
# print "record_event", rec_service, event
for x in self.record_event:
x(rec_service, event)
def playService(self, ref, checkParentalControl = True, forceRestart = False):
oldref = self.currentlyPlayingServiceReference
if ref and oldref and ref == oldref and not forceRestart:
print "ignore request to play already running service(1)"
return 0
print "playing", ref and ref.toString()
if ref is None:
self.stopService()
return 0
if not checkParentalControl or parentalControl.isServicePlayable(ref, boundFunction(self.playService, checkParentalControl = False)):
if ref.flags & eServiceReference.isGroup:
if not oldref:
oldref = eServiceReference()
playref = getBestPlayableServiceReference(ref, oldref)
print "playref", playref
if playref and oldref and playref == oldref and not forceRestart:
print "ignore request to play already running service(2)"
return 0
if not playref or (checkParentalControl and not parentalControl.isServicePlayable(playref, boundFunction(self.playService, checkParentalControl = False))):
self.stopService()
return 0
else:
playref = ref
if self.pnav:
self.pnav.stopService()
self.currentlyPlayingServiceReference = playref
if self.pnav.playService(playref):
print "Failed to start", playref
self.currentlyPlayingServiceReference = None
return 0
else:
self.stopService()
return 1
def getCurrentlyPlayingServiceReference(self):
return self.currentlyPlayingServiceReference
def recordService(self, ref, simulate=False):
service = None
if not simulate: print "recording service: %s" % (str(ref))
if isinstance(ref, ServiceReference.ServiceReference):
ref = ref.ref
if ref:
if ref.flags & eServiceReference.isGroup:
ref = getBestPlayableServiceReference(ref, eServiceReference(), simulate)
service = ref and self.pnav and self.pnav.recordService(ref, simulate)
if service is None:
print "record returned non-zero"
return service
def stopRecordService(self, service):
ret = self.pnav and self.pnav.stopRecordService(service)
return ret
def getRecordings(self, simulate=False):
return self.pnav and self.pnav.getRecordings(simulate)
def getCurrentService(self):
if not self.currentlyPlayingService:
self.currentlyPlayingService = self.pnav and self.pnav.getCurrentService()
return self.currentlyPlayingService
def stopService(self):
if self.pnav:
self.pnav.stopService()
def pause(self, p):
return self.pnav and self.pnav.pause(p)
def shutdown(self):
self.RecordTimer.shutdown()
self.ServiceHandler = None
self.pnav = None
def stopUserServices(self):
self.stopService()
| gpl-2.0 |
WarrenWeckesser/scikits-image | skimage/graph/tests/test_connect.py | 40 | 2632 | import skimage.graph.mcp as mcp
# import stentseg.graph._mcp as mcp
from numpy.testing import (assert_array_equal,
assert_almost_equal,
)
import numpy as np
a = np.ones((8, 8), dtype=np.float32)
count = 0
class MCP(mcp.MCP_Connect):
def _reset(self):
""" Reset the id map.
"""
mcp.MCP_Connect._reset(self)
self._conn = {}
self._bestconn = {}
def create_connection(self, id1, id2, pos1, pos2, cost1, cost2):
# Process data
hash = min(id1, id2), max(id1, id2)
val = min(pos1, pos2), max(pos1, pos2)
cost = min(cost1, cost2)
# Add to total list
self._conn.setdefault(hash, []).append(val)
# Keep track of connection with lowest cost
curcost = self._bestconn.get(hash, (np.inf,))[0]
if cost < curcost:
self._bestconn[hash] = (cost,) + val
def test_connections():
# Create MCP object with three seed points
mcp = MCP(a)
costs, traceback = mcp.find_costs([ (1,1), (7,7), (1,7) ])
# Test that all three seed points are connected
connections = set(mcp._conn.keys())
assert (0, 1) in connections
assert (1, 2) in connections
assert (0, 2) in connections
# Test that any two neighbors have only been connected once
for position_tuples in mcp._conn.values():
n1 = len(position_tuples)
n2 = len(set(position_tuples))
assert n1 == n2
# For seed 0 and 1
cost, pos1, pos2 = mcp._bestconn[(0,1)]
# Test meeting points
assert (pos1, pos2) == ( (3,3), (4,4) )
# Test the whole path
path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
assert_array_equal(path,
[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7)])
# For seed 1 and 2
cost, pos1, pos2 = mcp._bestconn[(1,2)]
# Test meeting points
assert (pos1, pos2) == ( (3,7), (4,7) )
# Test the whole path
path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
assert_array_equal(path,
[(1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7)])
# For seed 0 and 2
cost, pos1, pos2 = mcp._bestconn[(0,2)]
# Test meeting points
assert (pos1, pos2) == ( (1,3), (1,4) )
# Test the whole path
path = mcp.traceback(pos1) + list(reversed(mcp.traceback(pos2)))
assert_array_equal(path,
[(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7)])
if __name__ == "__main__":
np.testing.run_module_suite() | bsd-3-clause |
analurandis/Tur | backend/venv/Lib/site-packages/sphinx/util/console.py | 11 | 2430 | # -*- coding: utf-8 -*-
"""
sphinx.util.console
~~~~~~~~~~~~~~~~~~~
Format colored console output.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import re
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
codes = {}
def get_terminal_width():
"""Borrowed from the py lib."""
try:
import termios, fcntl, struct
call = fcntl.ioctl(0, termios.TIOCGWINSZ,
struct.pack('hhhh', 0, 0, 0, 0))
height, width = struct.unpack('hhhh', call)[:2]
terminal_width = width
except (SystemExit, KeyboardInterrupt):
raise
except:
# FALLBACK
terminal_width = int(os.environ.get('COLUMNS', 80)) - 1
return terminal_width
_tw = get_terminal_width()
def term_width_line(text):
if not codes:
# if no coloring, don't output fancy backspaces
return text + '\n'
else:
# codes are not displayed, this must be taken into account
return text.ljust(_tw + len(text) - len(_ansi_re.sub('', text))) + '\r'
def color_terminal():
if not hasattr(sys.stdout, 'isatty'):
return False
if not sys.stdout.isatty():
return False
if 'COLORTERM' in os.environ:
return True
term = os.environ.get('TERM', 'dumb').lower()
if term in ('xterm', 'linux') or 'color' in term:
return True
return False
def nocolor():
codes.clear()
def coloron():
codes.update(_orig_codes)
def colorize(name, text):
return codes.get(name, '') + text + codes.get('reset', '')
def create_color_func(name):
def inner(text):
return colorize(name, text)
globals()[name] = inner
_attrs = {
'reset': '39;49;00m',
'bold': '01m',
'faint': '02m',
'standout': '03m',
'underline': '04m',
'blink': '05m',
}
for _name, _value in _attrs.items():
codes[_name] = '\x1b[' + _value
_colors = [
('black', 'darkgray'),
('darkred', 'red'),
('darkgreen', 'green'),
('brown', 'yellow'),
('darkblue', 'blue'),
('purple', 'fuchsia'),
('turquoise', 'teal'),
('lightgray', 'white'),
]
for i, (dark, light) in enumerate(_colors):
codes[dark] = '\x1b[%im' % (i+30)
codes[light] = '\x1b[%i;01m' % (i+30)
_orig_codes = codes.copy()
for _name in codes:
create_color_func(_name)
| mit |
scottmcmaster/catapult | tracing/third_party/tvcm/third_party/rjsmin/bench/main.py | 12 | 7298 | #!/usr/bin/env python
# -*- coding: ascii -*-
r"""
=================================
Benchmark jsmin implementations
=================================
Benchmark jsmin implementations.
:Copyright:
Copyright 2011 - 2014
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Usage::
python -mbench.main [-c COUNT] [-p file] jsfile ...
-c COUNT number of runs per jsfile and minifier. Defaults to 10.
-p file File to write the benchmark results in (pickled)
"""
if __doc__:
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
import sys as _sys
import time as _time
import_notes = []
class jsmins(object):
from bench import jsmin as p_01_simple_port
if _sys.version_info >= (2, 4):
from bench import jsmin_2_0_9 as p_02_jsmin_2_0_9
else:
import_notes.append(
"jsmin_2_0_9 available for python 2.4 and later..."
)
print(import_notes[-1])
try:
import slimit as _slimit_0_8_1
except (ImportError, SyntaxError):
import_notes.append("slimit_0_8_1 could not be imported")
print(import_notes[-1])
else:
class p_03_slimit_0_8_1(object):
pass
p_03_slimit_0_8_1 = p_03_slimit_0_8_1()
p_03_slimit_0_8_1.jsmin = _slimit_0_8_1.minify
class p_04_slimit_0_8_1_mangle(object):
pass
p_04_slimit_0_8_1_mangle = p_04_slimit_0_8_1_mangle()
p_04_slimit_0_8_1_mangle.jsmin = \
lambda x, s=_slimit_0_8_1: s.minify(x, True)
import rjsmin as p_05_rjsmin
try:
import _rjsmin as p_06__rjsmin
except ImportError:
import_notes.append("_rjsmin (C-Port) not available")
print(import_notes[-1])
jsmins.p_05_rjsmin.jsmin = jsmins.p_05_rjsmin._make_jsmin(
python_only=True
)
print("Python Release: %s" % ".".join(map(str, _sys.version_info[:3])))
print("")
def slurp(filename):
""" Load a file """
fp = open(filename)
try:
return fp.read()
finally:
fp.close()
def print_(*value, **kwargs):
""" Print stuff """
(kwargs.get('file') or _sys.stdout).write(
''.join(value) + kwargs.get('end', '\n')
)
def bench(filenames, count):
"""
Benchmark the minifiers with given javascript samples
:Parameters:
`filenames` : sequence
List of filenames
`count` : ``int``
Number of runs per js file and minifier
:Exceptions:
- `RuntimeError` : empty filenames sequence
"""
if not filenames:
raise RuntimeError("Missing files to benchmark")
try:
xrange
except NameError:
xrange = range
try:
cmp
except NameError:
cmp = lambda a, b: (a > b) - (a < b)
ports = [item for item in dir(jsmins) if item.startswith('p_')]
ports.sort()
space = max(map(len, ports)) - 4
ports = [(item[5:], getattr(jsmins, item).jsmin) for item in ports]
flush = _sys.stdout.flush
struct = []
inputs = [(filename, slurp(filename)) for filename in filenames]
for filename, script in inputs:
print_("Benchmarking %r..." % filename, end=" ")
flush()
outputs = []
for _, jsmin in ports:
try:
outputs.append(jsmin(script))
except (SystemExit, KeyboardInterrupt):
raise
except:
outputs.append(None)
struct.append(dict(
filename=filename,
sizes=[
(item is not None and len(item) or None) for item in outputs
],
size=len(script),
messages=[],
times=[],
))
print_("(%.1f KiB)" % (struct[-1]['size'] / 1024.0,))
flush()
times = []
for idx, (name, jsmin) in enumerate(ports):
if outputs[idx] is None:
print_(" FAILED %s" % (name,))
struct[-1]['times'].append((name, None))
else:
print_(" Timing %s%s... (%5.1f KiB %s)" % (
name,
" " * (space - len(name)),
len(outputs[idx]) / 1024.0,
idx == 0 and '*' or ['=', '>', '<'][
cmp(len(outputs[idx]), len(outputs[0]))
],
), end=" ")
flush()
xcount = count
while True:
counted = [None for _ in xrange(xcount)]
start = _time.time()
for _ in counted:
jsmin(script)
end = _time.time()
result = (end - start) * 1000
if result < 10: # avoid measuring within the error range
xcount *= 10
continue
times.append(result / xcount)
break
print_("%8.2f ms" % times[-1], end=" ")
flush()
if len(times) <= 1:
print_()
else:
print_("(factor: %s)" % (', '.join([
'%.2f' % (timed / times[-1]) for timed in times[:-1]
])))
struct[-1]['times'].append((name, times[-1]))
flush()
print_()
return struct
def main(argv=None):
""" Main """
import getopt as _getopt
import os as _os
import pickle as _pickle
if argv is None:
argv = _sys.argv[1:]
try:
opts, args = _getopt.getopt(argv, "hc:p:", ["help"])
except getopt.GetoptError:
e = _sys.exc_info()[0](_sys.exc_info()[1])
print >> _sys.stderr, "%s\nTry %s -mbench.main --help" % (
e,
_os.path.basename(_sys.executable),
)
_sys.exit(2)
count, pickle = 10, None
for key, value in opts:
if key in ("-h", "--help"):
print >> _sys.stderr, (
"%s -mbench.main [-c count] [-p file] cssfile ..." % (
_os.path.basename(_sys.executable),
)
)
_sys.exit(0)
elif key == '-c':
count = int(value)
elif key == '-p':
pickle = str(value)
struct = bench(args, count)
if pickle:
fp = open(pickle, 'wb')
try:
fp.write(_pickle.dumps((
".".join(map(str, _sys.version_info[:3])),
import_notes,
struct,
), 0))
finally:
fp.close()
if __name__ == '__main__':
main()
| bsd-3-clause |
corywalker/selenium-crawler | seleniumcrawler/tests/test_all.py | 2 | 1261 | # Global modules
import unittest
# Local modules
from seleniumcrawler.handle import handle_url
class TestHandlers(unittest.TestCase):
def test_forbes(self):
r = handle_url('http://www.forbes.com/sites/abrambrown/2013/04/22/netflixs-profit-picture-clears-q1s-big-beat-surprises-wall-street/')
self.assertEqual(r['handler'], 'forbes')
self.assertTrue('You need to go back to 2011, to shortly before the Qwikster')
def test_hnews(self):
r = handle_url('https://news.ycombinator.com/item?id=5612912')
self.assertEqual(r['handler'], 'hnews')
self.assertTrue('Wolfe, Cockrell and the rest of the team got a couple of Nexus')
def test_reddit(self):
r = handle_url('http://www.reddit.com/r/technology/comments/1d5ptg/the_force_of_fiber_google_fiber_is_pressuring/')
self.assertEqual(r['handler'], 'reddit')
self.assertTrue('that there is no public data that paints a complete picture')
def test_hackaday(self):
r = handle_url('http://hackaday.com/2013/04/26/old-led-marquee-turned-embedded-video-player/')
self.assertEqual(r['handler'], 'hackaday')
self.assertTrue('A better look at the industrial PC.')
if __name__ == '__main__':
unittest.main()
| mit |
adobecs5/urp2015 | lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCJPSMModel)
self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
self._mContextAnalyzer = EUCJPContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "EUC-JP"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
# PY3K: aBuf is a byte array, so aBuf[i] is an int, not a byte
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar, charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i - 1:i + 1], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| apache-2.0 |
mkolar/pyblish-ftrack | setup.py | 2 | 1121 | """This setup script packages pyblish_ftrack"""
import os
import imp
from setuptools import setup, find_packages
version_file = os.path.abspath("pyblish_ftrack/version.py")
version_mod = imp.load_source("version", version_file)
version = version_mod.version
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
]
setup(
name="pyblish-ftrack",
version=version,
packages=find_packages(),
url="https://github.com/pyblish/pyblish-ftrack",
license="LGPL",
author="Abstract Factory and Contributors",
author_email="marcus@abstractfactory.io",
description="Ftrack Pyblish package",
zip_safe=False,
classifiers=classifiers,
install_requires=[
"pyblish-base>=1.4"
],
)
| lgpl-3.0 |
schmidtc/pysal | pysal/esda/tests/test_gamma.py | 14 | 2474 | import unittest
import numpy as np
import pysal
from pysal.esda.gamma import Gamma
class Gamma_Tester(unittest.TestCase):
"""Unit test for Gamma Index"""
def setUp(self):
self.w = pysal.lat2W(4, 4)
self.y = np.ones(16)
self.y[0:8] = 0
def test_Gamma(self):
"""Test method"""
np.random.seed(12345)
g = Gamma(self.y, self.w)
self.assertAlmostEquals(g.g, 20.0)
self.assertAlmostEquals(g.g_z, 3.1879280354548638)
self.assertAlmostEquals(g.p_sim_g, 0.0030000000000000001)
self.assertAlmostEquals(g.min_g, 0.0)
self.assertAlmostEquals(g.max_g, 20.0)
self.assertAlmostEquals(g.mean_g, 11.093093093093094)
np.random.seed(12345)
g1 = Gamma(self.y, self.w, operation='s')
self.assertAlmostEquals(g1.g, 8.0)
self.assertAlmostEquals(g1.g_z, -3.7057554345954791)
self.assertAlmostEquals(g1.p_sim_g, 0.001)
self.assertAlmostEquals(g1.min_g, 14.0)
self.assertAlmostEquals(g1.max_g, 48.0)
self.assertAlmostEquals(g1.mean_g, 25.623623623623622)
np.random.seed(12345)
g2 = Gamma(self.y, self.w, operation='a')
self.assertAlmostEquals(g2.g, 8.0)
self.assertAlmostEquals(g2.g_z, -3.7057554345954791)
self.assertAlmostEquals(g2.p_sim_g, 0.001)
self.assertAlmostEquals(g2.min_g, 14.0)
self.assertAlmostEquals(g2.max_g, 48.0)
self.assertAlmostEquals(g2.mean_g, 25.623623623623622)
np.random.seed(12345)
g3 = Gamma(self.y, self.w, standardize='y')
self.assertAlmostEquals(g3.g, 32.0)
self.assertAlmostEquals(g3.g_z, 3.7057554345954791)
self.assertAlmostEquals(g3.p_sim_g, 0.001)
self.assertAlmostEquals(g3.min_g, -48.0)
self.assertAlmostEquals(g3.max_g, 20.0)
self.assertAlmostEquals(g3.mean_g, -3.2472472472472473)
np.random.seed(12345)
def func(z, i, j):
q = z[i] * z[j]
return q
g4 = Gamma(self.y, self.w, operation=func)
self.assertAlmostEquals(g4.g, 20.0)
self.assertAlmostEquals(g4.g_z, 3.1879280354548638)
self.assertAlmostEquals(g4.p_sim_g, 0.0030000000000000001)
suite = unittest.TestSuite()
test_classes = [Gamma_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
hurricup/intellij-community | python/lib/Lib/site-packages/django/contrib/comments/signals.py | 425 | 1079 | """
Signals relating to comments.
"""
from django.dispatch import Signal
# Sent just before a comment will be posted (after it's been approved and
# moderated; this can be used to modify the comment (in place) with posting
# details or other such actions. If any receiver returns False the comment will be
# discarded and a 403 (not allowed) response. This signal is sent at more or less
# the same time (just before, actually) as the Comment object's pre-save signal,
# except that the HTTP request is sent along with this signal.
comment_will_be_posted = Signal(providing_args=["comment", "request"])
# Sent just after a comment was posted. See above for how this differs
# from the Comment object's post-save signal.
comment_was_posted = Signal(providing_args=["comment", "request"])
# Sent after a comment was "flagged" in some way. Check the flag to see if this
# was a user requesting removal of a comment, a moderator approving/removing a
# comment, or some other custom user flag.
comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
| apache-2.0 |
erickt/pygments | pygments/scanner.py | 5 | 3114 | # -*- coding: utf-8 -*-
"""
pygments.scanner
~~~~~~~~~~~~~~~~
This library implements a regex based scanner. Some languages
like Pascal are easy to parse but have some keywords that
depend on the context. Because of this it's impossible to lex
that just by using a regular expression lexer like the
`RegexLexer`.
Have a look at the `DelphiLexer` to get an idea of how to use
this scanner.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
class EndOfText(RuntimeError):
    """
    Raised if the end of the text was already reached when the user
    tried to call a match function (`Scanner.check` or `Scanner.scan`).
    """
class Scanner(object):
    """
    Simple regex-based scanner.

    All method patterns are regular expression strings (not
    compiled expressions!); they are compiled lazily and cached
    per instance.
    """

    def __init__(self, text, flags=0):
        """
        :param text: The text which should be scanned
        :param flags: default regular expression flags
        """
        self.data = text
        self.data_length = len(text)
        self.start_pos = 0      # position before the last successful match
        self.pos = 0            # current scan position (end of last match)
        self.flags = flags
        self.last = None        # text matched by the second-to-last scan
        self.match = None       # text matched by the last scan
        self._re_cache = {}

    def eos(self):
        """`True` if the scanner reached the end of text."""
        return self.pos >= self.data_length
    # BUG FIX: the original used ``property(eos, eos.__doc__)``, which passes
    # the docstring as the property's *setter* (the second positional argument
    # of ``property`` is ``fset``) -- assigning to ``eos`` would have tried to
    # call a string.  The docstring belongs in the ``doc`` keyword.
    eos = property(eos, doc="`True` if the scanner reached the end of text.")

    def _re_compile(self, pattern):
        # Compile `pattern` with the default flags, caching the compiled
        # object so repeated scans of the same pattern are cheap.
        try:
            return self._re_cache[pattern]
        except KeyError:
            rex = self._re_cache[pattern] = re.compile(pattern, self.flags)
            return rex

    def check(self, pattern):
        """
        Apply `pattern` on the current position and return
        the match object. (Doesn't touch pos). Use this for
        lookahead.

        Raises `EndOfText` if the end of the text was already reached.
        """
        if self.eos:
            raise EndOfText()
        return self._re_compile(pattern).match(self.data, self.pos)

    def test(self, pattern):
        """Apply a pattern on the current position and check
        if it matches. Doesn't touch pos."""
        return self.check(pattern) is not None

    def scan(self, pattern):
        """
        Scan the text for the given pattern and update pos/match
        and related fields. The return value is a boolean that
        indicates if the pattern matched. The matched value is
        stored on the instance as ``match``, the last value is
        stored as ``last``. ``start_pos`` is the position of the
        pointer before the pattern was matched, ``pos`` is the
        end position.

        Raises `EndOfText` if the end of the text was already reached.
        """
        if self.eos:
            raise EndOfText()
        rex = self._re_compile(pattern)
        # Note: `last` is shifted even when the scan fails -- historical
        # behavior preserved deliberately (some lexers may rely on it).
        self.last = self.match
        m = rex.match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos = m.start()
        self.pos = m.end()
        self.match = m.group()
        return True

    def get_char(self):
        """Scan exactly one char."""
        # '.' does not match a newline, so a newline advances nothing here.
        self.scan('.')

    def __repr__(self):
        return '<%s %d/%d>' % (
            self.__class__.__name__,
            self.pos,
            self.data_length
        )
| bsd-2-clause |
wanderine/nipype | nipype/interfaces/fsl/tests/test_auto_AvScale.py | 9 | 1188 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import AvScale
def test_AvScale_inputs():
    # AUTO-GENERATED check (tools/checkspecs.py): expected trait metadata
    # for every input of the fsl.utils.AvScale interface.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    mat_file=dict(argstr='%s',
    position=0,
    ),
    output_type=dict(),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = AvScale.input_spec()

    # Nose-style generator test: yields one assertion per (trait, metakey).
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value

def test_AvScale_outputs():
    # AUTO-GENERATED check: expected (empty) metadata for every output trait.
    output_map = dict(average_scaling=dict(),
    backward_half_transform=dict(),
    determinant=dict(),
    forward_half_transform=dict(),
    left_right_orientation_preserved=dict(),
    rotation_translation_matrix=dict(),
    scales=dict(),
    skews=dict(),
    )
    outputs = AvScale.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| bsd-3-clause |
pierce403/EmpirePanel | lib/modules/situational_awareness/network/powerview/get_domain_controller.py | 1 | 3326 | from lib.common import helpers
class Module:
    # Empire module definition: wraps the PowerView "Get-NetDomainController"
    # cmdlet so it can be tasked to an agent from the Empire main menu.
    # NOTE: this file is Python 2 (print statement, dict.iteritems).

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): `params=[]` is a mutable default argument; harmless
        # here because it is only iterated, never mutated.

        # Static metadata describing the module to the Empire framework.
        self.info = {
            'Name': 'Get-NetDomainController',

            'Author': ['@harmj0y'],

            'Description': ('Returns the domain controllers for the current domain or '
                            'the specified domain. Part of PowerView.'),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : False,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Domain' : {
                'Description'   :   'The domain to query for domain controllers.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'DomainController' : {
                'Description'   :   'Domain controller to reflect LDAP queries through.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'LDAP' : {
                'Description'   :   'Switch. Use LDAP queries to determine the domain controllers.',
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply any option overrides supplied at construction time.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        # Build the PowerShell one-liner: read powerview.ps1, extract just
        # the Get-NetDomainController function, then append the configured
        # options as cmdlet arguments.
        moduleName = self.info["Name"]

        # read in the common powerview.ps1 module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"

        try:
            f = open(moduleSource, 'r')
        except:
            # NOTE(review): bare except hides the underlying OS error;
            # consider logging the exception detail.
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""

        moduleCode = f.read()
        f.close()

        # get just the code needed for the specified function
        script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)

        script += moduleName + " "

        # Append each configured option (except Agent) as "-Name [value]";
        # a value of "true" is emitted as a bare parameter switch.
        for option,values in self.options.iteritems():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])

        # Pipe output through Out-String and append a completion marker.
        script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'

        return script
ffu/DSA-3.2.2 | gr-radio-astronomy/src/python/local_calibrator.py | 11 | 5595 | #!/usr/bin/env python
#
# Copyright 2003,2004,2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import Numeric
import math
import ephem
import time
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# NO LONGER USED
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
#
#
# Simple class for allowing local definition of a calibration function
# for raw samples coming from the RA detector chain. Each observatory
# is different, and rather than hacking up the main code in usrp_ra_receiver
# we define the appropriate function here.
#
# For example, one could calibrate the output in Janskys, rather than
# dB.
#
#
def calib_default_total_power(data):
    """Default total-power calibration: convert a raw sample to dB (10*log10)."""
    return 10.0 * math.log10(data)
def calib_numogate_ridge_observatory_total_power(data):
    """
    Scale a raw total-power sample and append it, tagged with the current
    sidereal time, to an hourly ".tpdat" log file.

    Observatory state (lat/long, declination, integration time, frequency,
    bandwidth, gain, output prefix) is read from module globals that are
    set by the calib_set_* functions below.
    """

    me = ephem.Observer()

    #
    # PyEphem wants lat/long as strings, rather than floats--took me quite
    # a long time to figure that out. If they don't arrive as strings,
    # the calculations for sidereal time are complete garbage
    #
    me.long = globals()["calib_long"]
    me.lat = globals()["calib_lat"]

    me.date = ephem.now()

    sidtime = me.sidereal_time()

    foo = time.localtime()

    if not "calib_prefix" in globals():
        pfx = "./"
    else:
        pfx = globals()["calib_prefix"]
    # One output file per local hour: <prefix>/YYYYMMDDHH.tpdat
    filenamestr = "%s/%04d%02d%02d%02d" % (pfx, foo.tm_year,
       foo.tm_mon, foo.tm_mday, foo.tm_hour)

    numogate_file = open (filenamestr+".tpdat","a")

    # Scale raw detector counts; 409.6 looks like a 12-bit full-scale
    # factor (4096/10) -- TODO confirm against the detector chain.
    r = (data / 409.6)
    flt = "%6.3f" % r
    #r = calib_default_total_power(data)

    inter = globals()["calib_decln"]
    integ = globals()["calib_integ_setting"]
    fc = globals()["calib_freq_setting"]
    fc = fc / 1000000
    bw = globals()["calib_bw_setting"]
    bw = bw / 1000000
    ga = globals()["calib_gain_setting"]

    now = time.time()

    # Write the full settings record at most once every 20 seconds;
    # otherwise log just the sidereal time and the scaled value.
    if not "calib_then_tpdat" in globals():
        globals()["calib_then_tpdat"] = now

    if (now - globals()["calib_then_tpdat"]) >= 20:
        globals()["calib_then_tpdat"] = now
        numogate_file.write(str(ephem.hours(sidtime))+" "+flt+" Dn="+str(inter)+",")
        numogate_file.write("Ti="+str(integ)+",Fc="+str(fc)+",Bw="+str(bw))
        numogate_file.write(",Ga="+str(ga)+"\n")
    else:
        numogate_file.write(str(ephem.hours(sidtime))+" "+flt+"\n")

    numogate_file.close()
    return(r)
def calib_numogate_ridge_observatory_fft(data,l):
    """
    Periodically append an FFT snapshot (FFT length `l`), together with
    the current observing parameters, to an hourly ".sdat" file.
    The input data is always returned unchanged.
    """

    me = ephem.Observer()

    #
    # PyEphem wants lat/long as strings, rather than floats--took me quite
    # a long time to figure that out. If they don't arrive as strings,
    # the calculations for sidereal time are complete garbage
    #
    me.long = globals()["calib_long"]
    me.lat = globals()["calib_lat"]

    me.date = ephem.now()

    sidtime = me.sidereal_time()

    foo = time.localtime()

    if not "calib_prefix" in globals():
        pfx = "./"
    else:
        pfx = globals()["calib_prefix"]
    # One output file per local hour: <prefix>/YYYYMMDDHH.sdat
    filenamestr = "%s/%04d%02d%02d%02d" % (pfx, foo.tm_year,
       foo.tm_mon, foo.tm_mday, foo.tm_hour)

    now = time.time()

    if not "calib_then" in globals():
        globals()["calib_then"] = now

    # Throttle: write at most one snapshot every (l/1024)*5 seconds,
    # i.e. larger FFTs are logged less often.
    delta = (l/1024)*5

    if (now - globals()["calib_then"]) >= delta:
        globals()["calib_then"] = now
        numogate_file = open (filenamestr+".sdat","a")

        r = data

        inter = globals()["calib_decln"]
        fc = globals()["calib_freq_setting"]
        fc = fc / 1000000
        bw = globals()["calib_bw_setting"]
        bw = bw / 1000000
        av = globals()["calib_avg_alpha"]
        numogate_file.write("data:"+str(ephem.hours(sidtime))+" Dn="+str(inter)+",Fc="+str(fc)+",Bw="+str(bw)+",Av="+str(av))
        numogate_file.write(" "+str(r)+"\n")
        numogate_file.close()
        return(r)

    return(data)
def calib_default_fft(db, l):
    """Default FFT calibration: pass the dB data through unchanged.

    `l` (the FFT length) is accepted for interface compatibility only.
    """
    return db
#
# We capture various parameters from the receive chain here, because
# they can affect the calibration equations.
#
#
def calib_set_gain(gain):
    # Record the RF gain; rewind the settings-record timer so the next
    # total-power sample logs a full settings line.
    globals()["calib_gain_setting"] = gain
    globals()["calib_then_tpdat"] = time.time() - 50

def calib_set_integ(integ):
    # Record the integration time; forces a full settings line next write.
    globals()["calib_integ_setting"] = integ
    globals()["calib_then_tpdat"] = time.time() - 50

def calib_set_bw(bw):
    # Record the bandwidth in Hz; forces a full settings line next write.
    globals()["calib_bw_setting"] = bw
    globals()["calib_then_tpdat"] = time.time() - 50

def calib_set_freq(freq):
    # Record the center frequency in Hz; forces a full settings line next write.
    globals()["calib_freq_setting"] = freq
    globals()["calib_then_tpdat"] = time.time() - 50

def calib_set_avg_alpha(alpha):
    # Record the FFT averaging alpha (logged with .sdat snapshots).
    globals()["calib_avg_alpha"] = alpha

def calib_set_interesting(inter):
    # Flag the current observation as "interesting" (no logging side effect).
    globals()["calib_is_interesting"] = inter

def calib_set_decln(dec):
    # Record the declination; forces a full settings line next write.
    globals()["calib_decln"] = dec
    globals()["calib_then_tpdat"] = time.time() - 50

def calib_set_prefix(pfx):
    # Record the output directory prefix for the hourly log files.
    globals()["calib_prefix"] = pfx

def calib_set_long(long):
    # Record the observatory longitude (as a string -- see note in the
    # logging functions above).  NOTE(review): parameter shadows the
    # Python 2 builtin `long`.
    globals()["calib_long"] = long

def calib_set_lat(lat):
    # Record the observatory latitude (as a string).
    globals()["calib_lat"] = lat
| gpl-3.0 |
FedoraScientific/salome-med | src/MEDOP/tut/medcoupling/testmed_gendata.py | 1 | 13570 | #!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This script illustrates the basic usage of MEDCoupling and MEDLoader
# to generate test data files for various cases of med operation. It
# illustrates also the usage of numpy to specify the values of the
# fields when defined on a cartesian mesh (grid).
# (gboulant - 11/07/2011)
import MEDCoupling as MC
import MEDLoader as ML
import numpy
#
# ===============================================================
# Helper functions to create meshes
# ===============================================================
#
def createGridMesh(meshName, nbCellsX, nbCellsY):
    """
    The mesh is created using MEDCoupling. The code below creates a
    cartesian mesh as a grid with nbCellsX segments in the X direction
    and nbCellsY in the Y direction (nb. cells = nbCellsX * nbCellsY)
    """
    print "Creating grid mesh of size %sx%s"%(nbCellsX, nbCellsY)
    cmesh=MC.MEDCouplingCMesh.New();

    # Create X coordinates (hard-coded 0.1 step between nodes)
    nbNodesX = nbCellsX+1
    stepX = 0.1
    arrX = [float(i * stepX) for i in range(nbNodesX)]

    coordsX=MC.DataArrayDouble.New()
    coordsX.setValues(arrX,nbNodesX,1)

    # Create Y coordinates (same 0.1 step)
    nbNodesY = nbCellsY+1
    stepY = 0.1
    arrY=[float(i * stepY) for i in range(nbNodesY)]

    coordsY=MC.DataArrayDouble.New()
    coordsY.setValues(arrY,nbNodesY,1)

    # Create the grid
    cmesh.setCoords(coordsX,coordsY)
    cmesh.setName(meshName)
    return cmesh

def unstructuredMesh(cartesianMesh):
    """
    Convert the cartesian mesh in unstructured mesh for the need of
    write function of MEDLoader (which only accepts unstructured meshes).
    """
    print "Creating unstructured mesh from %s"%(cartesianMesh.getName())
    umesh=cartesianMesh.buildUnstructured();
    umesh.setName(cartesianMesh.getName())
    return umesh
#
# ===============================================================
# Creating a cartesian mesh
# ===============================================================
#
# The size is the number of discrete values in a direction, and then
# corresponds to the number of cells in that direction.
size=80
#size=512

# >>>
# WARNING: remember the problem of tics and spaces. The parameter
# "size" is considered to be a number of cells (intervals). The number
# of nodes in that direction is size+1.
# <<<
nbCellsX = size
nbNodesX = nbCellsX+1

nbCellsY = size # The size could be different than the X size
nbNodesY = nbCellsY+1

# Build the module-level mesh once; the field helpers below reuse
# `umesh` and append their fields to `medFileName`.
meshName = "Grid_%sx%s"%(nbCellsX, nbCellsY)
cmesh = createGridMesh(meshName, nbCellsX, nbCellsY)
umesh = unstructuredMesh(cmesh)

medFileName="gendata.med"
ML.MEDLoader.WriteUMesh(medFileName,umesh,True);
#
# ===============================================================
# Creating a scalar field, working with numpy
# ===============================================================
#
def createField(fieldName,gridMesh,
                numpy2Darray,typeOfField=MC.ON_CELLS,
                iteration=0):
    """
    The number of values for the fields is deduced from the sizes of
    the numpy array. If typeOfField is ON_CELLS, the size is considered
    as the number of cells, otherwise it's considered as the number of
    nodes. In any case, it must be consistent with the dimensions of
    the numpy 2D array.
    """
    print "Creating field %s with iteration=%s"%(fieldName,iteration)

    # The sizes are deduced from the numpy array. Note that if
    # typeOfField is ON_CELLS, then the size should correspond to the
    # number of cells, while if typeOfField is ON_NODES, then the size
    # should correspond to the number of nodes
    [sizeX,sizeY] = numpy2Darray.shape

    # We first have to reshape the 2D numpy array in a 1D vector that
    # concatenate all the rows
    data=numpy2Darray.reshape(1,sizeX*sizeY)[0]
    # Then, we can create a simple list as required by the MEDCoupling
    # DataArrayDouble. Note also the usage of float type because
    # MEDCoupling works only with real numbers
    listdata=list(data)

    # Create the field using the list obtained from the numpy array
    field = MC.MEDCouplingFieldDouble.New(typeOfField,MC.ONE_TIME);
    field.setName(fieldName);
    field.setMesh(gridMesh);
    # MED identifies a time step by its iteration number; the time value
    # simply mirrors the iteration here.
    field.setIteration(iteration)
    field.setTimeValue(float(iteration))

    nbComponents=1 # Only one single component for a scalar field
    nbCells=sizeX*sizeY
    dataArray=MC.DataArrayDouble.New();
    dataArray.setValues(listdata,nbCells,nbComponents)
    field.setArray(dataArray);
    return field

def writeField(fieldName, numpy2Darray,
               typeOfField=MC.ON_CELLS,
               iteration=0):
    # Build a field on the module-level unstructured mesh and append it to
    # the already-created med file (createFromScratch=False).
    field = createField(fieldName, umesh, numpy2Darray,
                        typeOfField, iteration)

    createFromScratch=False
    ML.MEDLoader.WriteField(medFileName,field,createFromScratch)
def createTestNumpy2DArray(sizeX, sizeY):
    """
    This illustrates how to create a numpy 2D array for input of the
    createField function.

    Row i holds the consecutive float64 values [i*sizeY, i*sizeY + sizeX).
    NOTE(review): the per-row offset uses sizeY (not sizeX), so the values
    are globally consecutive only for square arrays.
    """
    ramp_rows = [
        numpy.arange(start=irow * sizeY,
                     stop=irow * sizeY + sizeX,
                     step=1,
                     dtype='float64')
        for irow in range(sizeY)
    ]
    return numpy.vstack(ramp_rows)
def createTestFieldOnCells():
    # Test field on cells: one ramp value per cell of the module-level grid.
    numpy2Darray = createTestNumpy2DArray(sizeX=nbCellsX, sizeY=nbCellsY)
    writeField("FieldOnCells", numpy2Darray,
               typeOfField=MC.ON_CELLS)

def createTestFieldOnNodes():
    # Test field on nodes: one ramp value per node of the module-level grid.
    numpy2Darray = createTestNumpy2DArray(sizeX=nbNodesX, sizeY=nbNodesY)
    writeField("FieldOnNodes", numpy2Darray,
               typeOfField=MC.ON_NODES)
#
# =================================================
# Creating a time series
# =================================================
#
# -------------------------------------------------
# Simple demo of the principles
# -------------------------------------------------
# In these functions, (x,y) are the indexes of the element in the
# numpy array. Note that these indexes map the indexes of the
# cartesian mesh.

# A function can be a simple python function ...
def f1(x,y):
    # Ramp along x; intended for numpy.fromfunction, where x and y
    # arrive as index arrays.
    z = 10*x
    print "x=%s\ny=%s\nz=%s"%(x,y,z)
    return z

# ... but also a more sophisticated callable object, for example to
# define some parameters
class Function(object):
    # Callable sampler holding the grid size and a scale parameter.
    def __init__(self, sizeX, sizeY, param):
        self.sizeX = sizeX
        self.sizeY = sizeY
        self.param = param

    def function(self, x,y):
        # Scaled ramp along x (y is unused but kept for the
        # fromfunction signature).
        z = self.param*x
        print "x=%s\ny=%s\nz=%s"%(x,y,z)
        return z

    def __call__(self, x,y):
        return self.function(x,y)

# Module-level sampler instances used by createFunctionField_01.
fOnNodes=Function(sizeX=nbNodesX, sizeY=nbNodesY, param=10)
fOnCells=Function(sizeX=nbCellsX, sizeY=nbCellsY, param=3)
def createFunctionField_01():
    # Sample fOnNodes/fOnCells over the grid indexes with numpy.fromfunction
    # and write one field on nodes and one on cells into the med file.
    sizeX=nbNodesX
    sizeY=nbNodesY
    typeOfField=MC.ON_NODES
    f=fOnNodes
    numpy2Darray = numpy.fromfunction(f,(sizeX,sizeY),dtype='float64')
    writeField("FieldOnNodesUsingFunc", numpy2Darray,typeOfField)

    f=fOnCells
    sizeX=nbCellsX
    sizeY=nbCellsY
    typeOfField=MC.ON_CELLS
    numpy2Darray = numpy.fromfunction(f,(sizeX,sizeY),dtype='float64')
    writeField("FieldOnCellsUsingFunc", numpy2Darray,typeOfField)
# -------------------------------------------------
# Using the pyfunctions package to generate data
# -------------------------------------------------
def createNumpy2DArrayWithFunc(sizeX, sizeY, function):
    """
    Build a sizeY x sizeX array by sampling `function` along X on the
    normalized range [0, 1) and replicating the resulting row along Y.

    @function : a callable that can be used as a function of X.
    Typically function should be an instance of a Function object
    defined in pyfunctions.functions.
    """
    # X coordinates should range between 0 and 1 to use the normalized
    # functions; generate sizeX sample points.
    step = 1. / sizeX
    abscissae = [float(i * step) for i in range(sizeX)]
    base_row = numpy.array(function(abscissae))
    # Stack sizeY identical copies of the sampled row.
    return numpy.vstack([base_row] * sizeY)
from pyfunctions.functions import FuncStiffPulse
def createNumpy2DArrayWithFuncStiff(sizeX, sizeY):
    # Convenience wrapper: sample a stiff pulse train (edge at x=0.3)
    # over the normalized [0, 1) range.
    f=FuncStiffPulse(xlimit=0.3,stiffness=30,nbPeriods=10)
    return createNumpy2DArrayWithFunc(sizeX, sizeY, f)

def createFunctionField_02():
    # Write a stiff-pulse field on cells and on nodes into the med file.
    sizeX=nbCellsX
    sizeY=nbCellsY
    typeOfField=MC.ON_CELLS
    numpy2Darray = createNumpy2DArrayWithFuncStiff(sizeX,sizeY)
    writeField("FieldOnCellsUsingFunc02", numpy2Darray,typeOfField)

    sizeX=nbNodesX
    sizeY=nbNodesY
    typeOfField=MC.ON_NODES
    numpy2Darray = createNumpy2DArrayWithFuncStiff(sizeX,sizeY)
    writeField("FieldOnNodesUsingFunc02", numpy2Darray,typeOfField)
#
# =================================================
# Functions to create custom fields for MEDOP tests
# =================================================
#
def createTimeSeries():
    """
    Create a single med file with a single mesh and a field defined on
    several time steps (time series).
    """
    meshName = "Grid_%sx%s"%(nbCellsX, nbCellsY)
    cmesh = createGridMesh(meshName, nbCellsX, nbCellsY)
    umesh = unstructuredMesh(cmesh)
    medFileName="timeseries.med"
    ML.MEDLoader.WriteUMesh(medFileName,umesh,True);

    sizeX=nbNodesX
    sizeY=nbNodesY
    typeOfField=MC.ON_NODES

    nbIterations=10
    pulseStiffNess = 20
    pulseNbPeriods = 10
    # One time step per iteration: the pulse edge (xlimit) sweeps from 0
    # towards 1 across the iterations.
    for iteration in range(nbIterations):
        xlimit = float(iteration)/float(nbIterations)
        f=FuncStiffPulse(xlimit,stiffness=pulseStiffNess,nbPeriods=pulseNbPeriods)
        numpy2Darray = createNumpy2DArrayWithFunc(sizeX,sizeY,f)
        field = createField("Pulse",umesh,numpy2Darray,typeOfField,iteration)
        ML.MEDLoader.WriteField(medFileName,field,False)
from pyfunctions.functions import FuncStiffExp
def createParametrics():
    """
    Create 2 med files containing each a mesh (identical) and a field
    defined on this mesh in each file.
    """
    meshName = "Grid_%sx%s_01"%(nbCellsX, nbCellsY)
    cmesh = createGridMesh(meshName, nbCellsX, nbCellsY)
    umesh = unstructuredMesh(cmesh)

    sizeX=nbNodesX
    sizeY=nbNodesY
    typeOfField=MC.ON_NODES

    # First file: stiff exponential with the edge at x=0.3.
    medFileName="parametric_01.med"
    ML.MEDLoader.WriteUMesh(medFileName,umesh,True);

    f=FuncStiffExp(xlimit=0.3,stiffness=30)
    numpy2Darray = createNumpy2DArrayWithFunc(sizeX,sizeY,f)
    fieldName = "StiffExp_01"
    field = createField(fieldName,umesh, numpy2Darray,typeOfField)
    ML.MEDLoader.WriteField(medFileName,field,False)

    # Second file: same grid (renamed) with the edge moved to x=0.4.
    medFileName="parametric_02.med"
    umesh.setName("Grid_%sx%s_02"%(nbCellsX, nbCellsY))
    ML.MEDLoader.WriteUMesh(medFileName,umesh,True);

    f=FuncStiffExp(xlimit=0.4,stiffness=30)
    numpy2Darray = createNumpy2DArrayWithFunc(sizeX,sizeY,f)
    fieldName = "StiffExp_02"
    field = createField(fieldName,umesh, numpy2Darray,typeOfField)
    ML.MEDLoader.WriteField(medFileName,field,False)
def createParametrics_demo():
    """
    Create 2 med files containing each a mesh (identical) and a field
    defined on this mesh in each file, with 5 time steps per field.
    """
    meshName = "mesh1"
    cmesh = createGridMesh(meshName, nbCellsX, nbCellsY)
    umesh = unstructuredMesh(cmesh)

    sizeX=nbNodesX
    sizeY=nbNodesY
    typeOfField=MC.ON_NODES

    listIteration = [0,1,2,3,4]

    # First file: "field1" on "mesh1"; the exponential edge moves right
    # by 0.1 at each time step.
    medFileName="parametric_01.med"
    ML.MEDLoader.WriteUMesh(medFileName,umesh,True);

    fieldName = "field1"
    for iteration in listIteration:
        #f=FuncStiffPulse(xlimit=0.3+0.1*iteration,stiffness=10,nbPeriods=5)
        f=FuncStiffExp(xlimit=0.3+0.1*iteration,stiffness=10)
        numpy2Darray = createNumpy2DArrayWithFunc(sizeX,sizeY,f)
        field = createField(fieldName,umesh, numpy2Darray,typeOfField,iteration)
        ML.MEDLoader.WriteField(medFileName,field,False)

    # Second file: "field2" on the same grid renamed "mesh2", with a
    # stiffer profile.
    medFileName="parametric_02.med"
    umesh.setName("mesh2")
    ML.MEDLoader.WriteUMesh(medFileName,umesh,True);

    fieldName = "field2"
    for iteration in listIteration:
        #f=FuncStiffPulse(xlimit=0.3+0.1*iteration,stiffness=10,nbPeriods=6)
        f=FuncStiffExp(xlimit=0.3+0.1*iteration,stiffness=15)
        numpy2Darray = createNumpy2DArrayWithFunc(sizeX,sizeY,f)
        field = createField(fieldName,umesh, numpy2Darray,typeOfField,iteration)
        ML.MEDLoader.WriteField(medFileName,field,False)
#
# =================================================
# Main runner
# =================================================
#
if __name__ == "__main__":
    # Only the parametric demo is generated by default; uncomment the
    # other calls to produce the corresponding test data files.
    #createTestFieldOnCells()
    #createTestFieldOnNodes()
    #createFunctionField_01()
    #createFunctionField_02()
    #createTimeSeries()
    createParametrics_demo()
| lgpl-2.1 |
guilhermebr/python-docx | features/steps/cell.py | 6 | 1060 | # encoding: utf-8
"""
Step implementations for table cell-related features
"""
from __future__ import absolute_import, print_function, unicode_literals
from behave import given, then, when
from docx import Document
# given ===================================================

@given('a table cell')
def given_a_table_cell(context):
    # Build a fresh 2x2 table in a new document and stash its first cell
    # on the behave context for the subsequent steps.
    table = Document().add_table(rows=2, cols=2)
    context.cell = table.cell(0, 0)


# when =====================================================

@when('I assign a string to the cell text attribute')
def when_assign_string_to_cell_text_attribute(context):
    cell = context.cell
    text = 'foobar'
    cell.text = text
    # Remember what was assigned so the "then" step can verify it.
    context.expected_text = text


# then =====================================================

@then('the cell contains the string I assigned')
def then_cell_contains_string_assigned(context):
    cell, expected_text = context.cell, context.expected_text
    # The assigned text should land in the first run of the first paragraph.
    text = cell.paragraphs[0].runs[0].text
    msg = "expected '%s', got '%s'" % (expected_text, text)
    assert text == expected_text, msg
| mit |
LEXmono/q | bot/commands/pugbomb.py | 1 | 1868 | from bot.command_map import command_map
import logging
import os
import random
import requests
giphy_key = os.environ['GIPHY_API_KEY']
giphy_url = 'https://api.giphy.com/v1/gifs/search'
logger = logging.getLogger()
# Custom Giphy call to get multiple photos.
@command_map.register_command()
def pugbomb(*args, **kwargs):
    '''
    You know you want to.
    --------------------------------------------------------
    *Usage:*
    `!pugbomb` and all days will be made great.
    --------------------------------------------------------
    '''
    giphy_term = 'Pug'
    # Slack-style message payload: text plus one attachment per GIF.
    response = {
        "text": "",
        "attachments": []
    }
    params = {
        'api_key': giphy_key,
        'q': giphy_term,
        'limit': 15,
        'lang': 'en',
        # Random page offset so repeated invocations return different GIFs.
        'offset': random.choice([x for x in range(0, 17)])
    }
    request = requests.get(giphy_url, params=params)
    if request.ok:
        if len(request.json()['data']) > 0:
            data = request.json()['data']
            random.shuffle(data)
            for entry in data:
                gif_url = entry['images']['downsized']['url']
                logger.debug("Successfully retrieved Gif: {}".format(gif_url))
                response['attachments'].append(
                    {
                        "fallback": gif_url,
                        "image_url": gif_url
                    }
                )
        else:
            # No results for the search term: fall back to a single
            # hard-coded pug GIF.
            logger.debug("No Gif Found for search term")
            gif_url = 'https://goo.gl/5qdyMJ'
            response = {
                "attachments": [
                    {
                        "fallback": gif_url,
                        "image_url": gif_url
                    }
                ]
            }
        return response
    else:
        # HTTP failure: log the body and signal failure to the bot.
        logger.error("Unable to get Gif from Giphy: {}".format(
            request.content))
        return False
| apache-2.0 |
n-west/gnuradio | gr-analog/python/analog/qa_dpll.py | 47 | 2019 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_dpll_bb(gr_unittest.TestCase):
    # QA tests for the analog.dpll_bb (digital phase-locked loop) block.

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_dpll_bb_001(self):
        # Test set/gets
        period = 1.0
        gain = 0.1

        op = analog.dpll_bb(period, gain)

        op.set_gain(0.2)
        g = op.gain()
        self.assertAlmostEqual(g, 0.2)

        # freq is the reciprocal of the configured period
        f = op.freq()
        self.assertEqual(1/period, f)

        # default decision threshold is 1 - freq/2
        d0 = 1.0 - 0.5*f;
        d1 = op.decision_threshold()
        self.assertAlmostEqual(d0, d1)

        p = op.phase()
        self.assertEqual(0, p)

    def test_dpll_bb_002(self):
        # Feed ten periods of a one-pulse-in-four train; the locked DPLL
        # should reproduce the input pulse train exactly.
        period = 4
        gain = 0.1

        src_data = 10*((period-1)*[0,] + [1,])
        expected_result = src_data

        src = blocks.vector_source_b(src_data)
        op = analog.dpll_bb(period, gain)
        dst = blocks.vector_sink_b()

        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)

if __name__ == '__main__':
    gr_unittest.run(test_dpll_bb, "test_dpll_bb.xml")
| gpl-3.0 |
adazey/Muzez | libs/youtube_dl/extractor/myspass.py | 61 | 2728 | from __future__ import unicode_literals
import os.path
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
class MySpassIE(InfoExtractor):
    # youtube-dl extractor for myspass.de video pages.
    _VALID_URL = r'https?://(?:www\.)?myspass\.de/.*'
    _TEST = {
        'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
        'md5': '0b49f4844a068f8b33f4b7c88405862b',
        'info_dict': {
            'id': '11741',
            'ext': 'mp4',
            'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
            'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
        },
    }

    def _real_extract(self, url):
        # Fetch the per-video metadata XML and build the info dict from it.
        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'

        # video id is the last path element of the URL
        # usually there is a trailing slash, so also try the second but last
        url_path = compat_urllib_parse_urlparse(url).path
        url_parent_path, video_id = os.path.split(url_path)
        if not video_id:
            _, video_id = os.path.split(url_parent_path)

        # get metadata
        metadata_url = META_DATA_URL_TEMPLATE % video_id
        metadata = self._download_xml(
            metadata_url, video_id, transform_source=lambda s: s.strip())

        # extract values from metadata; url_flv and title are mandatory
        url_flv_el = metadata.find('url_flv')
        if url_flv_el is None:
            raise ExtractorError('Unable to extract download url')
        video_url = url_flv_el.text
        title_el = metadata.find('title')
        if title_el is None:
            raise ExtractorError('Unable to extract title')
        title = title_el.text
        format_id_el = metadata.find('format_id')
        if format_id_el is None:
            # default container when the XML omits format_id
            format = 'mp4'
        else:
            format = format_id_el.text
        # description and thumbnail are optional
        description_el = metadata.find('description')
        if description_el is not None:
            description = description_el.text
        else:
            description = None
        imagePreview_el = metadata.find('imagePreview')
        if imagePreview_el is not None:
            thumbnail = imagePreview_el.text
        else:
            thumbnail = None

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'format': format,
            'thumbnail': thumbnail,
            'description': description,
        }
| gpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/cffi-1.1.2/cffi/recompiler.py | 5 | 50042 | import os, sys, io
from . import ffiplatform, model
from .cffi_opcode import *
VERSION = "0x2601"
try:
int_type = (int, long)
except NameError: # Python 3
int_type = int
class GlobalExpr:
    """One entry of the generated globals table (C or pure-Python form)."""

    def __init__(self, name, address, type_op, size=0, check_value=0):
        self.name = name
        self.address = address
        self.type_op = type_op
        self.size = size
        self.check_value = check_value

    def as_c_expr(self):
        # C initializer: { name, address, type opcode, size }.
        fields = (self.name, self.address,
                  self.type_op.as_c_expr(), self.size)
        return ' { "%s", (void *)%s, %s, (void *)%s },' % fields

    def as_python_expr(self):
        # Compact bytes form used by the pure-Python backend:
        # opcode bytes + name, followed by the check value.
        encoded_op = self.type_op.as_python_bytes()
        return "b'%s%s',%d" % (encoded_op, self.name, self.check_value)
class FieldExpr:
    """One struct/union field entry of the generated fields table."""

    def __init__(self, name, field_offset, field_size, fbitsize, field_type_op):
        self.name = name
        self.field_offset = field_offset
        self.field_size = field_size
        self.fbitsize = fbitsize
        self.field_type_op = field_type_op

    def as_c_expr(self):
        # Align the continuation lines under the field name.
        pad = " " * len(self.name)
        lines = [' { "%s", %s,\n' % (self.name, self.field_offset),
                 ' %s %s,\n' % (pad, self.field_size),
                 ' %s %s },' % (pad, self.field_type_op.as_c_expr())]
        return ''.join(lines)

    def as_python_expr(self):
        # Fields are only emitted through as_field_python_expr().
        raise NotImplementedError

    def as_field_python_expr(self):
        # Bitfields carry their width; plain fields carry no size bytes.
        if self.field_type_op.op == OP_NOOP:
            size_expr = ''
        elif self.field_type_op.op == OP_BITFIELD:
            size_expr = format_four_bytes(self.fbitsize)
        else:
            raise NotImplementedError
        return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(),
                              size_expr,
                              self.name)
class StructUnionExpr:
    """One struct/union entry of the generated struct-unions table."""

    def __init__(self, name, type_index, flags, size, alignment, comment,
                 first_field_index, c_fields):
        self.name = name
        self.type_index = type_index
        self.flags = flags
        self.size = size
        self.alignment = alignment
        self.comment = comment
        self.first_field_index = first_field_index
        self.c_fields = c_fields

    def as_c_expr(self):
        # Assemble the C initializer piecewise; the optional /* comment */
        # is inserted only when present.
        parts = [' { "%s", %d, %s,' % (self.name, self.type_index, self.flags),
                 '\n %s, %s, ' % (self.size, self.alignment),
                 '%d, %d ' % (self.first_field_index, len(self.c_fields))]
        if self.comment:
            parts.append('/* %s */ ' % self.comment)
        parts.append('},')
        return ''.join(parts)

    def as_python_expr(self):
        # `flags` is a C-ish expression string; evaluate it against the
        # known flag constants to obtain the numeric value.
        flags = eval(self.flags, G_FLAGS)
        encoded_fields = ','.join(
            c_field.as_field_python_expr() for c_field in self.c_fields)
        return "(b'%s%s%s',%s)" % (
            format_four_bytes(self.type_index),
            format_four_bytes(flags),
            self.name,
            encoded_fields)
class EnumExpr:
    """One entry of the generated '_cffi_enums' table."""
    def __init__(self, name, type_index, size, signed, allenums):
        self.name = name
        self.type_index = type_index
        self.size = size
        self.signed = signed
        self.allenums = allenums
    def as_c_expr(self):
        """Render as a C initializer line; the underlying integer type is
        computed at C compile time via the _cffi_prim_int() macro, so
        'size' and 'signed' may be C expressions here."""
        return (' { "%s", %d, _cffi_prim_int(%s, %s),\n'
                ' "%s" },' % (self.name, self.type_index,
                              self.size, self.signed, self.allenums))
    def as_python_expr(self):
        # in Python mode 'size' and 'signed' are concrete ints, mapped
        # directly to the corresponding PRIM_* primitive index
        prim_index = {
            (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8,
            (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16,
            (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32,
            (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64,
            }[self.size, self.signed]
        return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index),
                                     format_four_bytes(prim_index),
                                     self.name, self.allenums)
class TypenameExpr:
    """One entry of the '_cffi_typenames' table: a typedef name together
    with the index of its type in the '_cffi_types' table."""
    def __init__(self, name, type_index):
        self.name = name
        self.type_index = type_index
    def as_c_expr(self):
        """Render as a C struct-initializer line."""
        template = ' { "%s", %d },'
        return template % (self.name, self.type_index)
    def as_python_expr(self):
        """Render for the Python output: 4 index bytes, then the name."""
        encoded_index = format_four_bytes(self.type_index)
        return "b'%s%s'" % (encoded_index, self.name)
# ____________________________________________________________
class Recompiler:
    """Generates the out-of-line module source for an FFI object: C source
    in API mode, or Python source in ABI mode (target_is_python=True).
    Typical usage: collect_type_table(), then collect_step_tables(), then
    write_source_to_f().  The '_generate' driver dispatches each parsed
    declaration to a '_generate_cpy_<kind>_<step>' method."""
    def __init__(self, ffi, module_name, target_is_python=False):
        self.ffi = ffi
        self.module_name = module_name
        self.target_is_python = target_is_python
    def collect_type_table(self):
        """Collect every type used by the declarations and lay them out as
        the flat 'cffi_types' opcode table; also number all structs/unions
        and enums.  Fills self.cffi_types, self._typesdict (type -> index),
        self._struct_unions and self._enums."""
        self._typesdict = {}
        self._generate("collecttype")
        #
        all_decls = sorted(self._typesdict, key=str)
        #
        # prepare all FUNCTION bytecode sequences first
        self.cffi_types = []
        for tp in all_decls:
            if tp.is_raw_function:
                assert self._typesdict[tp] is None
                self._typesdict[tp] = len(self.cffi_types)
                self.cffi_types.append(tp) # placeholder
                for tp1 in tp.args:
                    assert isinstance(tp1, (model.VoidType,
                                            model.BasePrimitiveType,
                                            model.PointerType,
                                            model.StructOrUnionOrEnum,
                                            model.FunctionPtrType))
                    if self._typesdict[tp1] is None:
                        self._typesdict[tp1] = len(self.cffi_types)
                    self.cffi_types.append(tp1) # placeholder
                self.cffi_types.append('END') # placeholder
        #
        # prepare all OTHER bytecode sequences
        for tp in all_decls:
            if not tp.is_raw_function and self._typesdict[tp] is None:
                self._typesdict[tp] = len(self.cffi_types)
                self.cffi_types.append(tp) # placeholder
                if tp.is_array_type and tp.length is not None:
                    self.cffi_types.append('LEN') # placeholder
        assert None not in self._typesdict.values()
        #
        # collect all structs and unions and enums
        self._struct_unions = {}
        self._enums = {}
        for tp in all_decls:
            if isinstance(tp, model.StructOrUnion):
                self._struct_unions[tp] = None
            elif isinstance(tp, model.EnumType):
                self._enums[tp] = None
        for i, tp in enumerate(sorted(self._struct_unions,
                                      key=lambda tp: tp.name)):
            self._struct_unions[tp] = i
        for i, tp in enumerate(sorted(self._enums,
                                      key=lambda tp: tp.name)):
            self._enums[tp] = i
        #
        # emit all bytecode sequences now
        for tp in all_decls:
            method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__)
            method(tp, self._typesdict[tp])
        #
        # consistency check
        for op in self.cffi_types:
            assert isinstance(op, CffiOp)
        self.cffi_types = tuple(self.cffi_types) # don't change any more
    def _do_collect_type(self, tp):
        """Recursively register 'tp' (and the types it depends on) in
        self._typesdict with a placeholder index of None."""
        if not isinstance(tp, model.BaseTypeByIdentity):
            if isinstance(tp, tuple):
                for x in tp:
                    self._do_collect_type(x)
            return
        if tp not in self._typesdict:
            self._typesdict[tp] = None
            if isinstance(tp, model.FunctionPtrType):
                self._do_collect_type(tp.as_raw_function())
            elif isinstance(tp, model.StructOrUnion):
                if tp.fldtypes is not None and (
                        tp not in self.ffi._parser._included_declarations):
                    for name1, tp1, _ in tp.enumfields():
                        self._do_collect_type(self._field_type(tp, name1, tp1))
            else:
                for _, x in tp._get_items():
                    self._do_collect_type(x)
    def _get_declarations(self):
        return sorted(self.ffi._parser._declarations.items())
    def _generate(self, step_name):
        """Dispatch every parsed declaration (keys look like "kind name")
        to the matching '_generate_cpy_<kind>_<step_name>' method."""
        for name, tp in self._get_declarations():
            kind, realname = name.split(' ', 1)
            try:
                method = getattr(self, '_generate_cpy_%s_%s' % (kind,
                                                                step_name))
            except AttributeError:
                raise ffiplatform.VerificationError(
                    "not implemented in recompile(): %r" % name)
            try:
                method(tp, realname)
            except Exception as e:
                model.attach_exception_info(e, name)
                raise
    # ----------
    ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"]
    def collect_step_tables(self):
        """Build the per-step entry lists (GlobalExpr, FieldExpr, ...) in
        self._lsts, sort them (except "field", whose order is fixed by the
        struct layout), and verify consistency with collect_type_table()."""
        # collect the declarations for '_cffi_globals', '_cffi_typenames', etc.
        self._lsts = {}
        for step_name in self.ALL_STEPS:
            self._lsts[step_name] = []
        self._seen_struct_unions = set()
        self._generate("ctx")
        self._add_missing_struct_unions()
        #
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            if step_name != "field":
                lst.sort(key=lambda entry: entry.name)
            self._lsts[step_name] = tuple(lst) # don't change any more
        #
        # check for a possible internal inconsistency: _cffi_struct_unions
        # should have been generated with exactly self._struct_unions
        lst = self._lsts["struct_union"]
        for tp, i in self._struct_unions.items():
            assert i < len(lst)
            assert lst[i].name == tp.name
        assert len(lst) == len(self._struct_unions)
        # same with enums
        lst = self._lsts["enum"]
        for tp, i in self._enums.items():
            assert i < len(lst)
            assert lst[i].name == tp.name
        assert len(lst) == len(self._enums)
    # ----------
    def _prnt(self, what=''):
        # write one line to the current output file self._f
        self._f.write(what + '\n')
    def write_source_to_f(self, f, preamble):
        """Write the whole module source to file-like 'f'.  'preamble' is
        the user C source in API mode, or None in ABI (Python) mode."""
        if self.target_is_python:
            assert preamble is None
            self.write_py_source_to_f(f)
        else:
            assert preamble is not None
            self.write_c_source_to_f(f, preamble)
    def _rel_readlines(self, filename):
        # read a file shipped next to this module
        # NOTE(review): explicit open/close; a 'with' block would be more
        # robust against exceptions from readlines()
        g = open(os.path.join(os.path.dirname(__file__), filename), 'r')
        lines = g.readlines()
        g.close()
        return lines
    def write_c_source_to_f(self, f, preamble):
        """Emit the complete C source: the inlined _cffi_include.h (with
        parse_c_type.h spliced in), the user preamble, the _cffi_types
        opcode array, the per-declaration functions, the step tables, the
        _cffi_type_context struct, and the module init functions."""
        self._f = f
        prnt = self._prnt
        #
        # first the '#include' (actually done by inlining the file's content)
        lines = self._rel_readlines('_cffi_include.h')
        i = lines.index('#include "parse_c_type.h"\n')
        lines[i:i+1] = self._rel_readlines('parse_c_type.h')
        prnt(''.join(lines))
        #
        # then paste the C source given by the user, verbatim.
        prnt('/************************************************************/')
        prnt()
        prnt(preamble)
        prnt()
        prnt('/************************************************************/')
        prnt()
        #
        # the declaration of '_cffi_types'
        prnt('static void *_cffi_types[] = {')
        typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
        for i, op in enumerate(self.cffi_types):
            comment = ''
            if i in typeindex2type:
                comment = ' // ' + typeindex2type[i]._get_c_name()
            prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment))
        if not self.cffi_types:
            prnt(' 0')
        prnt('};')
        prnt()
        #
        # call generate_cpy_xxx_decl(), for every xxx found from
        # ffi._parser._declarations. This generates all the functions.
        self._seen_constants = set()
        self._generate("decl")
        #
        # the declaration of '_cffi_globals' and '_cffi_typenames'
        nums = {}
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            nums[step_name] = len(lst)
            if nums[step_name] > 0:
                prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % (
                    step_name, step_name))
                for entry in lst:
                    prnt(entry.as_c_expr())
                prnt('};')
                prnt()
        #
        # the declaration of '_cffi_includes'
        if self.ffi._included_ffis:
            prnt('static const char * const _cffi_includes[] = {')
            for ffi_to_include in self.ffi._included_ffis:
                try:
                    included_module_name, included_source = (
                        ffi_to_include._assigned_source[:2])
                except AttributeError:
                    raise ffiplatform.VerificationError(
                        "ffi object %r includes %r, but the latter has not "
                        "been prepared with set_source()" % (
                            self.ffi, ffi_to_include,))
                if included_source is None:
                    raise ffiplatform.VerificationError(
                        "not implemented yet: ffi.include() of a Python-based "
                        "ffi inside a C-based ffi")
                prnt(' "%s",' % (included_module_name,))
            prnt(' NULL')
            prnt('};')
            prnt()
        #
        # the declaration of '_cffi_type_context'
        prnt('static const struct _cffi_type_context_s _cffi_type_context = {')
        prnt(' _cffi_types,')
        for step_name in self.ALL_STEPS:
            if nums[step_name] > 0:
                prnt(' _cffi_%ss,' % step_name)
            else:
                prnt(' NULL, /* no %ss */' % step_name)
        for step_name in self.ALL_STEPS:
            if step_name != "field":
                prnt(' %d, /* num_%ss */' % (nums[step_name], step_name))
        if self.ffi._included_ffis:
            prnt(' _cffi_includes,')
        else:
            prnt(' NULL, /* no includes */')
        prnt(' %d, /* num_types */' % (len(self.cffi_types),))
        prnt(' 0, /* flags */')
        prnt('};')
        prnt()
        #
        # the init function
        base_module_name = self.module_name.split('.')[-1]
        prnt('#ifdef PYPY_VERSION')
        prnt('PyMODINIT_FUNC')
        prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,))
        prnt('{')
        prnt(' p[0] = (const void *)%s;' % VERSION)
        prnt(' p[1] = &_cffi_type_context;')
        prnt('}')
        # on Windows, distutils insists on putting init_cffi_xyz in
        # 'export_symbols', so instead of fighting it, just give up and
        # give it one
        prnt('# ifdef _MSC_VER')
        prnt(' PyMODINIT_FUNC')
        prnt('# if PY_MAJOR_VERSION >= 3')
        prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,))
        prnt('# else')
        prnt(' init%s(void) { }' % (base_module_name,))
        prnt('# endif')
        prnt('# endif')
        prnt('#elif PY_MAJOR_VERSION >= 3')
        prnt('PyMODINIT_FUNC')
        prnt('PyInit_%s(void)' % (base_module_name,))
        prnt('{')
        prnt(' return _cffi_init("%s", %s, &_cffi_type_context);' % (
            self.module_name, VERSION))
        prnt('}')
        prnt('#else')
        prnt('PyMODINIT_FUNC')
        prnt('init%s(void)' % (base_module_name,))
        prnt('{')
        prnt(' _cffi_init("%s", %s, &_cffi_type_context);' % (
            self.module_name, VERSION))
        prnt('}')
        prnt('#endif')
    def _to_py(self, x):
        # render 'x' (a native string, a list/tuple, or an Expr object)
        # as Python source text for the ABI-mode output
        if isinstance(x, str):
            return "b'%s'" % (x,)
        if isinstance(x, (list, tuple)):
            rep = [self._to_py(item) for item in x]
            if len(rep) == 1:
                rep.append('')
            return "(%s)" % (','.join(rep),)
        return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp.
    def write_py_source_to_f(self, f):
        """Emit the complete Python (ABI-mode) source: imports of included
        ffis, then a single _cffi_backend.FFI(...) call carrying the type
        table and the step tables as keyword arguments."""
        self._f = f
        prnt = self._prnt
        #
        # header
        prnt("# auto-generated file")
        prnt("import _cffi_backend")
        #
        # the 'import' of the included ffis
        num_includes = len(self.ffi._included_ffis or ())
        for i in range(num_includes):
            ffi_to_include = self.ffi._included_ffis[i]
            try:
                included_module_name, included_source = (
                    ffi_to_include._assigned_source[:2])
            except AttributeError:
                raise ffiplatform.VerificationError(
                    "ffi object %r includes %r, but the latter has not "
                    "been prepared with set_source()" % (
                        self.ffi, ffi_to_include,))
            if included_source is not None:
                raise ffiplatform.VerificationError(
                    "not implemented yet: ffi.include() of a C-based "
                    "ffi inside a Python-based ffi")
            prnt('from %s import ffi as _ffi%d' % (included_module_name, i))
        prnt()
        prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,))
        prnt(" _version = %s," % (VERSION,))
        #
        # the '_types' keyword argument
        self.cffi_types = tuple(self.cffi_types) # don't change any more
        types_lst = [op.as_python_bytes() for op in self.cffi_types]
        prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),))
        typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()])
        #
        # the keyword arguments from ALL_STEPS
        for step_name in self.ALL_STEPS:
            lst = self._lsts[step_name]
            if len(lst) > 0 and step_name != "field":
                prnt(' _%ss = %s,' % (step_name, self._to_py(lst)))
        #
        # the '_includes' keyword argument
        if num_includes > 0:
            prnt(' _includes = (%s,),' % (
                ', '.join(['_ffi%d' % i for i in range(num_includes)]),))
        #
        # the footer
        prnt(')')
    # ----------
    def _gettypenum(self, type):
        # a KeyError here is a bug. please report it! :-)
        return self._typesdict[type]
    def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
        """Emit C code converting the PyObject* 'fromvar' into the C
        variable 'tovar' of type 'tp'; on failure, execute 'errcode'
        (a C statement such as "return NULL")."""
        extraarg = ''
        if isinstance(tp, model.BasePrimitiveType):
            if tp.is_integer_type() and tp.name != '_Bool':
                converter = '_cffi_to_c_int'
                extraarg = ', %s' % tp.name
            else:
                converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
                                                   tp.name.replace(' ', '_'))
            errvalue = '-1'
        #
        elif isinstance(tp, model.PointerType):
            self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
                                                    tovar, errcode)
            return
        #
        elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
            # a struct (not a struct pointer) as a function argument
            self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
                      % (tovar, self._gettypenum(tp), fromvar))
            self._prnt(' %s;' % errcode)
            return
        #
        elif isinstance(tp, model.FunctionPtrType):
            converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
            extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
            errvalue = 'NULL'
        #
        else:
            raise NotImplementedError(tp)
        #
        self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
        self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
            tovar, tp.get_c_name(''), errvalue))
        self._prnt(' %s;' % errcode)
    def _extra_local_variables(self, tp, localvars):
        # pointer arguments need a 'datasize' local (see the method below)
        if isinstance(tp, model.PointerType):
            localvars.add('Py_ssize_t datasize')
    def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
        # emit C code accepting either a cdata pointer or a list/tuple,
        # in the latter case converting into an alloca()'d temporary array
        self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
        self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
            self._gettypenum(tp), fromvar, tovar))
        self._prnt(' if (datasize != 0) {')
        self._prnt(' if (datasize < 0)')
        self._prnt(' %s;' % errcode)
        self._prnt(' %s = (%s)alloca((size_t)datasize);' % (
            tovar, tp.get_c_name('')))
        self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
        self._prnt(' if (_cffi_convert_array_from_object('
                   '(char *)%s, _cffi_type(%d), %s) < 0)' % (
            tovar, self._gettypenum(tp), fromvar))
        self._prnt(' %s;' % errcode)
        self._prnt(' }')
    def _convert_expr_from_c(self, tp, var, context):
        """Return a C expression converting the C value 'var' of type 'tp'
        back into a PyObject*; 'context' only appears in error messages."""
        if isinstance(tp, model.BasePrimitiveType):
            if tp.is_integer_type():
                return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
            elif tp.name != 'long double':
                return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
            else:
                return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                    var, self._gettypenum(tp))
        elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.ArrayType):
            return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
                var, self._gettypenum(model.PointerType(tp.item)))
        elif isinstance(tp, model.StructType):
            if tp.fldnames is None:
                raise TypeError("'%s' is used as %s, but is opaque" % (
                    tp._get_c_name(), context))
            return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        elif isinstance(tp, model.EnumType):
            return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
                var, self._gettypenum(tp))
        else:
            raise NotImplementedError(tp)
    # ----------
    # typedefs
    def _generate_cpy_typedef_collecttype(self, tp, name):
        self._do_collect_type(tp)
    def _generate_cpy_typedef_decl(self, tp, name):
        pass
    def _typedef_ctx(self, tp, name):
        type_index = self._typesdict[tp]
        self._lsts["typename"].append(TypenameExpr(name, type_index))
    def _generate_cpy_typedef_ctx(self, tp, name):
        self._typedef_ctx(tp, name)
        if getattr(tp, "origin", None) == "unknown_type":
            self._struct_ctx(tp, tp.name, approxname=None)
        elif isinstance(tp, model.NamedPointerType):
            self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name,
                             named_ptr=tp)
    # ----------
    # function declarations
    def _generate_cpy_function_collecttype(self, tp, name):
        self._do_collect_type(tp.as_raw_function())
        if tp.ellipsis and not self.target_is_python:
            self._do_collect_type(tp)
    def _generate_cpy_function_decl(self, tp, name):
        """Emit the C wrappers for one function: the direct '_cffi_d_xxx'
        version, the CPython '_cffi_f_xxx' METH_* wrapper, and the PyPy
        variant that passes structs by pointer."""
        assert not self.target_is_python
        assert isinstance(tp, model.FunctionPtrType)
        if tp.ellipsis:
            # cannot support vararg functions better than this: check for its
            # exact type (including the fixed arguments), and build it as a
            # constant function pointer (no CPython wrapper)
            self._generate_cpy_constant_decl(tp, name)
            return
        prnt = self._prnt
        numargs = len(tp.args)
        if numargs == 0:
            argname = 'noarg'
        elif numargs == 1:
            argname = 'arg0'
        else:
            argname = 'args'
        #
        # ------------------------------
        # the 'd' version of the function, only for addressof(lib, 'func')
        arguments = []
        call_arguments = []
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            arguments.append(type.get_c_name(' x%d' % i, context))
            call_arguments.append('x%d' % i)
        repr_arguments = ', '.join(arguments)
        repr_arguments = repr_arguments or 'void'
        name_and_arguments = '_cffi_d_%s(%s)' % (name, repr_arguments)
        prnt('static %s' % (tp.result.get_c_name(name_and_arguments),))
        prnt('{')
        call_arguments = ', '.join(call_arguments)
        result_code = 'return '
        if isinstance(tp.result, model.VoidType):
            result_code = ''
        prnt(' %s%s(%s);' % (result_code, name, call_arguments))
        prnt('}')
        #
        prnt('#ifndef PYPY_VERSION') # ------------------------------
        #
        prnt('static PyObject *')
        prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
        prnt('{')
        #
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            arg = type.get_c_name(' x%d' % i, context)
            prnt(' %s;' % arg)
        #
        localvars = set()
        for type in tp.args:
            self._extra_local_variables(type, localvars)
        for decl in localvars:
            prnt(' %s;' % (decl,))
        #
        if not isinstance(tp.result, model.VoidType):
            result_code = 'result = '
            context = 'result of %s' % name
            result_decl = ' %s;' % tp.result.get_c_name(' result', context)
            prnt(result_decl)
        else:
            result_decl = None
            result_code = ''
        #
        if len(tp.args) > 1:
            rng = range(len(tp.args))
            for i in rng:
                prnt(' PyObject *arg%d;' % i)
            prnt(' PyObject **aa;')
            prnt()
            prnt(' aa = _cffi_unpack_args(args, %d, "%s");' % (len(rng), name))
            prnt(' if (aa == NULL)')
            prnt(' return NULL;')
            for i in rng:
                prnt(' arg%d = aa[%d];' % (i, i))
            prnt()
        #
        for i, type in enumerate(tp.args):
            self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
                                       'return NULL')
            prnt()
        #
        prnt(' Py_BEGIN_ALLOW_THREADS')
        prnt(' _cffi_restore_errno();')
        call_arguments = ['x%d' % i for i in range(len(tp.args))]
        call_arguments = ', '.join(call_arguments)
        prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
        prnt(' _cffi_save_errno();')
        prnt(' Py_END_ALLOW_THREADS')
        prnt()
        #
        prnt(' (void)self; /* unused */')
        if numargs == 0:
            prnt(' (void)noarg; /* unused */')
        if result_code:
            prnt(' return %s;' %
                 self._convert_expr_from_c(tp.result, 'result', 'result type'))
        else:
            prnt(' Py_INCREF(Py_None);')
            prnt(' return Py_None;')
        prnt('}')
        #
        prnt('#else') # ------------------------------
        #
        # the PyPy version: need to replace struct/union arguments with
        # pointers, and if the result is a struct/union, insert a first
        # arg that is a pointer to the result.
        difference = False
        arguments = []
        call_arguments = []
        context = 'argument of %s' % name
        for i, type in enumerate(tp.args):
            indirection = ''
            if isinstance(type, model.StructOrUnion):
                indirection = '*'
                difference = True
            arg = type.get_c_name(' %sx%d' % (indirection, i), context)
            arguments.append(arg)
            call_arguments.append('%sx%d' % (indirection, i))
        tp_result = tp.result
        if isinstance(tp_result, model.StructOrUnion):
            context = 'result of %s' % name
            arg = tp_result.get_c_name(' *result', context)
            arguments.insert(0, arg)
            tp_result = model.void_type
            result_decl = None
            result_code = '*result = '
            difference = True
        if difference:
            repr_arguments = ', '.join(arguments)
            repr_arguments = repr_arguments or 'void'
            name_and_arguments = '_cffi_f_%s(%s)' % (name, repr_arguments)
            prnt('static %s' % (tp_result.get_c_name(name_and_arguments),))
            prnt('{')
            if result_decl:
                prnt(result_decl)
            call_arguments = ', '.join(call_arguments)
            prnt(' { %s%s(%s); }' % (result_code, name, call_arguments))
            if result_decl:
                prnt(' return result;')
            prnt('}')
        else:
            prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name))
        #
        prnt('#endif') # ------------------------------
        prnt()
    def _generate_cpy_function_ctx(self, tp, name):
        if tp.ellipsis and not self.target_is_python:
            self._generate_cpy_constant_ctx(tp, name)
            return
        type_index = self._typesdict[tp.as_raw_function()]
        numargs = len(tp.args)
        if self.target_is_python:
            meth_kind = OP_DLOPEN_FUNC
        elif numargs == 0:
            meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS'
        elif numargs == 1:
            meth_kind = OP_CPYTHON_BLTN_O # 'METH_O'
        else:
            meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS'
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_f_%s' % name,
                       CffiOp(meth_kind, type_index),
                       size='_cffi_d_%s' % name))
    # ----------
    # named structs or unions
    def _field_type(self, tp_struct, field_name, tp_field):
        """Resolve a field's type; an array field of length '...' becomes
        an ArrayType whose length is computed by a C sizeof expression."""
        if isinstance(tp_field, model.ArrayType) and tp_field.length == '...':
            ptr_struct_name = tp_struct.get_c_name('*')
            actual_length = '_cffi_array_len(((%s)0)->%s)' % (
                ptr_struct_name, field_name)
            tp_item = self._field_type(tp_struct, '%s[0]' % field_name,
                                       tp_field.item)
            tp_field = model.ArrayType(tp_item, actual_length)
        return tp_field
    def _struct_collecttype(self, tp):
        self._do_collect_type(tp)
    def _struct_decl(self, tp, cname, approxname):
        """Emit the C compile-time checks for a struct/union: a function
        that type-checks every field, and a struct used to measure
        alignment."""
        if tp.fldtypes is None:
            return
        prnt = self._prnt
        checkfuncname = '_cffi_checkfld_%s' % (approxname,)
        prnt('_CFFI_UNUSED_FN')
        prnt('static void %s(%s *p)' % (checkfuncname, cname))
        prnt('{')
        prnt(' /* only to generate compile-time warnings or errors */')
        prnt(' (void)p;')
        for fname, ftype, fbitsize in tp.enumfields():
            try:
                if ftype.is_integer_type() or fbitsize >= 0:
                    # accept all integers, but complain on float or double
                    prnt(" (void)((p->%s) << 1); /* check that '%s.%s' is "
                         "an integer */" % (fname, cname, fname))
                    continue
                # only accept exactly the type declared, except that '[]'
                # is interpreted as a '*' and so will match any array length.
                # (It would also match '*', but that's harder to detect...)
                while (isinstance(ftype, model.ArrayType)
                       and (ftype.length is None or ftype.length == '...')):
                    ftype = ftype.item
                    fname = fname + '[0]'
                prnt(' { %s = &p->%s; (void)tmp; }' % (
                    ftype.get_c_name('*tmp', 'field %r'%fname), fname))
            except ffiplatform.VerificationError as e:
                prnt(' /* %s */' % str(e)) # cannot verify it, ignore
        prnt('}')
        prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname))
        prnt()
    def _struct_ctx(self, tp, cname, approxname, named_ptr=None):
        """Build the StructUnionExpr (and its FieldExprs) for one
        struct/union 'tp'.  'cname' is None only when called from
        _add_missing_struct_unions(); 'named_ptr' is set when the struct
        is only reachable through a typedef'd pointer."""
        type_index = self._typesdict[tp]
        reason_for_not_expanding = None
        flags = []
        if isinstance(tp, model.UnionType):
            flags.append("_CFFI_F_UNION")
        if tp.fldtypes is None:
            flags.append("_CFFI_F_OPAQUE")
            reason_for_not_expanding = "opaque"
        if (tp not in self.ffi._parser._included_declarations and
                (named_ptr is None or
                 named_ptr not in self.ffi._parser._included_declarations)):
            if tp.fldtypes is None:
                pass # opaque
            elif tp.partial or tp.has_anonymous_struct_fields():
                pass # field layout obtained silently from the C compiler
            else:
                flags.append("_CFFI_F_CHECK_FIELDS")
            if tp.packed:
                flags.append("_CFFI_F_PACKED")
        else:
            flags.append("_CFFI_F_EXTERNAL")
            reason_for_not_expanding = "external"
        flags = '|'.join(flags) or '0'
        c_fields = []
        if reason_for_not_expanding is None:
            enumfields = list(tp.enumfields())
            for fldname, fldtype, fbitsize in enumfields:
                fldtype = self._field_type(tp, fldname, fldtype)
                # cname is None for _add_missing_struct_unions() only
                op = OP_NOOP
                if fbitsize >= 0:
                    op = OP_BITFIELD
                    size = '%d /* bits */' % fbitsize
                elif cname is None or (
                        isinstance(fldtype, model.ArrayType) and
                        fldtype.length is None):
                    size = '(size_t)-1'
                else:
                    size = 'sizeof(((%s)0)->%s)' % (
                        tp.get_c_name('*') if named_ptr is None
                        else named_ptr.name,
                        fldname)
                if cname is None or fbitsize >= 0:
                    offset = '(size_t)-1'
                elif named_ptr is not None:
                    offset = '((char *)&((%s)0)->%s) - (char *)0' % (
                        named_ptr.name, fldname)
                else:
                    offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname)
                c_fields.append(
                    FieldExpr(fldname, offset, size, fbitsize,
                              CffiOp(op, self._typesdict[fldtype])))
            first_field_index = len(self._lsts["field"])
            self._lsts["field"].extend(c_fields)
            #
            if cname is None: # unknown name, for _add_missing_struct_unions
                size = '(size_t)-2'
                align = -2
                comment = "unnamed"
            else:
                if named_ptr is not None:
                    size = 'sizeof(*(%s)0)' % (named_ptr.name,)
                    align = '-1 /* unknown alignment */'
                else:
                    size = 'sizeof(%s)' % (cname,)
                    align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,)
                comment = None
        else:
            size = '(size_t)-1'
            align = -1
            first_field_index = -1
            comment = reason_for_not_expanding
        self._lsts["struct_union"].append(
            StructUnionExpr(tp.name, type_index, flags, size, align, comment,
                            first_field_index, c_fields))
        self._seen_struct_unions.add(tp)
    def _add_missing_struct_unions(self):
        """Emit anonymously any struct/union collected by
        collect_type_table() that no 'ctx' step has seen yet."""
        # not very nice, but some struct declarations might be missing
        # because they don't have any known C name. Check that they are
        # not partial (we can't complete or verify them!) and emit them
        # anonymously.
        for tp in list(self._struct_unions):
            if tp not in self._seen_struct_unions:
                if tp.partial:
                    raise NotImplementedError("internal inconsistency: %r is "
                                              "partial but was not seen at "
                                              "this point" % (tp,))
                if tp.name.startswith('$') and tp.name[1:].isdigit():
                    approxname = tp.name[1:]
                elif tp.name == '_IO_FILE' and tp.forcename == 'FILE':
                    approxname = 'FILE'
                    self._typedef_ctx(tp, 'FILE')
                else:
                    raise NotImplementedError("internal inconsistency: %r" %
                                              (tp,))
                self._struct_ctx(tp, None, approxname)
    def _generate_cpy_struct_collecttype(self, tp, name):
        self._struct_collecttype(tp)
    _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype
    def _struct_names(self, tp):
        # returns (the C name, an identifier-safe approximation of it)
        cname = tp.get_c_name('')
        if ' ' in cname:
            return cname, cname.replace(' ', '_')
        else:
            return cname, '_' + cname
    def _generate_cpy_struct_decl(self, tp, name):
        self._struct_decl(tp, *self._struct_names(tp))
    _generate_cpy_union_decl = _generate_cpy_struct_decl
    def _generate_cpy_struct_ctx(self, tp, name):
        self._struct_ctx(tp, *self._struct_names(tp))
    _generate_cpy_union_ctx = _generate_cpy_struct_ctx
    # ----------
    # 'anonymous' declarations. These are produced for anonymous structs
    # or unions; the 'name' is obtained by a typedef.
    def _generate_cpy_anonymous_collecttype(self, tp, name):
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_collecttype(tp, name)
        else:
            self._struct_collecttype(tp)
    def _generate_cpy_anonymous_decl(self, tp, name):
        if isinstance(tp, model.EnumType):
            self._generate_cpy_enum_decl(tp)
        else:
            self._struct_decl(tp, name, 'typedef_' + name)
    def _generate_cpy_anonymous_ctx(self, tp, name):
        if isinstance(tp, model.EnumType):
            self._enum_ctx(tp, name)
        else:
            self._struct_ctx(tp, name, 'typedef_' + name)
    # ----------
    # constants, declared with "static const ..."
    def _generate_cpy_const(self, is_int, name, tp=None, category='const',
                            check_value=None):
        """Emit the C accessor function '_cffi_<category>_<name>' for a
        constant: for integers it fills an unsigned long long and returns
        status flags; otherwise it copies the value through a pointer."""
        if (category, name) in self._seen_constants:
            raise ffiplatform.VerificationError(
                "duplicate declaration of %s '%s'" % (category, name))
        self._seen_constants.add((category, name))
        #
        prnt = self._prnt
        funcname = '_cffi_%s_%s' % (category, name)
        if is_int:
            prnt('static int %s(unsigned long long *o)' % funcname)
            prnt('{')
            prnt(' int n = (%s) <= 0;' % (name,))
            prnt(' *o = (unsigned long long)((%s) << 0);'
                 ' /* check that %s is an integer */' % (name, name))
            if check_value is not None:
                if check_value > 0:
                    check_value = '%dU' % (check_value,)
                prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,))
                prnt(' n |= 2;')
            prnt(' return n;')
            prnt('}')
        else:
            assert check_value is None
            prnt('static void %s(char *o)' % funcname)
            prnt('{')
            prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name))
            prnt('}')
        prnt()
    def _generate_cpy_constant_collecttype(self, tp, name):
        is_int = tp.is_integer_type()
        if not is_int or self.target_is_python:
            self._do_collect_type(tp)
    def _generate_cpy_constant_decl(self, tp, name):
        is_int = tp.is_integer_type()
        self._generate_cpy_const(is_int, name, tp)
    def _generate_cpy_constant_ctx(self, tp, name):
        if not self.target_is_python and tp.is_integer_type():
            type_op = CffiOp(OP_CONSTANT_INT, -1)
        else:
            if not tp.sizeof_enabled():
                raise ffiplatform.VerificationError(
                    "constant '%s' is of type '%s', whose size is not known"
                    % (name, tp._get_c_name()))
            if self.target_is_python:
                const_kind = OP_DLOPEN_CONST
            else:
                const_kind = OP_CONSTANT
            type_index = self._typesdict[tp]
            type_op = CffiOp(const_kind, type_index)
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_const_%s' % name, type_op))
    # ----------
    # enums
    def _generate_cpy_enum_collecttype(self, tp, name):
        self._do_collect_type(tp)
    def _generate_cpy_enum_decl(self, tp, name=None):
        for enumerator in tp.enumerators:
            self._generate_cpy_const(True, enumerator)
    def _enum_ctx(self, tp, cname):
        # each enumerator becomes a checked integer constant in the
        # 'global' table, plus one EnumExpr entry for the enum itself
        type_index = self._typesdict[tp]
        type_op = CffiOp(OP_ENUM, -1)
        for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
            self._lsts["global"].append(
                GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op,
                           check_value=enumvalue))
        #
        if cname is not None and '$' not in cname and not self.target_is_python:
            size = "sizeof(%s)" % cname
            signed = "((%s)-1) <= 0" % cname
        else:
            basetp = tp.build_baseinttype(self.ffi, [])
            size = self.ffi.sizeof(basetp)
            signed = int(int(self.ffi.cast(basetp, -1)) < 0)
        allenums = ",".join(tp.enumerators)
        self._lsts["enum"].append(
            EnumExpr(tp.name, type_index, size, signed, allenums))
    def _generate_cpy_enum_ctx(self, tp, name):
        self._enum_ctx(tp, tp._get_c_name())
    # ----------
    # macros: for now only for integers
    def _generate_cpy_macro_collecttype(self, tp, name):
        pass
    def _generate_cpy_macro_decl(self, tp, name):
        if tp == '...':
            check_value = None
        else:
            check_value = tp # an integer
        self._generate_cpy_const(True, name, check_value=check_value)
    def _generate_cpy_macro_ctx(self, tp, name):
        if tp == '...':
            if self.target_is_python:
                raise ffiplatform.VerificationError(
                    "cannot use the syntax '...' in '#define %s ...' when "
                    "using the ABI mode" % (name,))
            check_value = None
        else:
            check_value = tp # an integer
        type_op = CffiOp(OP_CONSTANT_INT, -1)
        self._lsts["global"].append(
            GlobalExpr(name, '_cffi_const_%s' % name, type_op,
                       check_value=check_value))
    # ----------
    # global variables
    def _global_type(self, tp, global_name):
        """Resolve a global's type; an array of length '...' becomes an
        ArrayType whose length is computed by a C sizeof expression."""
        if isinstance(tp, model.ArrayType) and tp.length == '...':
            actual_length = '_cffi_array_len(%s)' % (global_name,)
            tp_item = self._global_type(tp.item, '%s[0]' % global_name)
            tp = model.ArrayType(tp_item, actual_length)
        return tp
    def _generate_cpy_variable_collecttype(self, tp, name):
        self._do_collect_type(self._global_type(tp, name))
    def _generate_cpy_variable_decl(self, tp, name):
        pass
    def _generate_cpy_variable_ctx(self, tp, name):
        tp = self._global_type(tp, name)
        type_index = self._typesdict[tp]
        type_op = CffiOp(OP_GLOBAL_VAR, type_index)
        if tp.sizeof_enabled():
            size = "sizeof(%s)" % (name,)
        else:
            size = 0
        self._lsts["global"].append(
            GlobalExpr(name, '&%s' % name, type_op, size))
    # ----------
    # emitting the opcodes for individual types
    def _emit_bytecode_VoidType(self, tp, index):
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID)
    def _emit_bytecode_PrimitiveType(self, tp, index):
        prim_index = PRIMITIVE_TO_INDEX[tp.name]
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index)
    def _emit_bytecode_UnknownIntegerType(self, tp, index):
        # the exact primitive is only known at C compile time: emit a
        # _cffi_prim_int() expression computing size and signedness
        s = ('_cffi_prim_int(sizeof(%s), (\n'
             ' ((%s)-1) << 0 /* check that %s is an integer type */\n'
             ' ) <= 0)' % (tp.name, tp.name, tp.name))
        self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
    def _emit_bytecode_RawFunctionType(self, tp, index):
        # result opcode, then one opcode per argument, then FUNCTION_END
        self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result])
        index += 1
        for tp1 in tp.args:
            realindex = self._typesdict[tp1]
            if index != realindex:
                if isinstance(tp1, model.PrimitiveType):
                    self._emit_bytecode_PrimitiveType(tp1, index)
                else:
                    self.cffi_types[index] = CffiOp(OP_NOOP, realindex)
            index += 1
        self.cffi_types[index] = CffiOp(OP_FUNCTION_END, int(tp.ellipsis))
    def _emit_bytecode_PointerType(self, tp, index):
        self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype])
    _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType
    _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType
    def _emit_bytecode_FunctionPtrType(self, tp, index):
        raw = tp.as_raw_function()
        self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw])
    def _emit_bytecode_ArrayType(self, tp, index):
        item_index = self._typesdict[tp.item]
        if tp.length is None:
            self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index)
        elif tp.length == '...':
            raise ffiplatform.VerificationError(
                "type %s badly placed: the '...' array length can only be "
                "used on global arrays or on fields of structures" % (
                    str(tp).replace('/*...*/', '...'),))
        else:
            assert self.cffi_types[index + 1] == 'LEN'
            self.cffi_types[index] = CffiOp(OP_ARRAY, item_index)
            self.cffi_types[index + 1] = CffiOp(None, str(tp.length))
    def _emit_bytecode_StructType(self, tp, index):
        struct_index = self._struct_unions[tp]
        self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index)
    _emit_bytecode_UnionType = _emit_bytecode_StructType
    def _emit_bytecode_EnumType(self, tp, index):
        enum_index = self._enums[tp]
        self.cffi_types[index] = CffiOp(OP_ENUM, enum_index)
# NativeIO: an in-memory file accepting "native strings" on both Python
# versions -- plain StringIO on Python 3, a BytesIO subclass on Python 2
# that encodes unicode writes to ASCII.
if sys.version_info >= (3,):
    NativeIO = io.StringIO
else:
    class NativeIO(io.BytesIO):
        def write(self, s):
            # 'unicode' only exists on Python 2, which is the only branch
            # where this class is defined
            if isinstance(s, unicode):
                s = s.encode('ascii')
            super(NativeIO, self).write(s)
def _make_c_or_py_source(ffi, module_name, preamble, target_file):
    """Generate the C source (or pure-Python source if *preamble* is None).

    Returns False and leaves *target_file* untouched when it already holds
    exactly the generated text; otherwise rewrites it atomically and
    returns True.
    """
    recompiler = Recompiler(ffi, module_name,
                            target_is_python=(preamble is None))
    recompiler.collect_type_table()
    recompiler.collect_step_tables()
    f = NativeIO()
    recompiler.write_source_to_f(f, preamble)
    output = f.getvalue()
    try:
        with open(target_file, 'r') as f1:
            # Read one extra byte so a longer pre-existing file also fails
            # the comparison (and a missing file raises IOError).
            if f1.read(len(output) + 1) != output:
                raise IOError
        return False     # already up-to-date
    except IOError:
        # Write to a pid-suffixed temp file, then rename over the target so
        # concurrent readers never observe a half-written file.
        tmp_file = '%s.~%d' % (target_file, os.getpid())
        with open(tmp_file, 'w') as f1:
            f1.write(output)
        try:
            os.rename(tmp_file, target_file)
        except OSError:
            # e.g. Windows refuses to rename over an existing file:
            # remove the target first and retry.
            os.unlink(target_file)
            os.rename(tmp_file, target_file)
        return True
def make_c_source(ffi, module_name, preamble, target_c_file):
    """Write the C extension source for *ffi*; see _make_c_or_py_source()."""
    assert preamble is not None
    return _make_c_or_py_source(ffi, module_name, preamble, target_c_file)
def make_py_source(ffi, module_name, target_py_file):
    """Write the pure-Python (ABI-mode) source; see _make_c_or_py_source()."""
    return _make_c_or_py_source(ffi, module_name, None, target_py_file)
def _modname_to_file(outputdir, modname, extension):
parts = modname.split('.')
try:
os.makedirs(os.path.join(outputdir, *parts[:-1]))
except OSError:
pass
parts[-1] += extension
return os.path.join(outputdir, *parts), parts
def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True,
              c_file=None, source_extension='.c', extradir=None, **kwds):
    """Generate the source for *ffi* and, optionally, compile it.

    With a *preamble* (API mode): emits a C file and, if *call_c_compiler*,
    returns the path of the built extension module, else ``(ext, updated)``.
    Without a preamble (ABI mode): emits a pure-Python module and returns
    its path, or ``(None, updated)`` when not compiling.
    """
    if not isinstance(module_name, str):
        module_name = module_name.encode('ascii')   # Python 2: unicode name
    if ffi._windows_unicode:
        ffi._apply_windows_unicode(kwds)
    if preamble is not None:
        # API mode: write a .c file and describe the extension to build.
        if c_file is None:
            c_file, parts = _modname_to_file(tmpdir, module_name,
                                             source_extension)
            if extradir:
                parts = [extradir] + parts
            ext_c_file = os.path.join(*parts)
        else:
            ext_c_file = c_file
        ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds)
        updated = make_c_source(ffi, module_name, preamble, c_file)
        if call_c_compiler:
            # Compile from inside tmpdir; always restore the original cwd.
            cwd = os.getcwd()
            try:
                os.chdir(tmpdir)
                outputfilename = ffiplatform.compile('.', ext)
            finally:
                os.chdir(cwd)
            return outputfilename
        else:
            return ext, updated
    else:
        # ABI mode: just write the pure-Python module.
        if c_file is None:
            c_file, _ = _modname_to_file(tmpdir, module_name, '.py')
        updated = make_py_source(ffi, module_name, c_file)
        if call_c_compiler:
            return c_file
        else:
            return None, updated
def _verify(ffi, module_name, preamble, *args, **kwds):
    # FOR TESTS ONLY
    """Compile *ffi*, import the result, and graft its API back onto *ffi*."""
    from testing.udir import udir
    import imp
    assert module_name not in sys.modules, "module name conflict: %r" % (
        module_name,)
    kwds.setdefault('tmpdir', str(udir))
    outputfilename = recompile(ffi, module_name, preamble, *args, **kwds)
    module = imp.load_dynamic(module_name, outputfilename)
    #
    # hack hack hack: copy all *bound methods* from module.ffi back to the
    # ffi instance.  Then calls like ffi.new() will invoke module.ffi.new().
    for name in dir(module.ffi):
        if not name.startswith('_'):
            attr = getattr(module.ffi, name)
            # Only overwrite when the attribute actually differs.
            if attr is not getattr(ffi, name, object()):
                setattr(ffi, name, attr)

    # typeof() cannot work on the compiled ffi; disable it explicitly.
    def typeof_disabled(*args, **kwds):
        raise NotImplementedError
    ffi._typeof = typeof_disabled
    return module.lib
| mit |
alexdglover/shill-isms | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py | 2943 | 1157 | ######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys

# ``base_str``: the tuple of "string-like" types for the running Python —
# (str, unicode) on Python 2, (bytes, str) on Python 3.
if sys.version_info < (3, 0):
    base_str = (str, unicode)
else:
    base_str = (bytes, str)
def wrap_ord(a):
    """Return ``ord(a)`` for Python 2 string types; pass anything else through.

    On Python 3, ``ord()`` of a one-byte bytes slice already yields an int,
    so the input is returned unchanged.
    """
    # The version check short-circuits first, so ``base_str`` (and the
    # isinstance call) is never evaluated on Python 3.
    if sys.version_info < (3, 0) and isinstance(a, base_str):
        return ord(a)
    return a
| mit |
technologiescollege/Blockly-rduino-communication | scripts_XP/Lib/site-packages/pygments/lexers/nix.py | 31 | 4031 | # -*- coding: utf-8 -*-
"""
pygments.lexers.nix
~~~~~~~~~~~~~~~~~~~
Lexers for the NixOS Nix language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['NixLexer']
class NixLexer(RegexLexer):
    """
    For the `Nix language <http://nixos.org/nix/>`_.

    .. versionadded:: 2.0
    """

    name = 'Nix'
    aliases = ['nixos', 'nix']
    filenames = ['*.nix']
    mimetypes = ['text/x-nix']

    flags = re.MULTILINE | re.UNICODE

    # Word lists below are compiled into alternation regexes in ``tokens``.
    keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
                'else', 'then', '...']
    builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
                'map', 'removeAttrs', 'throw', 'toString', 'derivation']
    operators = ['++', '+', '?', '.', '!', '//', '==',
                 '!=', '&&', '||', '->', '=']

    punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]

    tokens = {
        'root': [
            # comments starting with #
            (r'#.*$', Comment.Single),

            # multiline comments
            (r'/\*', Comment.Multiline, 'comment'),

            # whitespace
            (r'\s+', Text),

            # keywords
            ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in keywords), Keyword),

            # highlight the builtins
            ('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
             Name.Builtin),

            (r'\b(true|false|null)\b', Name.Constant),

            # operators
            ('(%s)' % '|'.join(re.escape(entry) for entry in operators),
             Operator),

            # word operators
            (r'\b(or|and)\b', Operator.Word),

            # punctuations
            ('(%s)' % '|'.join(re.escape(entry) for entry in punctuations), Punctuation),

            # integers
            (r'[0-9]+', Number.Integer),

            # strings: '' delimits indented strings, " ordinary strings
            (r'"', String.Double, 'doublequote'),
            (r"''", String.Single, 'singlequote'),

            # paths
            (r'[\w.+-]*(\/[\w.+-]+)+', Literal),
            (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),

            # urls
            (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),

            # names of variables
            (r'[\w-]+\s*=', String.Symbol),
            (r'[a-zA-Z_][\w\'-]*', Text),
        ],
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'singlequote': [
            # '' escapes inside indented strings
            (r"'''", String.Escape),
            (r"''\$\{", String.Escape),
            (r"''\n", String.Escape),
            (r"''\r", String.Escape),
            (r"''\t", String.Escape),
            (r"''", String.Single, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r"[^']", String.Single),
        ],
        'doublequote': [
            # NOTE(review): r'\\' matches a lone backslash first, which makes
            # the two following escape patterns unreachable — harmless for
            # tokenization, but worth confirming against upstream pygments.
            (r'\\', String.Escape),
            (r'\\"', String.Escape),
            (r'\\$\{', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r'[^"]', String.Double),
        ],
        'antiquote': [
            (r"\}", String.Interpol, '#pop'),
            # TODO: we should probably escape also here ''${ \${
            (r"\$\{", String.Interpol, '#push'),
            include('root'),
        ],
    }

    def analyse_text(text):
        """Heuristically score how likely *text* is Nix source (0.0 - 1.0)."""
        rv = 0.0
        # TODO: let/in
        if re.search(r'import.+?<[^>]+>', text):
            rv += 0.4
        if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
            rv += 0.4
        if re.search(r'=\s+mkIf\s+', text):
            rv += 0.4
        if re.search(r'\{[a-zA-Z,\s]+\}:', text):
            rv += 0.1
        return rv
| gpl-3.0 |
elsigh/browserscope | third_party/appengine_tools/devappserver2/admin/datastore_indexes_viewer.py | 17 | 1605 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A datastore indexes viewer UI."""
import collections
from google.appengine.api import datastore
from google.appengine.tools.devappserver2.admin import admin_request_handler
class DatastoreIndexesViewer(admin_request_handler.AdminRequestHandler):
    """Admin page listing the datastore composite indexes, grouped by kind."""

    def get(self):
        # Group the index descriptions by entity kind for the template.
        indexes = collections.defaultdict(list)
        for index, _ in datastore.GetIndexes():
            properties = []
            for property_name, sort_direction in index.Properties():
                # sort_direction selects from the tuples below:
                # 0 = unspecified, 1 = ascending, 2 = descending.
                properties.append({
                    'name': property_name,
                    'sort_symbol': ('', '▲', '▼')[sort_direction],
                    'sort_direction': ('', 'Ascending', 'Descending')[sort_direction],
                })
            kind = str(index.Kind())
            indexes[kind].append({
                'id': str(index.Id()),
                'has_ancestor': bool(index.HasAncestor()),
                'properties': properties
            })
        self.response.write(self.render('datastore_indexes_viewer.html',
                                        {'indexes': sorted(indexes.items())}))
| apache-2.0 |
trnewman/VT-USRP-daughterboard-drivers_python | usrp/host/lib/legacy/check_data.py | 12 | 1448 | #!/usr/bin/env python
#
# Copyright 2003 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
import sys
import struct

# Python 2 script: reads interleaved 16-bit I/Q words from stdin and checks
# that the Q channel carries an incrementing 16-bit counter, printing a line
# for every discontinuity found.
fin = sys.stdin

count = 0               # total number of 16-bit words read so far
expected = 0            # next expected counter value (mod 2**16)
last_correction = 0     # word index of the most recent discontinuity

while 1:
    s = fin.read(2)
    if not s or len(s) != 2:
        break           # EOF or truncated word
    v, = struct.unpack ('H', s)
    iv = int(v) & 0xffff
    # print "%8d %6d 0x%04x" % (count, iv, iv)
    if count & 0x1: # only counting on the Q channel
        if (expected & 0xffff) != iv:
            # Discontinuity: report it, then resynchronize to this value.
            print "%8d (%6d) %6d 0x%04x" % (count, count - last_correction, iv, iv)
            expected = iv # reset expected sequence
            last_correction = count
        expected = (expected + 1) & 0xffff
    count += 1
| gpl-3.0 |
dagwieers/ansible | lib/ansible/modules/remote_management/lxca/lxca_cmms.py | 22 | 4442 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
---
version_added: "2.8"
author:
- Naval Patel (@navalkp)
- Prashant Bhosale (@prabhosa)
module: lxca_cmms
short_description: Custom module for lxca cmms inventory utility
description:
- This module returns/displays a inventory details of cmms
options:
uuid:
description:
uuid of device, this is string with length greater than 16.
command_options:
description:
options to filter nodes information
default: cmms
choices:
- cmms
- cmms_by_uuid
- cmms_by_chassis_uuid
chassis:
description:
uuid of chassis, this is string with length greater than 16.
extends_documentation_fragment:
- lxca_common
'''
EXAMPLES = '''
# get all cmms info
- name: get nodess data from LXCA
lxca_cmms:
login_user: USERID
login_password: Password
auth_url: "https://10.243.15.168"
# get specific cmms info by uuid
- name: get nodes data from LXCA
lxca_cmms:
login_user: USERID
login_password: Password
auth_url: "https://10.243.15.168"
uuid: "3C737AA5E31640CE949B10C129A8B01F"
command_options: cmms_by_uuid
# get specific cmms info by chassis uuid
- name: get nodes data from LXCA
lxca_cmms:
login_user: USERID
login_password: Password
auth_url: "https://10.243.15.168"
chassis: "3C737AA5E31640CE949B10C129A8B01F"
command_options: cmms_by_chassis_uuid
'''
RETURN = r'''
result:
description: cmms detail from lxca
returned: success
type: dict
sample:
cmmList:
- machineType: ''
model: ''
type: 'CMM'
uuid: '118D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
- machineType: ''
model: ''
type: 'CMM'
uuid: '223D2C88C8FD11E4947B6EAE8B4BDCDF'
# bunch of properties
# Multiple cmms details
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.lxca.common import LXCA_COMMON_ARGS, has_pylxca, connection_object
try:
from pylxca import cmms
except ImportError:
pass
# Error messages for missing required parameters.
UUID_REQUIRED = 'UUID of device is required for cmms_by_uuid command.'
CHASSIS_UUID_REQUIRED = 'UUID of chassis is required for cmms_by_chassis_uuid command.'
SUCCESS_MSG = "Success %s result"


def _cmms(module, lxca_con):
    """Return inventory details of all CMMs over the pylxca connection."""
    return cmms(lxca_con)
def _cmms_by_uuid(module, lxca_con):
    """Return details of the CMM identified by the ``uuid`` parameter.

    Fails the module when ``uuid`` is missing or empty.
    """
    if not module.params['uuid']:
        module.fail_json(msg=UUID_REQUIRED)
    return cmms(lxca_con, module.params['uuid'])
def _cmms_by_chassis_uuid(module, lxca_con):
    """Return details of the CMMs in the chassis given by ``chassis``.

    Fails the module when ``chassis`` is missing or empty.
    """
    if not module.params['chassis']:
        module.fail_json(msg=CHASSIS_UUID_REQUIRED)
    return cmms(lxca_con, chassis=module.params['chassis'])
def setup_module_object():
    """
    Merge the common LXCA argument spec with this module's own spec and
    build the AnsibleModule object.

    :return: configured AnsibleModule instance
    """
    args_spec = dict(LXCA_COMMON_ARGS)
    args_spec.update(INPUT_ARG_SPEC)
    module = AnsibleModule(argument_spec=args_spec, supports_check_mode=False)
    return module
# Dispatch table: maps the ``command_options`` parameter to its handler.
FUNC_DICT = {
    'cmms': _cmms,
    'cmms_by_uuid': _cmms_by_uuid,
    'cmms_by_chassis_uuid': _cmms_by_chassis_uuid,
}

# Module-specific arguments; merged with LXCA_COMMON_ARGS by
# setup_module_object().
INPUT_ARG_SPEC = dict(
    command_options=dict(default='cmms', choices=['cmms', 'cmms_by_uuid',
                                                  'cmms_by_chassis_uuid']),
    uuid=dict(default=None),
    chassis=dict(default=None)
)
def execute_module(module):
    """
    Open an LXCA connection, run the handler selected by the
    ``command_options`` parameter, and exit the module with its result.

    :param module: Ansible module object
    """
    try:
        with connection_object(module) as lxca_con:
            result = FUNC_DICT[module.params['command_options']](module, lxca_con)
            module.exit_json(changed=False,
                             msg=SUCCESS_MSG % module.params['command_options'],
                             result=result)
    except Exception as exception:
        # str(e): exception.args may contain non-string values (e.g. errno
        # ints); joining them directly would raise TypeError here and mask
        # the original error.
        error_msg = '; '.join(str(e) for e in exception.args)
        module.fail_json(msg=error_msg, exception=traceback.format_exc())
def main():
    """Module entry point: verify pylxca is available, then run the command."""
    module = setup_module_object()
    has_pylxca(module)
    execute_module(module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
jonathonwalz/ansible | lib/ansible/modules/system/at.py | 69 | 6626 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Richard Isaacson <richard.c.isaacson@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: at
short_description: Schedule the execution of a command or script file via the at command.
description:
- Use this module to schedule a command or script file to run once in the future.
- All jobs are executed in the 'a' queue.
version_added: "1.5"
options:
command:
description:
- A command to be executed in the future.
required: false
default: null
script_file:
description:
- An existing script file to be executed in the future.
required: false
default: null
count:
description:
- The count of units in the future to execute the command or script file.
required: true
units:
description:
- The type of units in the future to execute the command or script file.
required: true
choices: ["minutes", "hours", "days", "weeks"]
state:
description:
- The state dictates if the command or script file should be evaluated as present(added) or absent(deleted).
required: false
choices: ["present", "absent"]
default: "present"
unique:
description:
- If a matching job is present a new job will not be added.
required: false
default: false
requirements:
- at
author: "Richard Isaacson (@risaacson)"
'''
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
- at:
command: "ls -d / > /dev/null"
count: 20
units: minutes
# Match a command to an existing job and delete the job.
- at:
command: "ls -d / > /dev/null"
state: absent
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
- at:
command: "ls -d / > /dev/null"
unique: true
count: 20
units: minutes
'''
import os
import tempfile
def add_job(module, result, at_cmd, count, units, command, script_file):
    """Schedule *script_file* with at(1) to run *count* *units* from now.

    When the script was generated from an inline *command*, the temporary
    file is removed after the job is queued.  Marks *result* as changed.
    """
    at_command = "%s -f %s now + %s %s" % (at_cmd, script_file, count, units)
    rc, out, err = module.run_command(at_command, check_rc=True)
    if command:
        os.unlink(script_file)
    result['changed'] = True
def delete_job(module, result, at_cmd, command, script_file):
    """Delete every queued at(1) job whose text matches *script_file*.

    Cleans up the temporary script when it was generated from an inline
    *command*, then exits the module (does not return).
    """
    for matching_job in get_matching_jobs(module, at_cmd, script_file):
        at_command = "%s -d %s" % (at_cmd, matching_job)
        rc, out, err = module.run_command(at_command, check_rc=True)
        result['changed'] = True
    if command:
        os.unlink(script_file)
    module.exit_json(**result)
def get_matching_jobs(module, at_cmd, script_file):
    """Return the at(1) job numbers whose job text contains *script_file*'s content.

    Lists the user's queued jobs with atq, then inspects each job's script
    (``at -c <n>``) for the content of *script_file*.
    """
    matching_jobs = []

    atq_cmd = module.get_bin_path('atq', True)

    # Get the list of job numbers for the user.
    rc, out, err = module.run_command(atq_cmd, check_rc=True)
    current_jobs = out.splitlines()
    if len(current_jobs) == 0:
        return matching_jobs

    # Read script_file into a string; 'with' guarantees the handle is closed
    # (the original left the file object open until garbage collection).
    with open(script_file) as script_fh:
        script_file_string = script_fh.read().strip()

    # Loop through the jobs.
    # If the script text is contained in a job add job number to list.
    for current_job in current_jobs:
        split_current_job = current_job.split()
        at_command = "%s -c %s" % (at_cmd, split_current_job[0])
        rc, out, err = module.run_command(at_command, check_rc=True)
        if script_file_string in out:
            matching_jobs.append(split_current_job[0])

    # Return the list.
    return matching_jobs
def create_tempfile(command):
    """Write *command* to a fresh temporary file and return its path.

    The caller is responsible for deleting the file when done.
    """
    fd, script_file = tempfile.mkstemp(prefix='at')
    with os.fdopen(fd, 'w') as handle:
        handle.write(command)
    return script_file
def main():
    """Module entry point: parse parameters and add or remove the at(1) job."""
    module = AnsibleModule(
        argument_spec = dict(
            command=dict(required=False,
                         type='str'),
            script_file=dict(required=False,
                             type='str'),
            count=dict(required=False,
                       type='int'),
            units=dict(required=False,
                       default=None,
                       choices=['minutes', 'hours', 'days', 'weeks'],
                       type='str'),
            state=dict(required=False,
                       default='present',
                       choices=['present', 'absent'],
                       type='str'),
            unique=dict(required=False,
                        default=False,
                        type='bool')
        ),
        mutually_exclusive=[['command', 'script_file']],
        required_one_of=[['command', 'script_file']],
        supports_check_mode=False
    )

    at_cmd = module.get_bin_path('at', True)

    command = module.params['command']
    script_file = module.params['script_file']
    count = module.params['count']
    units = module.params['units']
    state = module.params['state']
    unique = module.params['unique']

    if (state == 'present') and (not count or not units):
        module.fail_json(msg="present state requires count and units")

    result = {'state': state, 'changed': False}

    # If command transform it into a script_file
    if command:
        script_file = create_tempfile(command)

    # if absent remove existing and return (delete_job exits the module)
    if state == 'absent':
        delete_job(module, result, at_cmd, command, script_file)

    # if unique if existing return unchanged
    if unique:
        if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
            if command:
                os.unlink(script_file)
            module.exit_json(**result)

    result['script_file'] = script_file
    result['count'] = count
    result['units'] = units

    add_job(module, result, at_cmd, count, units, command, script_file)

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
kustodian/ansible | test/units/modules/network/iosxr/test_iosxr_netconf.py | 68 | 3448 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.iosxr import iosxr_netconf
from units.modules.utils import set_module_args
from .iosxr_module import TestIosxrModule
class TestIosxrNetconfModule(TestIosxrModule):
    """Unit tests for the iosxr_netconf module, with get/load_config mocked."""

    module = iosxr_netconf

    def setUp(self):
        super(TestIosxrNetconfModule, self).setUp()

        # Patch out device I/O: get_config supplies the "running" config,
        # load_config records what would be pushed.
        self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_netconf.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_netconf.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestIosxrNetconfModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def test_iosxr_disable_netconf_service(self):
        # netconf currently enabled -> state=absent must remove all three lines
        self.get_config.return_value = '''
        netconf-yang agent
        ssh
        !
        ssh server netconf vrf default
        '''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=830, netconf_vrf='default', state='absent'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['no netconf-yang agent ssh', 'no ssh server netconf port 830', 'no ssh server netconf vrf default'])

    def test_iosxr_enable_netconf_service(self):
        # empty running config -> state=present must add all three lines
        self.get_config.return_value = ''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=830, netconf_vrf='default', state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['netconf-yang agent ssh', 'ssh server netconf port 830', 'ssh server netconf vrf default'])

    def test_iosxr_change_netconf_port(self):
        self.get_config.return_value = '''
        netconf-yang agent
        ssh
        !
        ssh server netconf vrf default
        '''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_port=9000, state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['ssh server netconf port 9000'])

    def test_iosxr_change_netconf_vrf(self):
        self.get_config.return_value = '''
        netconf-yang agent
        ssh
        !
        ssh server netconf vrf default
        '''
        self.load_config.return_value = 'dummy diff'
        set_module_args(dict(netconf_vrf='new_default', state='present'))
        result = self.execute_module(changed=True)
        self.assertEqual(result['commands'], ['ssh server netconf vrf new_default'])
| gpl-3.0 |
GbalsaC/bitnamiP | opaque-keys/docs/conf.py | 1 | 10247 | # -*- coding: utf-8 -*-
#
# opaque_keys documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 29 22:54:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'opaque_keys'
copyright = u'2014, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'opaque_keysdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'opaque_keys.tex', u'opaque\\_keys Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'opaque_keys', u'opaque_keys Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'opaque_keys', u'opaque_keys Documentation',
u'Author', 'opaque_keys', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'opaque_keys'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'opaque_keys'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/331_test_startfile.py | 2 | 1100 | # Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also the script actually has run.
import unittest
from test import support
import os
import sys
from os import path
startfile = support.get_attribute(os, 'startfile')
class TestCase(unittest.TestCase):
    """Exercise os.startfile with a missing file and a harmless script."""

    def test_nonexisting(self):
        # Launching a file that does not exist must raise OSError.
        with self.assertRaises(OSError):
            startfile("nonexisting.vbs")

    def test_empty(self):
        """Launch empty.vbs twice: bare, and with an explicit "open" verb."""
        # Switch to an existing, but safe, working directory so the
        # temp_cwd cleanup code can do its thing without permission errors.
        safe_dir = path.dirname(sys.executable)
        with support.temp_cwd(path=safe_dir):
            script = path.join(path.dirname(__file__), "empty.vbs")
            for extra_args in ((), ("open",)):
                startfile(script, *extra_args)
def test_main():
    """Run every test in TestCase via the test.support helper."""
    support.run_unittest(TestCase)
if __name__=="__main__":
test_main()
| gpl-3.0 |
shlomif/PySolFC | pysollib/pysolgtk/tkconst.py | 2 | 1547 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# import sys
from gtk import gdk
# ************************************************************************
# * constants
# ************************************************************************
# Return codes for event-handler callbacks: the names indicate that a
# handled event stops further propagation while a propagated one continues
# (standard GTK callback convention).
EVENT_HANDLED = 1
EVENT_PROPAGATE = 0

# Stock GDK cursor constants re-exported under PySol-specific names.
CURSOR_DRAG = gdk.HAND1
CURSOR_WATCH = gdk.WATCH
CURSOR_DOWN_ARROW = gdk.SB_DOWN_ARROW

# Identifiers of the toolbar buttons, presumably in display order --
# confirm against the toolbar construction code.
TOOLBAR_BUTTONS = (
    "new",
    "restart",
    "open",
    "save",
    "undo",
    "redo",
    "autodrop",
    "shuffle",
    "pause",
    "statistics",
    "rules",
    "quit",
    "player",
)
| gpl-3.0 |
emanuelmetal/TFGCruzada | CruzadaSite/CruzadaSite/settings.py | 1 | 2531 | """
Django settings for CruzadaSite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=h8-(is2&_w6k8f+4(k)5%nu#bf_)4@9b#wac&3r2(3_4hrbt('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Cruzada',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.request"
)
ROOT_URLCONF = 'CruzadaSite.urls'
WSGI_APPLICATION = 'CruzadaSite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# NOTE(review): database credentials and host are hard-coded here and
# committed to the repository; move them to environment variables or a
# non-committed local settings file before deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'cruzada',
        'USER': 'root',
        'PASSWORD': '123456',
        'PORT': '3306',
        'HOST': '192.168.203.129',
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'es-ar'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'Cruzada/templates'),
) | gpl-2.0 |
h2oai/h2o-3 | h2o-py/h2o/automl/_h2o_automl_output.py | 2 | 1913 | import h2o
from h2o.automl._base import H2OAutoMLBaseMixin
from h2o.base import Keyed
class H2OAutoMLOutput(H2OAutoMLBaseMixin, Keyed):
    """
    AutoML Output object containing the results of AutoML
    """

    def __init__(self, state):
        # ``state`` is a dict produced by the AutoML backend; assumes keys
        # 'project_name', 'json', 'leader', 'leaderboard' and 'event_log'
        # are always present -- TODO confirm with callers.
        self._project_name = state['project_name']
        self._key = state['json']['automl_id']['name']
        self._leader = state['leader']
        self._leaderboard = state['leaderboard']
        self._event_log = el = state['event_log']
        # Build a {name: value} dict from event-log rows whose 'name' column
        # is non-empty, using H2OFrame-style boolean row selection.
        self._training_info = {r[0]: r[1]
                               for r in el[el['name'] != '', ['name', 'value']]
                               .as_data_frame(use_pandas=False, header=False)
                               }

    def __getitem__(self, item):
        # Dict-style access (output['leader']) is restricted to attributes
        # that are declared as properties on the class.
        if (
            hasattr(self, item) and
            # do not enable user to get anything else than properties through the dictionary interface
            hasattr(self.__class__, item) and
            isinstance(getattr(self.__class__, item), property)
        ):
            return getattr(self, item)
        raise KeyError(item)

    @property
    def project_name(self):
        return self._project_name

    @property
    def leader(self):
        return self._leader

    @property
    def leaderboard(self):
        return self._leaderboard

    @property
    def training_info(self):
        return self._training_info

    @property
    def event_log(self):
        return self._event_log

    #-------------------------------------------------------------------------------------------------------------------
    # Overrides
    #-------------------------------------------------------------------------------------------------------------------
    @property
    def key(self):
        return self._key

    def detach(self):
        # Drop the project-name reference and remove the leaderboard and
        # event-log frames from the H2O cluster.
        self._project_name = None
        h2o.remove(self.leaderboard)
        h2o.remove(self.event_log)
| apache-2.0 |
benjaminrigaud/django | django/views/defaults.py | 46 | 2760 | from django import http
from django.template import (Context, RequestContext,
loader, Template, TemplateDoesNotExist)
from django.views.decorators.csrf import requires_csrf_token
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.

    Renders :template:`404.html` with ``request_path`` (the path of the
    requested URL, e.g. '/app/pages/bad_page/') in the context; if that
    template is missing, serves a minimal built-in HTML page instead.
    """
    fallback_html = (
        '<h1>Not Found</h1>'
        '<p>The requested URL {{ request_path }} was not found on this server.</p>')
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        # No project 404 template: use the built-in page and pin the content
        # type, since DEFAULT_CONTENT_TYPE should not apply here.
        template = Template(fallback_html)
        content_type = 'text/html'
    else:
        content_type = None  # Django will use DEFAULT_CONTENT_TYPE
    context = RequestContext(request, {'request_path': request.path})
    return http.HttpResponseNotFound(template.render(context), content_type=content_type)
@requires_csrf_token
def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Renders :template:`500.html` with an empty context; if the template is
    missing, returns a bare-bones HTML response instead.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseServerError(
            '<h1>Server Error (500)</h1>', content_type='text/html')
    else:
        return http.HttpResponseServerError(template.render(Context({})))
@requires_csrf_token
def bad_request(request, template_name='400.html'):
    """
    400 error handler.

    Renders :template:`400.html` with an empty context; if the template is
    missing, returns a bare-bones HTML response instead.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseBadRequest(
            '<h1>Bad Request (400)</h1>', content_type='text/html')
    else:
        return http.HttpResponseBadRequest(template.render(Context({})))
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, template_name='403.html'):
    """
    Permission denied (403) handler.

    Renders :template:`403.html` with a RequestContext. If the template does
    not exist, an HttpResponseForbidden containing the text "403 Forbidden"
    (as per RFC 2616) will be returned instead.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseForbidden(
            '<h1>403 Forbidden</h1>', content_type='text/html')
    else:
        return http.HttpResponseForbidden(template.render(RequestContext(request)))
| bsd-3-clause |
jokem59/heroprotocol | protocol46446.py | 25 | 26840 | # Copyright (c) 2015 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_fixedFileHash',17,7)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_optional',[13]), #32
('_int',[(-9223372036854775808,64)]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',32,16),('m_timeUTC',33,5),('m_timeLocalOffset',33,6),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_struct',[[('m_name',9,-18),('m_clanTag',41,-17),('m_clanLogo',42,-16),('m_highestLeague',25,-15),('m_combinedRaceLevels',43,-14),('m_randomSeed',6,-13),('m_racePreference',44,-12),('m_teamPreference',45,-11),('m_testMap',13,-10),('m_testAuto',13,-9),('m_examine',13,-8),('m_customInterface',13,-7),('m_testType',6,-6),('m_observe',24,-5),('m_hero',46,-4),('m_skin',46,-3),('m_mount',46,-2),('m_toonHandle',20,-1)]]), #47
('_array',[(0,5),47]), #48
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_ammId',43,-1)]]), #49
('_int',[(1,4)]), #50
('_int',[(1,8)]), #51
('_bitarray',[(0,6)]), #52
('_bitarray',[(0,8)]), #53
('_bitarray',[(0,2)]), #54
('_bitarray',[(0,7)]), #55
('_struct',[[('m_allowedColors',52,-6),('m_allowedRaces',53,-5),('m_allowedDifficulty',52,-4),('m_allowedControls',53,-3),('m_allowedObserveTypes',54,-2),('m_allowedAIBuilds',55,-1)]]), #56
('_array',[(0,5),56]), #57
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',29,-25),('m_gameOptions',49,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',50,-18),('m_maxColors',3,-17),('m_maxRaces',51,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',30,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',57,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',36,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #58
('_optional',[1]), #59
('_optional',[2]), #60
('_struct',[[('m_color',60,-1)]]), #61
('_array',[(0,4),46]), #62
('_array',[(0,17),6]), #63
('_array',[(0,9),6]), #64
('_struct',[[('m_control',10,-20),('m_userId',59,-19),('m_teamId',1,-18),('m_colorPref',61,-17),('m_racePref',44,-16),('m_difficulty',3,-15),('m_aiBuild',0,-14),('m_handicap',0,-13),('m_observe',24,-12),('m_logoIndex',6,-11),('m_hero',46,-10),('m_skin',46,-9),('m_mount',46,-8),('m_artifacts',62,-7),('m_workingSetSlotId',25,-6),('m_rewards',63,-5),('m_toonHandle',20,-4),('m_licenses',64,-3),('m_tandemLeaderUserId',59,-2),('m_hasSilencePenalty',13,-1)]]), #65
('_array',[(0,5),65]), #66
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',66,-8),('m_randomSeed',6,-7),('m_hostUserId',59,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #67
('_struct',[[('m_userInitialData',48,-3),('m_gameDescription',58,-2),('m_lobbyState',67,-1)]]), #68
('_struct',[[('m_syncLobbyState',68,-1)]]), #69
('_struct',[[('m_name',20,-1)]]), #70
('_blob',[(0,6)]), #71
('_struct',[[('m_name',71,-1)]]), #72
('_struct',[[('m_name',71,-3),('m_type',6,-2),('m_data',20,-1)]]), #73
('_struct',[[('m_type',6,-3),('m_name',71,-2),('m_data',34,-1)]]), #74
('_array',[(0,5),10]), #75
('_struct',[[('m_signature',75,-2),('m_toonHandle',20,-1)]]), #76
('_struct',[[('m_gameFullyDownloaded',13,-14),('m_developmentCheatsEnabled',13,-13),('m_testCheatsEnabled',13,-12),('m_multiplayerCheatsEnabled',13,-11),('m_syncChecksummingEnabled',13,-10),('m_isMapToMapTransition',13,-9),('m_debugPauseEnabled',13,-8),('m_useGalaxyAsserts',13,-7),('m_platformMac',13,-6),('m_cameraFollow',13,-5),('m_baseBuildNum',6,-4),('m_buildNum',6,-3),('m_versionFlags',6,-2),('m_hotkeyProfile',46,-1)]]), #77
('_struct',[[]]), #78
('_int',[(0,16)]), #79
('_struct',[[('x',79,-2),('y',79,-1)]]), #80
('_struct',[[('m_which',12,-2),('m_target',80,-1)]]), #81
('_struct',[[('m_fileName',30,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',29,-1)]]), #82
('_int',[(1,32)]), #83
('_struct',[[('m_sequence',83,-1)]]), #84
('_null',[]), #85
('_int',[(0,20)]), #86
('_int',[(-2147483648,32)]), #87
('_struct',[[('x',86,-3),('y',86,-2),('z',87,-1)]]), #88
('_struct',[[('m_targetUnitFlags',79,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',79,-4),('m_snapshotControlPlayerId',59,-3),('m_snapshotUpkeepPlayerId',59,-2),('m_snapshotPoint',88,-1)]]), #89
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89)}]), #90
('_struct',[[('m_target',90,-4),('m_time',87,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #91
('_struct',[[('m_data',91,-1)]]), #92
('_int',[(0,25)]), #93
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #94
('_optional',[94]), #95
('_choice',[(0,2),{0:('None',85),1:('TargetPoint',88),2:('TargetUnit',89),3:('Data',6)}]), #96
('_optional',[88]), #97
('_struct',[[('m_cmdFlags',93,-7),('m_abil',95,-6),('m_data',96,-5),('m_vector',97,-4),('m_sequence',83,-3),('m_otherUnit',43,-2),('m_unitGroup',43,-1)]]), #98
('_int',[(0,9)]), #99
('_bitarray',[(0,9)]), #100
('_array',[(0,9),99]), #101
('_choice',[(0,2),{0:('None',85),1:('Mask',100),2:('OneIndices',101),3:('ZeroIndices',101)}]), #102
('_struct',[[('m_unitLink',79,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',99,-1)]]), #103
('_array',[(0,9),103]), #104
('_struct',[[('m_subgroupIndex',99,-4),('m_removeMask',102,-3),('m_addSubgroups',104,-2),('m_addUnitTags',64,-1)]]), #105
('_struct',[[('m_controlGroupId',1,-2),('m_delta',105,-1)]]), #106
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',12,-2),('m_mask',102,-1)]]), #107
('_struct',[[('m_count',99,-6),('m_subgroupCount',99,-5),('m_activeSubgroupIndex',99,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #108
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',108,-1)]]), #109
('_struct',[[('m_chatMessage',29,-1)]]), #110
('_struct',[[('m_speed',12,-1)]]), #111
('_int',[(-128,8)]), #112
('_struct',[[('m_delta',112,-1)]]), #113
('_struct',[[('x',87,-2),('y',87,-1)]]), #114
('_struct',[[('m_point',114,-4),('m_unit',6,-3),('m_pingedMinimap',13,-2),('m_option',87,-1)]]), #115
('_struct',[[('m_verb',29,-2),('m_arguments',29,-1)]]), #116
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #117
('_struct',[[('m_unitTag',6,-1)]]), #118
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #119
('_struct',[[('m_conversationId',87,-2),('m_replyId',87,-1)]]), #120
('_optional',[20]), #121
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',121,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #122
('_array',[(0,5),122]), #123
('_int',[(0,1)]), #124
('_struct',[[('m_userInfos',123,-2),('m_method',124,-1)]]), #125
('_choice',[(0,3),{0:('None',85),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',87),4:('TextChanged',30),5:('MouseButton',6)}]), #126
('_struct',[[('m_controlId',87,-3),('m_eventType',87,-2),('m_eventData',126,-1)]]), #127
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #128
('_array',[(0,7),6]), #129
('_struct',[[('m_soundHash',129,-2),('m_length',129,-1)]]), #130
('_struct',[[('m_syncInfo',130,-1)]]), #131
('_struct',[[('m_queryId',79,-3),('m_lengthMs',6,-2),('m_finishGameLoop',6,-1)]]), #132
('_struct',[[('m_queryId',79,-2),('m_lengthMs',6,-1)]]), #133
('_struct',[[('m_animWaitQueryId',79,-1)]]), #134
('_struct',[[('m_sound',6,-1)]]), #135
('_struct',[[('m_transmissionId',87,-2),('m_thread',6,-1)]]), #136
('_struct',[[('m_transmissionId',87,-1)]]), #137
('_optional',[80]), #138
('_optional',[79]), #139
('_optional',[112]), #140
('_struct',[[('m_target',138,-6),('m_distance',139,-5),('m_pitch',139,-4),('m_yaw',139,-3),('m_reason',140,-2),('m_follow',13,-1)]]), #141
('_struct',[[('m_skipType',124,-1)]]), #142
('_int',[(0,11)]), #143
('_struct',[[('x',143,-2),('y',143,-1)]]), #144
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #145
('_struct',[[('m_posUI',144,-3),('m_posWorld',88,-2),('m_flags',112,-1)]]), #146
('_struct',[[('m_achievementLink',79,-1)]]), #147
('_struct',[[('m_hotkey',6,-2),('m_down',13,-1)]]), #148
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_state',112,-1)]]), #149
('_struct',[[('m_soundtrack',6,-1)]]), #150
('_struct',[[('m_key',112,-2),('m_flags',112,-1)]]), #151
('_struct',[[('m_error',87,-2),('m_abil',95,-1)]]), #152
('_int',[(0,19)]), #153
('_struct',[[('m_decrementMs',153,-1)]]), #154
('_struct',[[('m_portraitId',87,-1)]]), #155
('_struct',[[('m_functionName',20,-1)]]), #156
('_struct',[[('m_result',87,-1)]]), #157
('_struct',[[('m_gameMenuItemIndex',87,-1)]]), #158
('_int',[(-32768,16)]), #159
('_struct',[[('m_wheelSpin',159,-2),('m_flags',112,-1)]]), #160
('_struct',[[('m_button',79,-1)]]), #161
('_struct',[[('m_cutsceneId',87,-2),('m_bookmarkName',20,-1)]]), #162
('_struct',[[('m_cutsceneId',87,-1)]]), #163
('_struct',[[('m_cutsceneId',87,-3),('m_conversationLine',20,-2),('m_altConversationLine',20,-1)]]), #164
('_struct',[[('m_cutsceneId',87,-2),('m_conversationLine',20,-1)]]), #165
('_struct',[[('m_leaveReason',1,-1)]]), #166
('_struct',[[('m_observe',24,-7),('m_name',9,-6),('m_toonHandle',121,-5),('m_clanTag',41,-4),('m_clanLogo',42,-3),('m_hijack',13,-2),('m_hijackCloneGameUserId',59,-1)]]), #167
('_optional',[83]), #168
('_struct',[[('m_state',24,-2),('m_sequence',168,-1)]]), #169
('_struct',[[('m_sequence',168,-2),('m_target',88,-1)]]), #170
('_struct',[[('m_sequence',168,-2),('m_target',89,-1)]]), #171
('_struct',[[('m_catalog',10,-4),('m_entry',79,-3),('m_field',9,-2),('m_value',9,-1)]]), #172
('_struct',[[('m_index',6,-1)]]), #173
('_struct',[[('m_shown',13,-1)]]), #174
('_struct',[[('m_recipient',12,-2),('m_string',30,-1)]]), #175
('_struct',[[('m_recipient',12,-2),('m_point',114,-1)]]), #176
('_struct',[[('m_progress',87,-1)]]), #177
('_struct',[[('m_status',24,-1)]]), #178
('_struct',[[('m_abilLink',79,-3),('m_abilCmdIndex',2,-2),('m_buttonLink',79,-1)]]), #179
('_struct',[[('m_behaviorLink',79,-2),('m_buttonLink',79,-1)]]), #180
('_choice',[(0,2),{0:('None',85),1:('Ability',179),2:('Behavior',180),3:('Vitals',159)}]), #181
('_struct',[[('m_announcement',181,-4),('m_announceLink',79,-3),('m_otherUnitTag',6,-2),('m_unitTag',6,-1)]]), #182
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #183
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_x',10,2),('m_y',10,3)]]), #184
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',59,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #185
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #186
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #187
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',87,2)]]), #188
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #189
('_array',[(0,10),87]), #190
('_struct',[[('m_firstUnitIndex',6,0),('m_items',190,1)]]), #191
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #192
('_struct',[[('m_key',29,0)]]), #193
('_struct',[[('__parent',193,0),('m_value',29,1)]]), #194
('_array',[(0,6),194]), #195
('_optional',[195]), #196
('_struct',[[('__parent',193,0),('m_value',87,1)]]), #197
('_array',[(0,6),197]), #198
('_optional',[198]), #199
('_struct',[[('m_eventName',29,0),('m_stringData',196,1),('m_intData',199,2),('m_fixedData',199,3)]]), #200
('_struct',[[('m_value',6,0),('m_time',6,1)]]), #201
('_array',[(0,6),201]), #202
('_array',[(0,5),202]), #203
('_struct',[[('m_name',29,0),('m_values',203,1)]]), #204
('_array',[(0,21),204]), #205
('_struct',[[('m_instanceList',205,0)]]), #206
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (78, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (77, 'NNet.Game.SUserOptionsEvent'),
9: (70, 'NNet.Game.SBankFileEvent'),
10: (72, 'NNet.Game.SBankSectionEvent'),
11: (73, 'NNet.Game.SBankKeyEvent'),
12: (74, 'NNet.Game.SBankValueEvent'),
13: (76, 'NNet.Game.SBankSignatureEvent'),
14: (81, 'NNet.Game.SCameraSaveEvent'),
21: (82, 'NNet.Game.SSaveGameEvent'),
22: (78, 'NNet.Game.SSaveGameDoneEvent'),
23: (78, 'NNet.Game.SLoadGameDoneEvent'),
25: (84, 'NNet.Game.SCommandManagerResetEvent'),
26: (92, 'NNet.Game.SGameCheatEvent'),
27: (98, 'NNet.Game.SCmdEvent'),
28: (106, 'NNet.Game.SSelectionDeltaEvent'),
29: (107, 'NNet.Game.SControlGroupUpdateEvent'),
30: (109, 'NNet.Game.SSelectionSyncCheckEvent'),
32: (110, 'NNet.Game.STriggerChatMessageEvent'),
34: (111, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (113, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (115, 'NNet.Game.STriggerPingEvent'),
37: (116, 'NNet.Game.SBroadcastCheatEvent'),
38: (117, 'NNet.Game.SAllianceEvent'),
39: (118, 'NNet.Game.SUnitClickEvent'),
40: (119, 'NNet.Game.SUnitHighlightEvent'),
41: (120, 'NNet.Game.STriggerReplySelectedEvent'),
43: (125, 'NNet.Game.SHijackReplayGameEvent'),
44: (78, 'NNet.Game.STriggerSkippedEvent'),
45: (128, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (135, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (136, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (137, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (141, 'NNet.Game.SCameraUpdateEvent'),
50: (78, 'NNet.Game.STriggerAbortMissionEvent'),
55: (127, 'NNet.Game.STriggerDialogControlEvent'),
56: (131, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (142, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (145, 'NNet.Game.STriggerMouseClickedEvent'),
59: (146, 'NNet.Game.STriggerMouseMovedEvent'),
60: (147, 'NNet.Game.SAchievementAwardedEvent'),
61: (148, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (149, 'NNet.Game.STriggerTargetModeUpdateEvent'),
64: (150, 'NNet.Game.STriggerSoundtrackDoneEvent'),
66: (151, 'NNet.Game.STriggerKeyPressedEvent'),
67: (156, 'NNet.Game.STriggerMovieFunctionEvent'),
76: (152, 'NNet.Game.STriggerCommandErrorEvent'),
86: (78, 'NNet.Game.STriggerMovieStartedEvent'),
87: (78, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (154, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (155, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (157, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (158, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (160, 'NNet.Game.STriggerMouseWheelEvent'),
95: (161, 'NNet.Game.STriggerButtonPressedEvent'),
96: (78, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (162, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (163, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (164, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (165, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (166, 'NNet.Game.SGameUserLeaveEvent'),
102: (167, 'NNet.Game.SGameUserJoinEvent'),
103: (169, 'NNet.Game.SCommandManagerStateEvent'),
104: (170, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (171, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (132, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (133, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (134, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (172, 'NNet.Game.SCatalogModifyEvent'),
110: (173, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (78, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (174, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (175, 'NNet.Game.SChatMessage'),
1: (176, 'NNet.Game.SPingMessage'),
2: (177, 'NNet.Game.SLoadingProgressMessage'),
3: (78, 'NNet.Game.SServerPingMessage'),
4: (178, 'NNet.Game.SReconnectNotifyMessage'),
5: (182, 'NNet.Game.SPlayerAnnounceMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
1: (183, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (185, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (186, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (187, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (188, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (183, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (189, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (191, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (192, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
10: (200, 'NNet.Replay.Tracker.SStatGameEvent'),
11: (206, 'NNet.Replay.Tracker.SScoreResultEvent'),
12: (184, 'NNet.Replay.Tracker.SUnitRevivedEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the inital lobby).
replay_initdata_typeid = 69
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
    # Decodes events prefixed with a gameloop and possibly userid
    #
    # Generator yielding one dict per event.  Wire layout per record:
    # gameloop delta (SVarUint32), optional userid, event id, event struct.
    # Bookkeeping keys prefixed with '_' are added to every yielded dict.
    gameloop = 0
    while not decoder.done():
        start_bits = decoder.used_bits()
        # decode the gameloop delta before each event
        delta = _varuint32_value(decoder.instance(svaruint32_typeid))
        gameloop += delta
        # decode the userid before each event
        if decode_user_id:
            userid = decoder.instance(replay_userid_typeid)
        # decode the event id
        eventid = decoder.instance(eventid_typeid)
        typeid, typename = event_types.get(eventid, (None, None))
        if typeid is None:
            # unknown event id: the stream is unparseable from here on
            raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
        # decode the event struct instance
        event = decoder.instance(typeid)
        event['_event'] = typename
        event['_eventid'] = eventid
        # insert gameloop and userid
        event['_gameloop'] = gameloop
        if decode_user_id:
            # userid is only bound when decode_user_id is true (guarded above)
            event['_userid'] = userid
        # the next event is byte aligned
        decoder.byte_align()
        # insert bits used in stream
        event['_bits'] = decoder.used_bits() - start_bits
        yield event
def decode_replay_game_events(contents):
    """Yield every game event decoded from the *contents* byte string."""
    bits = BitPackedDecoder(contents, typeinfos)
    stream = _decode_event_stream(bits,
                                  game_eventid_typeid,
                                  game_event_types,
                                  decode_user_id=True)
    for game_event in stream:
        yield game_event
def decode_replay_message_events(contents):
    """Yield every chat/ping message event decoded from *contents*."""
    bits = BitPackedDecoder(contents, typeinfos)
    stream = _decode_event_stream(bits,
                                  message_eventid_typeid,
                                  message_event_types,
                                  decode_user_id=True)
    for message_event in stream:
        yield message_event
def decode_replay_tracker_events(contents):
    """Yield every tracker event decoded from *contents*.

    Tracker events carry no per-event userid, and use the versioned
    (not bit-packed) encoding.
    """
    versioned = VersionedDecoder(contents, typeinfos)
    stream = _decode_event_stream(versioned,
                                  tracker_eventid_typeid,
                                  tracker_event_types,
                                  decode_user_id=False)
    for tracker_event in stream:
        yield tracker_event
def decode_replay_header(contents):
    """Decode and return the replay header (game version and length)."""
    return VersionedDecoder(contents, typeinfos).instance(replay_header_typeid)
def decode_replay_details(contents):
    """Decode and return the overall game details record."""
    return VersionedDecoder(contents, typeinfos).instance(game_details_typeid)
def decode_replay_initdata(contents):
    """Decode and return the replay init data (the initial lobby state)."""
    return BitPackedDecoder(contents, typeinfos).instance(replay_initdata_typeid)
def decode_replay_attributes_events(contents):
    """Decode and return the attributes dict from the *contents* byte string.

    The result maps 'scopes' -> {scope: {attrid: [value, ...]}} plus the
    'source' and 'mapNamespace' header fields.
    """
    buf = BitPackedBuffer(contents, 'little')
    attributes = {}
    if not buf.done():
        attributes['source'] = buf.read_bits(8)
        attributes['mapNamespace'] = buf.read_bits(32)
        # declared attribute count; the end of the buffer is authoritative
        count = buf.read_bits(32)
        attributes['scopes'] = {}
        while not buf.done():
            value = {}
            value['namespace'] = buf.read_bits(32)
            value['attrid'] = attrid = buf.read_bits(32)
            scope = buf.read_bits(8)
            # 4 stored bytes, reversed, with NUL padding removed
            value['value'] = buf.read_aligned_bytes(4)[::-1].strip('\x00')
            scopes = attributes['scopes']
            scopes.setdefault(scope, {}).setdefault(attrid, []).append(value)
    return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
    """Pack a recycle-table index and recycle counter into one unit tag."""
    # 262144 == 2 ** 18: the recycle counter occupies the low 18 bits.
    return unitTagIndex * 262144 + unitTagRecycle
def unit_tag_index(unitTag):
    """Extract the 14-bit index field from a full unit tag."""
    index_mask = 0x3fff  # same value as 0x00003fff
    return (unitTag >> 18) & index_mask
def unit_tag_recycle(unitTag):
    """Extract the 18-bit recycle-counter field from a full unit tag."""
    recycle_mask = 0x3ffff  # same value as 0x0003ffff
    return unitTag & recycle_mask
| mit |
franciscofranco/Tuna_JB_pre1 | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
	"""Return the interval from *src* to *dst* (both nanoseconds) in msec."""
	nsecs_per_msec = 1000000.0
	return (dst - src) / nsecs_per_msec
# Display a process of transmitting a packet
def print_transmit(hunk):
	# One output line per transmitted skb: device, length, enqueue time,
	# Qdisc residency and netdevice residency in msec.
	# Honor the "dev=" option: skip hunks for other devices.
	if dev != 0 and hunk['dev'].find(dev) < 0:
		return
	print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
		(hunk['dev'], hunk['len'],
		nsecs_secs(hunk['queue_t']),
		nsecs_nsecs(hunk['queue_t'])/1000,
		diff_msec(hunk['queue_t'], hunk['xmit_t']),
		diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
	# Pretty-print one receive "hunk": the hard irqs that raised NET_RX,
	# the softirq entry, then each napi_poll / netif_receive_skb event
	# with its downstream consumer (copy-to-user, kfree_skb, consume_skb).
	show_hunk = 0
	irq_list = hunk['irq_list']
	cpu = irq_list[0]['cpu']
	base_t = irq_list[0]['irq_ent_t']
	# check if this hunk should be showed (honor the "dev=" option)
	if dev != 0:
		for i in range(len(irq_list)):
			if irq_list[i]['name'].find(dev) >= 0:
				show_hunk = 1
				break
	else:
		show_hunk = 1
	if show_hunk == 0:
		return
	print "%d.%06dsec cpu=%d" % \
		(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
	# all timestamps below are printed as offsets from base_t
	for i in range(len(irq_list)):
		print PF_IRQ_ENTRY % \
			(diff_msec(base_t, irq_list[i]['irq_ent_t']),
			irq_list[i]['irq'], irq_list[i]['name'])
		print PF_JOINT
		irq_event_list = irq_list[i]['event_list']
		for j in range(len(irq_event_list)):
			irq_event = irq_event_list[j]
			if irq_event['event'] == 'netif_rx':
				print PF_NET_RX % \
					(diff_msec(base_t, irq_event['time']),
					irq_event['skbaddr'])
				print PF_JOINT
	print PF_SOFT_ENTRY % \
		diff_msec(base_t, hunk['sirq_ent_t'])
	print PF_JOINT
	event_list = hunk['event_list']
	for i in range(len(event_list)):
		event = event_list[i]
		if event['event_name'] == 'napi_poll':
			print PF_NAPI_POLL % \
				(diff_msec(base_t, event['event_t']), event['dev'])
			if i == len(event_list) - 1:
				print ""
			else:
				print PF_JOINT
		else:
			print PF_NET_RECV % \
				(diff_msec(base_t, event['event_t']), event['skbaddr'],
				event['len'])
			# 'comm' means the skb reached skb_copy_datagram_iovec;
			# 'handle' means it was freed (kfree_skb/consume_skb).
			if 'comm' in event.keys():
				print PF_WJOINT
				print PF_CPY_DGRAM % \
					(diff_msec(base_t, event['comm_t']),
					event['pid'], event['comm'])
			elif 'handle' in event.keys():
				print PF_WJOINT
				if event['handle'] == "kfree_skb":
					print PF_KFREE_SKB % \
						(diff_msec(base_t,
						event['comm_t']),
						event['location'])
				elif event['handle'] == "consume_skb":
					print PF_CONS_SKB % \
						diff_msec(base_t,
						event['comm_t'])
	print PF_JOINT
def trace_begin():
	"""Parse the script's command line options into the module globals."""
	global show_tx
	global show_rx
	global dev
	global debug
	# sys.argv[0] is the script name; everything after it is an option
	for arg in sys.argv[1:]:
		if arg == 'tx':
			show_tx = 1
		elif arg == 'rx':
			show_rx = 1
		elif arg.startswith('dev='):
			dev = arg[4:]
		elif arg == 'debug':
			debug = 1
	# with no explicit selection, show both charts
	if show_tx == 0 and show_rx == 0:
		show_tx = 1
		show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
# Each callback below is a thin trampoline: it normalizes the tracepoint
# arguments into a tuple (name, context, cpu, nsecs, pid, comm, ...) and
# queues it on all_event_list for trace_end() to replay in time order.
# The three softirq callbacks additionally drop every vector but NET_RX.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
# NOTE(review): softirq_exit/raise look up their vec symbol via the
# "irq__softirq_entry" event name; presumably all three softirq events
# share one vec symbol table -- confirm against Util.symbol_str.
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
	if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
		return
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
	all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
			irq, irq_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			irq, irq_name)
	all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
	all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			napi, dev_name)
	all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
			skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, dev_name)
	all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen, rc, dev_name):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen, rc ,dev_name)
	all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, protocol, location):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, protocol, location)
	all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr)
	all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
			skbaddr, skblen):
	event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
			skbaddr, skblen)
	all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
	# Push a new hard-irq record onto this cpu's irq stack.
	(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
	if cpu not in irq_dic.keys():
		irq_dic[cpu] = []
	irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
	irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
	# Pop the matching hard-irq record; keep it only when it raised a
	# NET_RX softirq (i.e. it grew an 'event_list' during its lifetime).
	(name, context, cpu, time, pid, comm, irq, ret) = event_info
	if cpu not in irq_dic.keys():
		return
	irq_record = irq_dic[cpu].pop()
	if irq != irq_record['irq']:
		return
	irq_record.update({'irq_ext_t':time})
	# if an irq doesn't include NET_RX softirq, drop.
	if 'event_list' in irq_record.keys():
		irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
	# Record a NET_RX raise inside the current hard-irq on this cpu.
	(name, context, cpu, time, pid, comm, vec) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'sirq_raise'})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
	# A NET_RX softirq starts on this cpu: open a fresh collection record.
	(name, context, cpu, time, pid, comm, vec) = event_info
	net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
	# Close the NET_RX softirq on this cpu: merge the pending hard-irq
	# records and the softirq's event list into one receive hunk.
	(name, context, cpu, time, pid, comm, vec) = event_info
	irq_list = []
	event_list = 0
	if cpu in irq_dic.keys():
		irq_list = irq_dic[cpu]
		del irq_dic[cpu]
	if cpu in net_rx_dic.keys():
		sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
		event_list = net_rx_dic[cpu]['event_list']
		del net_rx_dic[cpu]
	# incomplete hunks (no irq or no softirq entry seen) are dropped
	if irq_list == [] or event_list == 0:
		return
	rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
		    'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
	receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
	# Log a napi poll inside the currently-open NET_RX softirq (if any).
	(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		event_list = net_rx_dic[cpu]['event_list']
		rec_data = {'event_name':'napi_poll',
				'dev':dev_name, 'event_t':time}
		event_list.append(rec_data)
def handle_netif_rx(event_info):
	# Attach a netif_rx event to the hard-irq currently open on this cpu.
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu not in irq_dic.keys() \
	or len(irq_dic[cpu]) == 0:
		return
	irq_record = irq_dic[cpu].pop()
	if 'event_list' in irq_record.keys():
		irq_event_list = irq_record['event_list']
	else:
		irq_event_list = []
	irq_event_list.append({'time':time, 'event':'netif_rx',
		'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
	irq_record.update({'event_list':irq_event_list})
	irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
	# Record a received skb in the open softirq AND in rx_skb_list so a
	# later copy/free event can be matched back to it by skbaddr.
	global of_count_rx_skb_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	if cpu in net_rx_dic.keys():
		rec_data = {'event_name':'netif_receive_skb',
			    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
		event_list = net_rx_dic[cpu]['event_list']
		event_list.append(rec_data)
		rx_skb_list.insert(0, rec_data)
		# bound the matching buffer; count what falls off the end
		if len(rx_skb_list) > buffer_budget:
			rx_skb_list.pop()
			of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
	# A packet entered the Qdisc: remember its enqueue time, bounded by
	# buffer_budget (overflow is counted for the debug report).
	global of_count_tx_queue_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, dev_name) = event_info
	skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
	tx_queue_list.insert(0, skb)
	if len(tx_queue_list) > buffer_budget:
		tx_queue_list.pop()
		of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
	# On successful xmit, move the matching skb from the Qdisc list to
	# the xmit list, stamping its hand-off time.
	global of_count_tx_xmit_list
	(name, context, cpu, time, pid, comm,
		skbaddr, skblen, rc, dev_name) = event_info
	if rc == 0: # NETDEV_TX_OK
		for i in range(len(tx_queue_list)):
			skb = tx_queue_list[i]
			if skb['skbaddr'] == skbaddr:
				skb['xmit_t'] = time
				tx_xmit_list.insert(0, skb)
				del tx_queue_list[i]
				if len(tx_xmit_list) > buffer_budget:
					tx_xmit_list.pop()
					of_count_tx_xmit_list += 1
				return
def handle_kfree_skb(event_info):
	# A skb was dropped.  Match it (by address) against, in order:
	# packets still in the Qdisc (silently discard the record), packets
	# already xmitted (count as freed), and received packets (annotate
	# with the dropping task and location).
	(name, context, cpu, time, pid, comm,
		skbaddr, protocol, location) = event_info
	for i in range(len(tx_queue_list)):
		skb = tx_queue_list[i]
		if skb['skbaddr'] == skbaddr:
			del tx_queue_list[i]
			return
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if rec_data['skbaddr'] == skbaddr:
			rec_data.update({'handle':"kfree_skb",
					'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
def handle_consume_skb(event_info):
	# Normal (non-drop) free of a transmitted skb: stamp the free time
	# and move the record to the finished-transmit list.
	(name, context, cpu, time, pid, comm, skbaddr) = event_info
	for i in range(len(tx_xmit_list)):
		skb = tx_xmit_list[i]
		if skb['skbaddr'] == skbaddr:
			skb['free_t'] = time
			tx_free_list.append(skb)
			del tx_xmit_list[i]
			return
def handle_skb_copy_datagram_iovec(event_info):
	# A received skb was copied to user space: annotate its record with
	# the consuming task and remove it from the pending-match list.
	(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
	for i in range(len(rx_skb_list)):
		rec_data = rx_skb_list[i]
		if skbaddr == rec_data['skbaddr']:
			rec_data.update({'handle':"skb_copy_datagram_iovec",
				'comm':comm, 'pid':pid, 'comm_t':time})
			del rx_skb_list[i]
			return
| gpl-2.0 |
USGSDenverPychron/pychron | pychron/extraction_line/tasks/extraction_line_pane.py | 1 | 2734 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.tasks.traits_dock_pane import TraitsDockPane
from pyface.tasks.traits_task_pane import TraitsTaskPane
from traits.api import Any
from traitsui.api import View, UItem, InstanceEditor, ListEditor
# ============= standard library imports ========================
# ============= local library imports ==========================
class CanvasPane(TraitsTaskPane):
    """Central task pane showing the extraction line canvas.

    Renders the single ``canvas`` trait, or a notebook of plugin-provided
    ``canvases``, depending on the ``plugin_canvases`` flag.
    """
    id = 'pychron.extraction_line.canvas'
    name = 'Extraction Line'
    def traits_view(self):
        # exactly one of the two UItems is shown (defined_when is mutually
        # exclusive on plugin_canvases)
        v = View(UItem('canvas',
                       defined_when='not plugin_canvases',
                       editor=InstanceEditor(),
                       style='custom'),
                 UItem('canvases',
                       defined_when='plugin_canvases',
                       editor=ListEditor(page_name='.display_name',
                                         use_notebook=True),
                       style='custom'))
        return v
class CanvasDockPane(TraitsDockPane):
    """Dockable variant of the extraction line canvas pane."""
    id = 'pychron.extraction_line.canvas_dock'
    name = 'Extraction Line Canvas'
    # canvas object to render; injected by the task
    canvas = Any
    def traits_view(self):
        v = View(UItem('canvas',
                       editor=InstanceEditor(),
                       style='custom',
                       width=500))
        return v
class GaugePane(TraitsDockPane):
    """Dock pane showing the gauge manager (hidden when there is none)."""
    name = 'Gauges'
    id = 'pychron.extraction_line.gauges'
    def traits_view(self):
        v = View(UItem('gauge_manager',
                       editor=InstanceEditor(),
                       style='custom',
                       height=125,
                       defined_when='gauge_manager'))
        return v
class ExplanationPane(TraitsDockPane):
    """Dock pane showing the valve/canvas explanation widget."""
    name = 'Explanation'
    id = 'pychron.extraction_line.explanation'
    def traits_view(self):
        v = View(UItem('explanation',
                       style='custom'))
        return v
# ============= EOF =============================================
| apache-2.0 |
tlein/Ancona | Test/ExtLibs/gtest-1.7.0/test/gtest_throw_on_failure_test.py | 2917 | 5766 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
  """Sets environment variable *env_var* (upper-cased) to *value*.

  A value of None unsets the variable instead.
  """
  name = env_var.upper()
  if value is None:
    os.environ.pop(name, None)
  else:
    os.environ[name] = value
def Run(command):
  """Runs a command; returns True/False if its exit code is/isn't 0."""
  print 'Running "%s". . .' % ' '.join(command)
  p = gtest_test_utils.Subprocess(command)
  # NOTE(review): p.exited is presumably False when the process died on a
  # signal rather than exiting -- see gtest_test_utils.Subprocess.
  return p.exited and p.exit_code == 0
# The tests. TODO(wan@google.com): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
  """Tests the throw-on-failure mode."""
  def RunAndVerify(self, env_var_value, flag_value, should_fail):
    """Runs gtest_throw_on_failure_test_ and verifies that it does
    (or does not) exit with a non-zero code.
    Args:
      env_var_value: value of the GTEST_THROW_ON_FAILURE environment
                     variable; None if the variable should be unset.
      flag_value:    value of the --gtest_throw_on_failure flag;
                     None if the flag should not be present.
      should_fail:   True iff the program is expected to fail.
    """
    SetEnvVar(THROW_ON_FAILURE, env_var_value)
    if env_var_value is None:
      env_var_value_msg = ' is not set'
    else:
      env_var_value_msg = '=' + env_var_value
    # Build the flag: absent, explicitly disabled (=0), or enabled.
    if flag_value is None:
      flag = ''
    elif flag_value == '0':
      flag = '--%s=0' % THROW_ON_FAILURE
    else:
      flag = '--%s' % THROW_ON_FAILURE
    command = [EXE_PATH]
    if flag:
      command.append(flag)
    if should_fail:
      should_or_not = 'should'
    else:
      should_or_not = 'should not'
    failed = not Run(command)
    # Restore the environment so one test cannot leak into the next.
    SetEnvVar(THROW_ON_FAILURE, None)
    msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
           'exit code.' %
           (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
            should_or_not))
    self.assert_(failed == should_fail, msg)
  def testDefaultBehavior(self):
    """Tests the behavior of the default mode."""
    self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
  def testThrowOnFailureEnvVar(self):
    """Tests using the GTEST_THROW_ON_FAILURE environment variable."""
    self.RunAndVerify(env_var_value='0',
                      flag_value=None,
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value=None,
                      should_fail=True)
  def testThrowOnFailureFlag(self):
    """Tests using the --gtest_throw_on_failure flag."""
    self.RunAndVerify(env_var_value=None,
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value=None,
                      flag_value='1',
                      should_fail=True)
  def testThrowOnFailureFlagOverridesEnvVar(self):
    """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
    self.RunAndVerify(env_var_value='0',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='0',
                      flag_value='1',
                      should_fail=True)
    self.RunAndVerify(env_var_value='1',
                      flag_value='0',
                      should_fail=False)
    self.RunAndVerify(env_var_value='1',
                      flag_value='1',
                      should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
windedge/odoo | openerp/addons/test_access_rights/tests/test_ir_rules.py | 299 | 1220 | import openerp.exceptions
from openerp.tests.common import TransactionCase
class TestRules(TransactionCase):
    """Checks that a global ir.rule filters record access for non-admins."""
    def setUp(self):
        super(TestRules, self).setUp()
        # one allowed record (val > 0) and one forbidden record (val <= 0)
        self.id1 = self.env['test_access_right.some_obj']\
            .create({'val': 1}).id
        self.id2 = self.env['test_access_right.some_obj']\
            .create({'val': -1}).id
        # create a global rule forbidding access to records with a negative
        # (or zero) val
        self.env['ir.rule'].create({
            'name': 'Forbid negatives',
            'model_id': self.browse_ref('test_access_rights.model_test_access_right_some_obj').id,
            'domain_force': "[('val', '>', 0)]"
        })
    def test_basic_access(self):
        env = self.env(user=self.browse_ref('base.public_user'))
        # put forbidden record in cache
        browse2 = env['test_access_right.some_obj'].browse(self.id2)
        # this is the one we want
        browse1 = env['test_access_right.some_obj'].browse(self.id1)
        # this should not blow up
        self.assertEqual(browse1.val, 1)
        # but this should: reading a field on a rule-filtered record must
        # raise AccessError even though the record is already in cache
        with self.assertRaises(openerp.exceptions.AccessError):
            self.assertEqual(browse2.val, -1)
| agpl-3.0 |
mozilla/kitsune | wsgi/app.py | 1 | 1084 | """
WSGI config for kitsune project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
# newrelic import & initialization must come first
# https://docs.newrelic.com/docs/agents/python-agent/installation/python-agent-advanced-integration#manual-integration
try:
    import newrelic.agent
except ImportError:
    # sentinel: 'newrelic' is either the package or False, tested below
    newrelic = False
else:
    newrelic.agent.initialize('newrelic.ini')
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'kitsune.settings') # NOQA
from django.core.wsgi import get_wsgi_application
from decouple import config
application = get_wsgi_application()
# Optionally serve static files straight from the WSGI process.
if config('ENABLE_WHITENOISE', default=False, cast=bool):
    from whitenoise.django import DjangoWhiteNoise
    application = DjangoWhiteNoise(application)
# Add NewRelic
# Only wrap the app when the agent imported AND a license key is configured.
if newrelic:
    newrelic_license_key = config('NEW_RELIC_LICENSE_KEY', default=None)
    if newrelic_license_key:
        application = newrelic.agent.WSGIApplicationWrapper(application)
| bsd-3-clause |
fhaoquan/kbengine | kbe/res/scripts/common/Lib/test/test_wave.py | 92 | 4023 | from test.support import TESTFN
import unittest
from test import audiotests
from audioop import byteswap
import sys
import wave
class WaveTest(audiotests.AudioWriteTests,
               audiotests.AudioTestsWithSourceFile):
    # Common base: run the generic audio read/write test machinery
    # against the stdlib wave module; subclasses supply the fixture data.
    module = wave
class WavePCM8Test(WaveTest, unittest.TestCase):
    """Fixture for pluck-pcm8.wav (1-byte samples; no byteswap needed)."""
    sndfilename = 'pluck-pcm8.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 1
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      827F CB80 B184 0088 4B86 C883 3F81 837E 387A 3473 A96B 9A66 \
      6D64 4662 8E60 6F60 D762 7B68 936F 5877 177B 757C 887B 5F7B \
      917A BE7B 3C7C E67F 4F84 C389 418E D192 6E97 0296 FF94 0092 \
      C98E D28D 6F8F 4E8F 648C E38A 888A AB8B D18E 0B91 368E C48A \
      """)
class WavePCM16Test(WaveTest, unittest.TestCase):
    """Fixture for pluck-pcm16.wav (2-byte samples)."""
    sndfilename = 'pluck-pcm16.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 2
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022EFFEA 4B5C00F9 311404EF 80DC0843 CBDF06B2 48AA03F3 BFE701B2 036BFE7C \
      B857FA3E B4B2F34F 2999EBCA 1A5FE6D7 EDFCE491 C626E279 0E05E0B8 EF27E02D \
      5754E275 FB31E843 1373EF89 D827F72C 978BFB7A F5F7FC11 0866FB9C DF30FB42 \
      117FFA36 3EE4FB5D BC75FCB6 66D5FF5F CF16040E 43220978 C1BC0EC8 511F12A4 \
      EEDF1755 82061666 7FFF1446 80001296 499C0EB2 52BA0DB9 EFB70F5C CE400FBC \
      E4B50CEB 63440A5A 08CA0A1F 2BBA0B0B 51460E47 8BCB113C B6F50EEA 44150A59 \
      """)
    # hex literal is big-endian: swap each 2-byte sample on little-endian hosts
    if sys.byteorder != 'big':
        frames = byteswap(frames, 2)
class WavePCM24Test(WaveTest, unittest.TestCase):
    """Fixture for pluck-pcm24.wav (3-byte samples)."""
    sndfilename = 'pluck-pcm24.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 3
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022D65FFEB9D 4B5A0F00FA54 3113C304EE2B 80DCD6084303 \
      CBDEC006B261 48A99803F2F8 BFE82401B07D 036BFBFE7B5D \
      B85756FA3EC9 B4B055F3502B 299830EBCB62 1A5CA7E6D99A \
      EDFA3EE491BD C625EBE27884 0E05A9E0B6CF EF2929E02922 \
      5758D8E27067 FB3557E83E16 1377BFEF8402 D82C5BF7272A \
      978F16FB7745 F5F865FC1013 086635FB9C4E DF30FCFB40EE \
      117FE0FA3438 3EE6B8FB5AC3 BC77A3FCB2F4 66D6DAFF5F32 \
      CF13B9041275 431D69097A8C C1BB600EC74E 5120B912A2BA \
      EEDF641754C0 8207001664B7 7FFFFF14453F 8000001294E6 \
      499C1B0EB3B2 52B73E0DBCA0 EFB2B20F5FD8 CE3CDB0FBE12 \
      E4B49C0CEA2D 6344A80A5A7C 08C8FE0A1FFE 2BB9860B0A0E \
      51486F0E44E1 8BCC64113B05 B6F4EC0EEB36 4413170A5B48 \
      """)
    # hex literal is big-endian: swap each 3-byte sample on little-endian hosts
    if sys.byteorder != 'big':
        frames = byteswap(frames, 3)
class WavePCM32Test(WaveTest, unittest.TestCase):
    """Fixture for pluck-pcm32.wav (4-byte samples)."""
    sndfilename = 'pluck-pcm32.wav'
    sndfilenframes = 3307
    nchannels = 2
    sampwidth = 4
    framerate = 11025
    nframes = 48
    comptype = 'NONE'
    compname = 'not compressed'
    frames = bytes.fromhex("""\
      022D65BCFFEB9D92 4B5A0F8000FA549C 3113C34004EE2BC0 80DCD680084303E0 \
      CBDEC0C006B26140 48A9980003F2F8FC BFE8248001B07D92 036BFB60FE7B5D34 \
      B8575600FA3EC920 B4B05500F3502BC0 29983000EBCB6240 1A5CA7A0E6D99A60 \
      EDFA3E80E491BD40 C625EB80E27884A0 0E05A9A0E0B6CFE0 EF292940E0292280 \
      5758D800E2706700 FB3557D8E83E1640 1377BF00EF840280 D82C5B80F7272A80 \
      978F1600FB774560 F5F86510FC101364 086635A0FB9C4E20 DF30FC40FB40EE28 \
      117FE0A0FA3438B0 3EE6B840FB5AC3F0 BC77A380FCB2F454 66D6DA80FF5F32B4 \
      CF13B980041275B0 431D6980097A8C00 C1BB60000EC74E00 5120B98012A2BAA0 \
      EEDF64C01754C060 820700001664B780 7FFFFFFF14453F40 800000001294E6E0 \
      499C1B000EB3B270 52B73E000DBCA020 EFB2B2E00F5FD880 CE3CDB400FBE1270 \
      E4B49CC00CEA2D90 6344A8800A5A7CA0 08C8FE800A1FFEE0 2BB986C00B0A0E00 \
      51486F800E44E190 8BCC6480113B0580 B6F4EC000EEB3630 441317800A5B48A0 \
      """)
    # hex literal is big-endian: swap each 4-byte sample on little-endian hosts
    if sys.byteorder != 'big':
        frames = byteswap(frames, 4)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
Leibniz137/testinfra | testinfra/test/test_modules.py | 1 | 12968 | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import crypt
import datetime
import re
import time
import pytest
from testinfra.modules.socket import parse_socketspec
# Mark that parametrizes a test across all supported docker base images.
all_images = pytest.mark.testinfra_hosts(*[
    "docker://{}".format(image)
    for image in (
        "debian_jessie", "centos_7", "ubuntu_trusty", "fedora",
        "ubuntu_xenial",
    )
])
@all_images
def test_package(docker_image, Package):
    """openssh-server is installed and reports the distro-specific version."""
    ssh = Package("openssh-server")
    version = {
        "debian_jessie": "1:6.7",
        "debian_wheezy": "1:6.0",
        "fedora": "7.",
        "ubuntu_trusty": "1:6.6",
        "ubuntu_xenial": "1:7.2",
        "centos_7": "6.6",
    }[docker_image]
    assert ssh.is_installed
    assert ssh.version.startswith(version)
    release = {
        "fedora": ".fc25",
        "centos_7": ".el7",
        "debian_jessie": None,
        "debian_wheezy": None,
        "ubuntu_trusty": None,
        "ubuntu_xenial": None,
    }[docker_image]
    # deb-based packages carry no separate "release" component, so the
    # accessor is expected to raise there.
    if release is None:
        with pytest.raises(NotImplementedError):
            ssh.release
    else:
        assert release in ssh.release
def test_held_package(Package):
    """Installed/version reporting still works for the (held) python package."""
    python = Package("python")
    assert python.is_installed
    assert python.version.startswith("2.7.9")
@all_images
def test_systeminfo(docker_image, SystemInfo):
    """SystemInfo facts (type/distribution/codename/release) per image."""
    assert SystemInfo.type == "linux"

    # FIX: use raw strings for the regexes.  Literals such as "^8\." rely on
    # Python passing unrecognised escapes through unchanged, which is a
    # DeprecationWarning since 3.6 and will eventually be a SyntaxError.
    # The raw-string forms are byte-identical, so behavior is unchanged.
    release, distribution, codename = {
        "debian_jessie": (r"^8\.", "debian", "jessie"),
        "debian_wheezy": (r"^7$", "debian", None),
        "centos_7": (r"^7$", "centos", None),
        "fedora": (r"^25$", "fedora", None),
        "ubuntu_trusty": (r"^14\.04$", "ubuntu", "trusty"),
        "ubuntu_xenial": (r"^16\.04$", "ubuntu", "xenial"),
    }[docker_image]
    assert SystemInfo.distribution == distribution
    assert SystemInfo.codename == codename
    assert re.match(release, SystemInfo.release)
@all_images
def test_ssh_service(docker_image, Service):
    """ssh service running/enabled state varies per image (see per-image ifs)."""
    # RedHat-family images name the unit "sshd", Debian-family "ssh".
    if docker_image in ("centos_7", "fedora"):
        name = "sshd"
    else:
        name = "ssh"
    ssh = Service(name)
    if docker_image == "ubuntu_xenial":
        assert not ssh.is_running
    else:
        assert ssh.is_running
    if docker_image in ("ubuntu_trusty", "ubuntu_xenial"):
        assert not ssh.is_enabled
    else:
        assert ssh.is_enabled
@pytest.mark.parametrize("name,running,enabled", [
    ("ntp", False, True),
    ("salt-minion", False, False),
])
def test_service(Command, Service, name, running, enabled):
    """Service state matches the expected (running, enabled) pair."""
    if name == "ntp":
        # Systemd say no but sysv say yes
        assert Command("systemctl is-enabled ntp").rc == 1
    service = Service(name)
    assert service.is_running == running
    assert service.is_enabled == enabled
def test_salt(Salt):
    """The Salt module proxies a salt-call (pkg.version) on the target."""
    ssh_version = Salt("pkg.version", "openssh-server", local=True)
    assert ssh_version.startswith("1:6.7")
def test_puppet_resource(PuppetResource):
    """PuppetResource returns a mapping keyed by resource title."""
    resource = PuppetResource("package", "openssh-server")
    assert resource["openssh-server"]["ensure"].startswith("1:6.7")
def test_facter(Facter):
    """Facter() returns all facts; Facter(name) only the requested one."""
    assert Facter()["lsbdistcodename"] == "jessie"
    assert Facter("lsbdistcodename") == {
        "lsbdistcodename": "jessie",
    }
def test_sysctl(Sysctl, Command):
    """Sysctl returns strings for text values and ints for numeric ones."""
    assert Sysctl("kernel.hostname") == Command.check_output("hostname")
    assert isinstance(Sysctl("kernel.panic"), int)
def test_parse_socketspec():
    """parse_socketspec splits a spec into a (protocol, host, port) tuple."""
    cases = [
        # spec, expected (protocol, host, port)
        ("tcp://22", ("tcp", None, 22)),
        ("tcp://:::22", ("tcp", "::", 22)),
        ("udp://0.0.0.0:22", ("udp", "0.0.0.0", 22)),
        # unix sockets keep the whole path (colons included), no port
        ("unix://can:be.any/thing:22", ("unix", "can:be.any/thing:22", None)),
    ]
    for spec, expected in cases:
        assert parse_socketspec(spec) == expected
def test_socket(TestinfraBackend, Socket):
    """Listening sockets are enumerated and matched by (partial) specs."""
    listening = Socket.get_listening_sockets()
    for spec in (
        "tcp://0.0.0.0:22",
        "tcp://:::22",
        "unix:///run/systemd/private",
    ):
        assert spec in listening
    # A spec without a host (or with a wildcard/local host) should still
    # match the sshd listener.
    for spec in (
        "tcp://22",
        "tcp://0.0.0.0:22",
        "tcp://127.0.0.1:22",
        "tcp://:::22",
        "tcp://::1:22",
    ):
        socket = Socket(spec)
        assert socket.is_listening
    assert not Socket("tcp://4242").is_listening
    if not TestinfraBackend.get_connection_type() == "docker":
        # FIXME
        for spec in (
            "tcp://22",
            "tcp://0.0.0.0:22",
        ):
            assert len(Socket(spec).clients) >= 1
@all_images
def test_process(docker_image, Process):
    """PID 1 attributes (ppid/euid/args/comm) per image init system."""
    init = Process.get(pid=1)
    assert init.ppid == 0
    assert init.euid == 0
    args, comm = {
        "debian_jessie": ("/sbin/init", "systemd"),
        "centos_7": ("/usr/sbin/init", "systemd"),
        "fedora": ("/usr/sbin/init", "systemd"),
        "ubuntu_trusty": ("/usr/sbin/sshd -D", "sshd"),
        "ubuntu_xenial": ("/sbin/init", "systemd"),
        "debian_wheezy": ("/usr/sbin/sshd -D", "sshd"),
    }[docker_image]
    assert init.args == args
    assert init.comm == comm
def test_user(User):
    """All account attributes of the sshd system user."""
    user = User("sshd")
    assert user.exists
    assert user.name == "sshd"
    assert user.uid == 105
    assert user.gid == 65534
    assert user.group == "nogroup"
    assert user.gids == [65534]
    assert user.groups == ["nogroup"]
    assert user.shell == "/usr/sbin/nologin"
    assert user.home == "/var/run/sshd"
    assert user.password == "*"
def test_user_user(User):
    """GECOS comment of the fixture "user" account is exposed."""
    user = User("user")
    assert user.exists
    assert user.gecos == "gecos.comment"
def test_user_expiration_date(User):
    """expiration_date is None when unset, else a datetime."""
    assert User("root").expiration_date is None
    assert User("user").expiration_date == datetime.datetime(2024, 10, 4, 0, 0)
def test_nonexistent_user(User):
    """User.exists is False for an unknown account."""
    assert not User("zzzzzzzzzz").exists
def test_current_user(User):
    """User() without argument resolves to the connecting user."""
    assert User().name == "root"
    pw = User().password
    # crypt(plaintext, full_hash) reuses the salt embedded in the hash, so a
    # matching password round-trips to the same hash.
    assert crypt.crypt("foo", pw) == pw
def test_group(Group):
    """Group existence and gid lookup."""
    assert Group("root").exists
    assert Group("root").gid == 0
def test_empty_command_output(Command):
    """check_output returns an empty string (not None) for silent commands."""
    assert Command.check_output("printf ''") == ""
def test_local_command(LocalCommand):
    """LocalCommand runs on the test-runner host rather than the target."""
    assert LocalCommand.check_output("true") == ""
def test_file(Command, SystemInfo, File):
    """Exercise the File module: regular file, symlink and fifo attributes."""
    Command.check_output("mkdir -p /d && printf foo > /d/f && chmod 600 /d/f")
    d = File("/d")
    assert d.is_directory
    assert not d.is_file
    f = File("/d/f")
    assert f.exists
    assert f.is_file
    assert f.content == b"foo"
    assert f.content_string == "foo"
    assert f.user == "root"
    assert f.uid == 0
    assert f.gid == 0
    assert f.group == "root"
    assert f.mode == 0o600
    assert f.contains("fo")
    assert not f.is_directory
    assert not f.is_symlink
    assert not f.is_pipe
    # A regular file "links to" itself.
    assert f.linked_to == "/d/f"
    assert f.size == 3
    assert f.md5sum == "acbd18db4cc2f85cedef654fccc4a4d8"
    assert f.sha256sum == (
        "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
    )
    Command.check_output("ln -fsn /d/f /d/l")
    # FIX: renamed the local from ``l`` to ``link`` -- a lone ``l`` is easily
    # confused with ``1``/``I`` (PEP 8 / flake8 E741).  Behavior unchanged.
    link = File("/d/l")
    assert link.is_symlink
    assert link.is_file
    assert link.linked_to == "/d/f"
    Command.check_output("rm -f /d/p && mkfifo /d/p")
    assert File("/d/p").is_pipe
def test_ansible_unavailable(Ansible):
    """The Ansible module raises a clear error on non-ansible backends."""
    with pytest.raises(RuntimeError) as excinfo:
        Ansible("setup")
    assert (
        'Ansible module is only available with ansible '
        'connection backend') in str(excinfo.value)
@pytest.mark.testinfra_hosts("ansible://debian_jessie")
def test_ansible_module(TestinfraBackend, Ansible):
    """Ansible backend: module calls, host variables and error reporting."""
    import ansible
    version = int(ansible.__version__.split(".", 1)[0])
    setup = Ansible("setup")["ansible_facts"]
    assert setup["ansible_lsb"]["codename"] == "jessie"
    passwd = Ansible("file", "path=/etc/passwd state=file")
    assert passwd["changed"] is False
    assert passwd["gid"] == 0
    assert passwd["group"] == "root"
    assert passwd["mode"] == "0644"
    assert passwd["owner"] == "root"
    assert isinstance(passwd["size"], int)
    assert passwd["path"] == "/etc/passwd"
    # seems to vary with differents docker fs backend
    assert passwd["state"] in ("file", "hard")
    assert passwd["uid"] == 0
    # Inventory/group/host variables are all visible through get_variables().
    variables = Ansible.get_variables()
    assert variables["myvar"] == "foo"
    assert variables["myhostvar"] == "bar"
    assert variables["mygroupvar"] == "qux"
    assert variables["inventory_hostname"] == "debian_jessie"
    assert variables["group_names"] == ["testgroup"]
    # test errors reporting
    with pytest.raises(Ansible.AnsibleException) as excinfo:
        Ansible("file", "path=/etc/passwd an_unexpected=variable")
    tb = str(excinfo.value)
    assert 'unsupported parameter' in tb.lower()
    with pytest.raises(Ansible.AnsibleException) as excinfo:
        Ansible("command", "zzz")
    # The check-mode error message changed between ansible 1.x and 2.x.
    if version == 1:
        msg = "check mode not supported for command"
    else:
        msg = "Skipped. You might want to try check=False"
    assert excinfo.value.result['msg'] == msg
    try:
        Ansible("command", "zzz", check=False)
    except Ansible.AnsibleException as exc:
        assert exc.result['rc'] == 2
        # NOTE(review): the ansible>=2 branch expects a French locale errno
        # message -- presumably matching this CI image's locale; verify.
        if version == 1:
            assert exc.result['msg'] == '[Errno 2] No such file or directory'
        else:
            assert exc.result['msg'] == ('[Errno 2] Aucun fichier ou dossier '
                                         'de ce type')
    result = Ansible("command", "echo foo", check=False)
    assert result['stdout'] == 'foo'
@pytest.mark.destructive
def test_supervisor(Command, Service, Supervisor, Process):
    """Supervisor module: service status, pid lookup and stopped states.

    Marked destructive because it stops supervisor on the target host.
    """
    # Wait supervisord is running
    for _ in range(20):
        if Service("supervisor").is_running:
            break
        time.sleep(.5)
    else:
        raise RuntimeError("No running supervisor")
    # Poll until the managed "tail" program leaves STARTING state.
    for _ in range(20):
        service = Supervisor("tail")
        if service.status == "RUNNING":
            break
        else:
            assert service.status == "STARTING"
            time.sleep(.5)
    else:
        raise RuntimeError("No running tail in supervisor")
    assert service.is_running
    proc = Process.get(pid=service.pid)
    assert proc.comm == "tail"
    services = Supervisor.get_services()
    assert len(services) == 1
    assert services[0].name == "tail"
    assert services[0].is_running
    assert services[0].pid == service.pid
    Command("supervisorctl stop tail")
    service = Supervisor("tail")
    assert not service.is_running
    assert service.status == "STOPPED"
    assert service.pid is None
    # With supervisord itself down, querying a program must raise.
    Command("service supervisor stop")
    assert not Service("supervisor").is_running
    with pytest.raises(RuntimeError) as excinfo:
        Supervisor("tail").is_running
    assert 'Is supervisor running' in str(excinfo.value)
def test_mountpoint(MountPoint):
    """MountPoint attributes for existing and missing mount points."""
    root_mount = MountPoint('/')
    assert root_mount.exists
    assert isinstance(root_mount.options, list)
    assert 'rw' in root_mount.options
    assert root_mount.filesystem
    fake_mount = MountPoint('/fake/mount')
    assert not fake_mount.exists
    mountpoints = MountPoint.get_mountpoints()
    assert mountpoints
    assert all(isinstance(m, MountPoint) for m in mountpoints)
    # "/" must appear exactly once in the mount table.
    assert len([m for m in mountpoints if m.path == "/"]) == 1
def test_sudo_from_root(Sudo, User):
    """Sudo context manager switches the user and restores it on exit."""
    assert User().name == "root"
    with Sudo("user"):
        assert User().name == "user"
    assert User().name == "root"
def test_sudo_fail_from_root(Command, Sudo, User):
    """A failing command inside Sudo() surfaces as an AssertionError and
    does not leak the switched user out of the context."""
    assert User().name == "root"
    with pytest.raises(AssertionError) as exc:
        with Sudo("unprivileged"):
            assert User().name == "unprivileged"
            # The unprivileged user cannot read /root -> check_output fails.
            Command.check_output('ls /root/invalid')
    assert str(exc.value).startswith('Unexpected exit code')
    with Sudo():
        assert User().name == "root"
@pytest.mark.testinfra_hosts("docker://user@debian_jessie")
def test_sudo_to_root(Sudo, User):
    """Sudo() with no argument escalates to root; contexts can nest."""
    assert User().name == "user"
    with Sudo():
        assert User().name == "root"
        # Test nested sudo
        with Sudo("www-data"):
            assert User().name == "www-data"
    assert User().name == "user"
def test_pip_package(PipPackage):
    """PipPackage lists installed and outdated packages per pip binary."""
    assert PipPackage.get_packages()['pip']['version'] == '1.5.6'
    # FIX: the original bound this to the name ``pytest``, shadowing the
    # imported pytest module for the rest of the function.  Renamed.
    pytest_pkg = PipPackage.get_packages(pip_path='/v/bin/pip')['pytest']
    assert pytest_pkg['version'].startswith('2.')
    outdated = PipPackage.get_outdated_packages(
        pip_path='/v/bin/pip')['pytest']
    assert outdated['current'] == pytest_pkg['version']
    assert int(outdated['latest'].split('.')[0]) > 2
| apache-2.0 |
nuggetbram/gammacoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    # Thin adapter exposing hashlib.sha256 under the old PyCrypto-style
    # ``SHA256.new()`` interface used by the code below.
    new = hashlib.sha256
if str != bytes:
    # Python 3.x
    # Iterating ``bytes`` on py3 already yields ints, and a single byte must
    # be built as ``bytes((n,))``.  These shims let the py2-style code below
    # work unchanged on both major versions.
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )
# Base58 alphabet: digits/letters minus 0, O, I and l to avoid visual
# ambiguity.  ``b58chars`` re-exports it under a public name.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
    """ encode v, which is a string of bytes, to base58.
    """
    # Interpret the bytes as one big little-endian-enumerated integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)
    # Repeatedly divide by 58, emitting digits most-significant first.
    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result
    # Bitcoin does a little leading-zero-compression:
    # leading 0-bytes in the input become leading-1s
    nPad = 0
    for c in v:
        # FIX: the original tested ``c == '\0'``, which can never be true on
        # Python 3 where iterating bytes yields ints -- leading zero bytes
        # were silently dropped.  ``ord(c) == 0`` works on both py2 (char)
        # and py3 (int, via the module's ord shim).
        if ord(c) == 0: nPad += 1
        else: break
    return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
    """ decode v into a string of len bytes
    """
    # Accumulate the base58 digits into one big integer.
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)
    # Emit the integer as bytes, most significant first.  ``chr``/``bytes``
    # are the py2/py3 shims defined at module level.
    result = bytes()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Each leading '1' digit re-expands to one leading zero byte.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result
    # Optional sanity check on the decoded length.
    if length is not None and len(result) != length:
        return None
    return result
def checksum(v):
    """Return 32-bit checksum based on SHA256"""
    # Double-SHA256, truncated to the first four bytes.
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """b58encode a string, with 32-bit checksum"""
    payload = v + checksum(v)
    return b58encode(payload)
def b58decode_chk(v):
    """decode a base58 string, check and remove checksum"""
    result = b58decode(v)
    if result is None:
        return None
    # FIX: removed the dead ``h3 = checksum(...)`` local -- it was computed,
    # never used, and the checksum was then recomputed in the comparison.
    if result[-4:] == checksum(result[:-4]):
        return result[:-4]
    else:
        return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    # A valid address decodes to exactly 21 bytes: 1 version byte + 20-byte hash.
    if decoded is None or len(decoded) != 21:
        return None
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/bitcoin/python-base58.git)
    # FIX: compare with ``==`` rather than ``is`` -- identity comparison with
    # an int literal is implementation-dependent (SyntaxWarning on 3.8+).
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
| mit |
ryanbressler/ClassWar | sklrf.py | 6 | 1280 | import sys
from sklearn.datasets import load_svmlight_file
from sklearn.ensemble import RandomForestClassifier
from time import time
import numpy as np
def dumptree(atree, fn):
    """Export a fitted sklearn decision tree to a Graphviz .dot file *fn*."""
    from sklearn import tree
    # FIX: use a context manager so the file is closed even if
    # export_graphviz raises (the original leaked the handle on error).
    with open(fn, "w") as f:
        tree.export_graphviz(atree, out_file=f)
# def main():
# NOTE: Python 2 script (print statements).  Trains a scikit-learn random
# forest on the svmlight file given as argv[1]; evaluates on the training
# data, or on a second svmlight file given as argv[2].
fn = sys.argv[1]
X,Y = load_svmlight_file(fn)
rf_parameters = {
	"n_estimators": 500,
	"n_jobs": 1
}
clf = RandomForestClassifier(**rf_parameters)
# svmlight loads a sparse matrix; RandomForest wants a dense array.
X = X.toarray()
print clf
print "Starting Training"
t0 = time()
clf.fit(X, Y)
train_time = time() - t0
print "Training on %s took %s"%(fn, train_time)
print "Total training time (seconds): %s"%(train_time)
if len(sys.argv) == 2:
	# No test file given: report accuracy on the training set itself.
	score = clf.score(X, Y)
	count = np.sum(clf.predict(X)==Y)
	print "Score: %s, %s / %s "%(score, count, len(Y))
else:
	fn = sys.argv[2]
	X,Y = load_svmlight_file(fn)
	X = X.toarray()
	score = clf.score(X, Y)
	count = np.sum(clf.predict(X)==Y)
	# Per-class hit counts; the reported "Error" is 1 - balanced accuracy.
	c1 = np.sum(clf.predict(X[Y==1])==Y[Y==1] )
	c0 = np.sum(clf.predict(X[Y==0])==Y[Y==0] )
	l = len(Y)
	print "Error: %s"%(1-(float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)
	print "Testing Score: %s, %s / %s, %s, %s, %s "%(score, count, l, c1, c0, (float(c1)/float(sum(Y==1))+float(c0)/float(sum(Y==0)))/2.0)
# if __name__ == '__main__':
# 	main()
| bsd-3-clause |
nickpack/django-oscar | tests/unit/offer/condition_tests.py | 35 | 1861 | from django.test import TestCase
from django.utils import six
from oscar.apps.offer import custom, models
from oscar.apps.basket.models import Basket
from oscar.test import factories
class TestConditionProxyModels(TestCase):
    def test_name_and_description(self):
        """
        Tests that the condition proxy classes all return a name and
        description. Unfortunately, the current implementation means
        a valid range and value are required.
        This test became necessary because the complex name/description logic
        broke with the python_2_unicode_compatible decorator.
        """
        range = factories.RangeFactory()
        # Every condition type must stringify without raising and produce
        # truthy name/description values.
        for type, __ in models.Condition.TYPE_CHOICES:
            condition = models.Condition(type=type, range=range, value=5)
            self.assertTrue(all([
                condition.name,
                condition.description,
                six.text_type(condition)]))
class BasketOwnerCalledBarry(models.Condition):
    """Toy condition: satisfied only when the basket owner's first name is
    'Barry' (case-insensitive)."""

    class Meta:
        proxy = True

    def is_satisfied(self, offer, basket):
        owner = basket.owner
        if not owner:
            return False
        return owner.first_name.lower() == 'barry'

    def can_apply_condition(self, product):
        # This condition never matches at the individual product level.
        return False
class TestCustomCondition(TestCase):
    # Exercises a class-based custom condition through ConditionalOffer.
    def setUp(self):
        # Register the proxy-class condition and attach it to an offer.
        self.condition = custom.create_condition(BasketOwnerCalledBarry)
        self.offer = models.ConditionalOffer(condition=self.condition)
        self.basket = Basket()
    def test_is_not_satified_by_non_match(self):
        self.basket.owner = factories.UserFactory(first_name="Alan")
        self.assertFalse(self.offer.is_condition_satisfied(self.basket))
    def test_is_satified_by_match(self):
        self.basket.owner = factories.UserFactory(first_name="Barry")
        self.assertTrue(self.offer.is_condition_satisfied(self.basket))
| bsd-3-clause |
janusnic/django-allauth | docs/conf.py | 4 | 7798 | # -*- coding: utf-8 -*-
#
# django-allauth documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 22:58:42 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-allauth'
copyright = u'2015, Raymond Penners'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.23.0'
# The full version, including alpha/beta/rc tags.
release = '0.23.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-allauthdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-allauth.tex', u'django-allauth Documentation',
u'Raymond Penners', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-allauth', u'django-allauth Documentation',
[u'Raymond Penners'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-allauth', u'django-allauth Documentation',
u'Raymond Penners', 'django-allauth', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
Snifer/BurpSuite-Plugins | burp-protobuf-decoder/Lib/google/protobuf/descriptor.py | 228 | 26625 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Descriptors essentially contain exactly the information found in a .proto
file, in types that make this information accessible in Python.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.protobuf.internal import api_implementation
if api_implementation.Type() == 'cpp':
if api_implementation.Version() == 2:
from google.protobuf.internal.cpp import _message
else:
from google.protobuf.internal import cpp_message
class Error(Exception):
  """Base error for this module; all descriptor errors derive from it."""
class TypeTransformationError(Error):
  """Error transforming between python proto type and corresponding C++ type."""
class DescriptorBase(object):
  """Descriptors base class.
  This class is the base of all descriptor classes. It provides common options
  related functionality.
  Attributes:
    has_options: True if the descriptor has non-default options.  Usually it
      is not necessary to read this -- just call GetOptions() which will
      happily return the default instance.  However, it's sometimes useful
      for efficiency, and also useful inside the protobuf implementation to
      avoid some bootstrapping issues.
  """
  def __init__(self, options, options_class_name):
    """Initialize the descriptor given its options message and the name of the
    class of the options message. The name of the class is required in case
    the options message is None and has to be created.
    """
    # Delegate to _SetOptions so the options-related state (_options,
    # _options_class_name, has_options) is initialized in exactly one place
    # instead of duplicating the three assignments (DRY fix; same behavior).
    self._SetOptions(options, options_class_name)
  def _SetOptions(self, options, options_class_name):
    """Sets the descriptor's options
    This function is used in generated proto2 files to update descriptor
    options. It must not be used outside proto2.
    """
    self._options = options
    self._options_class_name = options_class_name
    # Does this descriptor have non-default options?
    self.has_options = options is not None
  def GetOptions(self):
    """Retrieves descriptor options.
    This method returns the options set or creates the default options for the
    descriptor.
    """
    if self._options:
      return self._options
    from google.protobuf import descriptor_pb2
    try:
      options_class = getattr(descriptor_pb2, self._options_class_name)
    except AttributeError:
      raise RuntimeError('Unknown options class name %s!' %
                         (self._options_class_name))
    self._options = options_class()
    return self._options
class _NestedDescriptorBase(DescriptorBase):
  """Common class for descriptors that can be nested."""
  def __init__(self, options, options_class_name, name, full_name,
               file, containing_type, serialized_start=None,
               serialized_end=None):
    """Constructor.
    Args:
      options: Protocol message options or None
        to use default message options.
      options_class_name: (str) The class name of the above options.
      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name of this protocol message type,
        which will include protocol "package" name and the name of any
        enclosing types.
      file: (FileDescriptor) Reference to file info.
      containing_type: if provided, this is a nested descriptor, with this
        descriptor as parent, otherwise None.
      serialized_start: The start index (inclusive) in block in the
        file.serialized_pb that describes this descriptor.
      serialized_end: The end index (exclusive) in block in the
        file.serialized_pb that describes this descriptor.
    """
    super(_NestedDescriptorBase, self).__init__(
        options, options_class_name)
    self.name = name
    # TODO(falk): Add function to calculate full_name instead of having it in
    # memory?
    self.full_name = full_name
    self.file = file
    self.containing_type = containing_type
    self._serialized_start = serialized_start
    self._serialized_end = serialized_end
  def GetTopLevelContainingType(self):
    """Returns the root if this is a nested type, or itself if it's the root."""
    desc = self
    while desc.containing_type is not None:
      desc = desc.containing_type
    return desc
  def CopyToProto(self, proto):
    """Copies this to the matching proto in descriptor_pb2.
    Args:
      proto: An empty proto instance from descriptor_pb2.
    Raises:
      Error: If self couldn't be serialized, due to too few constructor
        arguments.
    """
    if (self.file is not None and
        self._serialized_start is not None and
        self._serialized_end is not None):
      proto.ParseFromString(self.file.serialized_pb[
          self._serialized_start:self._serialized_end])
    else:
      raise Error('Descriptor does not contain serialization.')
class Descriptor(_NestedDescriptorBase):
    """Descriptor for a protocol message type.

    A Descriptor instance has the following attributes:
      name: (str) Name of this protocol message type.
      full_name: (str) Fully-qualified name, including the protocol "package"
        name and the name of any enclosing types.
      containing_type: (Descriptor) Descriptor of the enclosing type, or None
        if this is top-level.
      fields: (list of FieldDescriptor) Field descriptors for all fields in
        this type, in declaration order.
      fields_by_number: (dict int -> FieldDescriptor) Same FieldDescriptor
        objects as in |fields|, indexed by "number".
      fields_by_name: (dict str -> FieldDescriptor) Same FieldDescriptor
        objects as in |fields|, indexed by "name".
      nested_types: (list of Descriptor) Message types nested within this one.
      nested_types_by_name: (dict str -> Descriptor) Nested types by "name".
      enum_types: (list of EnumDescriptor) Enums contained within this type.
      enum_types_by_name: (dict str -> EnumDescriptor) Enums by "name".
      enum_values_by_name: (dict str -> EnumValueDescriptor) Every enum value
        declared in this scope, keyed by value name.
      extensions: (list of FieldDescriptor) Extensions defined directly within
        this message type (NOT within a nested type).
      extensions_by_name: (dict str -> FieldDescriptor) Extensions by "name".
      is_extendable: (bool) Does this type define any extension ranges?
      options: (descriptor_pb2.MessageOptions) Message options, or None to
        use default options.
      file: (FileDescriptor) Reference to the containing file descriptor.
    """

    def __init__(self, name, full_name, filename, containing_type, fields,
                 nested_types, enum_types, extensions, options=None,
                 is_extendable=True, extension_ranges=None, file=None,
                 serialized_start=None, serialized_end=None):
        """Arguments to __init__() are as described in the attribute
        description above.

        Note that filename is an obsolete argument that is not used anymore.
        Please use file.name to access this as an attribute.
        """
        # BUG FIX: serialized_end was previously passed as serialized_start,
        # so the base class recorded a zero-length serialization span.  The
        # explicit reassignment at the bottom of this method masked the bug
        # for this class but not for base-class consumers.
        super(Descriptor, self).__init__(
            options, 'MessageOptions', name, full_name, file,
            containing_type, serialized_start=serialized_start,
            serialized_end=serialized_end)

        # We keep |fields| in addition to fields_by_name and fields_by_number
        # so that:
        #   1. Clients can index fields by "order in which they're listed."
        #   2. Clients can easily iterate over all fields with the terse
        #      syntax: for f in descriptor.fields: ...
        self.fields = fields
        for field in self.fields:
            field.containing_type = self
        self.fields_by_number = dict((f.number, f) for f in fields)
        self.fields_by_name = dict((f.name, f) for f in fields)

        self.nested_types = nested_types
        self.nested_types_by_name = dict((t.name, t) for t in nested_types)

        self.enum_types = enum_types
        for enum_type in self.enum_types:
            enum_type.containing_type = self
        self.enum_types_by_name = dict((t.name, t) for t in enum_types)
        self.enum_values_by_name = dict(
            (v.name, v) for t in enum_types for v in t.values)

        self.extensions = extensions
        for extension in self.extensions:
            extension.extension_scope = self
        self.extensions_by_name = dict((f.name, f) for f in extensions)

        self.is_extendable = is_extendable
        self.extension_ranges = extension_ranges
        self._serialized_start = serialized_start
        self._serialized_end = serialized_end

    def EnumValueName(self, enum, value):
        """Returns the string name of an enum value.

        This is just a small helper method to simplify a common operation.

        Args:
            enum: string name of the Enum.
            value: int, value of the enum.

        Returns:
            string name of the enum value.

        Raises:
            KeyError if either the Enum doesn't exist or the value is not a
            valid value for the enum.
        """
        return self.enum_types_by_name[enum].values_by_number[value].name

    def CopyToProto(self, proto):
        """Copies this to a descriptor_pb2.DescriptorProto.

        Args:
            proto: An empty descriptor_pb2.DescriptorProto.
        """
        # This function is overridden only to give a better doc comment.
        super(Descriptor, self).CopyToProto(proto)


# TODO(robinson): We should have aggressive checking here, for example:
#   * If you specify a repeated field, you should not be allowed to specify
#     a default value.
#   * [Other examples here as needed].
#
# TODO(robinson): For this and other *Descriptor classes, we might also want
# to lock things down aggressively (e.g. prevent clients from setting the
# attributes).  Having stronger invariants here in general will reduce the
# number of runtime checks we must do in reflection.py...
class FieldDescriptor(DescriptorBase):
    """Descriptor for a single field in a .proto file.

    A FieldDescriptor instance has the following attributes:
      name: (str) Name of this field, exactly as it appears in .proto.
      full_name: (str) Name including containing scope; particularly
        relevant for extensions.
      index: (int) Dense, 0-indexed order in which this field textually
        appears within its message in the .proto file.
      number: (int) Tag number declared for this field in the .proto file.
      type: (one of the TYPE_* constants below) Declared type.
      cpp_type: (one of the CPPTYPE_* constants below) C++ type used to
        represent this field.
      label: (one of the LABEL_* constants below) Whether this field is
        optional, required, or repeated.
      has_default_value: (bool) True if this field has a defined default.
      default_value: (varies) Default value of this field.  Only meaningful
        for non-repeated scalar fields; repeated fields should always set
        this to [], and non-repeated composite fields to None.
      containing_type: (Descriptor) Message type containing this field; set
        by the Descriptor constructor if we're passed into one.  Somewhat
        confusingly, for extension fields this is the descriptor of the
        EXTENDED message, not of the message containing this field (see
        is_extension and extension_scope).
      message_type: (Descriptor) For composite fields, descriptor of the
        contained message type; otherwise None.
      enum_type: (EnumDescriptor) For enum fields, descriptor of the enum;
        otherwise None.
      is_extension: True iff this describes an extension field.
      extension_scope: (Descriptor) Only meaningful if is_extension is True:
        the message immediately containing this extension field, or None
        for a top-level (file-level) extension.
      options: (descriptor_pb2.FieldOptions) Field options, or None to use
        default options.
    """

    # Must be consistent with the C++ FieldDescriptor::Type enum in
    # descriptor.h.
    #
    # TODO(robinson): Find a way to eliminate this repetition.
    TYPE_DOUBLE = 1
    TYPE_FLOAT = 2
    TYPE_INT64 = 3
    TYPE_UINT64 = 4
    TYPE_INT32 = 5
    TYPE_FIXED64 = 6
    TYPE_FIXED32 = 7
    TYPE_BOOL = 8
    TYPE_STRING = 9
    TYPE_GROUP = 10
    TYPE_MESSAGE = 11
    TYPE_BYTES = 12
    TYPE_UINT32 = 13
    TYPE_ENUM = 14
    TYPE_SFIXED32 = 15
    TYPE_SFIXED64 = 16
    TYPE_SINT32 = 17
    TYPE_SINT64 = 18
    MAX_TYPE = 18

    # Must be consistent with the C++ FieldDescriptor::CppType enum in
    # descriptor.h.
    #
    # TODO(robinson): Find a way to eliminate this repetition.
    CPPTYPE_INT32 = 1
    CPPTYPE_INT64 = 2
    CPPTYPE_UINT32 = 3
    CPPTYPE_UINT64 = 4
    CPPTYPE_DOUBLE = 5
    CPPTYPE_FLOAT = 6
    CPPTYPE_BOOL = 7
    CPPTYPE_ENUM = 8
    CPPTYPE_STRING = 9
    CPPTYPE_MESSAGE = 10
    MAX_CPPTYPE = 10

    _PYTHON_TO_CPP_PROTO_TYPE_MAP = {
        TYPE_DOUBLE: CPPTYPE_DOUBLE,
        TYPE_FLOAT: CPPTYPE_FLOAT,
        TYPE_ENUM: CPPTYPE_ENUM,
        TYPE_INT64: CPPTYPE_INT64,
        TYPE_SINT64: CPPTYPE_INT64,
        TYPE_SFIXED64: CPPTYPE_INT64,
        TYPE_UINT64: CPPTYPE_UINT64,
        TYPE_FIXED64: CPPTYPE_UINT64,
        TYPE_INT32: CPPTYPE_INT32,
        TYPE_SFIXED32: CPPTYPE_INT32,
        TYPE_SINT32: CPPTYPE_INT32,
        TYPE_UINT32: CPPTYPE_UINT32,
        TYPE_FIXED32: CPPTYPE_UINT32,
        TYPE_BYTES: CPPTYPE_STRING,
        TYPE_STRING: CPPTYPE_STRING,
        TYPE_BOOL: CPPTYPE_BOOL,
        TYPE_MESSAGE: CPPTYPE_MESSAGE,
        TYPE_GROUP: CPPTYPE_MESSAGE,
    }

    # Must be consistent with the C++ FieldDescriptor::Label enum in
    # descriptor.h.
    #
    # TODO(robinson): Find a way to eliminate this repetition.
    LABEL_OPTIONAL = 1
    LABEL_REQUIRED = 2
    LABEL_REPEATED = 3
    MAX_LABEL = 3

    def __init__(self, name, full_name, index, number, type, cpp_type, label,
                 default_value, message_type, enum_type, containing_type,
                 is_extension, extension_scope, options=None,
                 has_default_value=True):
        """The arguments are as described in the attribute description above.

        Note that containing_type may be None, and may be set later if
        necessary (to deal with circular references between message types,
        for example).  Likewise for extension_scope.
        """
        super(FieldDescriptor, self).__init__(options, 'FieldOptions')
        self.name = name
        self.full_name = full_name
        self.index = index
        self.number = number
        self.type = type
        self.cpp_type = cpp_type
        self.label = label
        self.has_default_value = has_default_value
        self.default_value = default_value
        self.containing_type = containing_type
        self.message_type = message_type
        self.enum_type = enum_type
        self.is_extension = is_extension
        self.extension_scope = extension_scope

        # When the C++ implementation is active, keep a handle to the
        # corresponding C descriptor; otherwise leave it unset.
        if api_implementation.Type() == 'cpp':
            backend = _message if api_implementation.Version() == 2 else cpp_message
            if is_extension:
                self._cdescriptor = backend.GetExtensionDescriptor(full_name)
            else:
                self._cdescriptor = backend.GetFieldDescriptor(full_name)
        else:
            self._cdescriptor = None

    @staticmethod
    def ProtoTypeToCppProtoType(proto_type):
        """Converts from a Python proto type to a C++ proto type.

        The Python ProtocolBuffer classes specify both the 'Python' datatype
        and the 'C++' datatype - and they're not the same.  This helper method
        translates from one to the other.

        Args:
            proto_type: the Python proto type (descriptor.FieldDescriptor.TYPE_*)

        Returns:
            descriptor.FieldDescriptor.CPPTYPE_*, the C++ type.

        Raises:
            TypeTransformationError: when the Python proto type isn't known.
        """
        mapping = FieldDescriptor._PYTHON_TO_CPP_PROTO_TYPE_MAP
        if proto_type not in mapping:
            raise TypeTransformationError('Unknown proto_type: %s' % proto_type)
        return mapping[proto_type]
class EnumDescriptor(_NestedDescriptorBase):
    """Descriptor for an enum defined in a .proto file.

    An EnumDescriptor instance has the following attributes:
      name: (str) Name of the enum type.
      full_name: (str) Full name of the type, including package name and any
        enclosing type(s).
      values: (list of EnumValueDescriptor) Values in this enum.
      values_by_name: (dict str -> EnumValueDescriptor) Same as |values|,
        indexed by the "name" field of each EnumValueDescriptor.
      values_by_number: (dict int -> EnumValueDescriptor) Same as |values|,
        indexed by the "number" field of each EnumValueDescriptor.
      containing_type: (Descriptor) Immediate containing type of this enum,
        or None for a top-level enum.  Set by Descriptor's constructor if
        we're passed into one.
      file: (FileDescriptor) Reference to file descriptor.
      options: (descriptor_pb2.EnumOptions) Enum options message, or None to
        use default enum options.
    """

    def __init__(self, name, full_name, filename, values,
                 containing_type=None, options=None, file=None,
                 serialized_start=None, serialized_end=None):
        """Arguments are as described in the attribute description above.

        Note that filename is an obsolete argument that is not used anymore.
        Please use file.name to access this as an attribute.
        """
        # BUG FIX: serialized_end was previously passed as serialized_start
        # to the base class (same latent defect as in Descriptor.__init__).
        super(EnumDescriptor, self).__init__(
            options, 'EnumOptions', name, full_name, file,
            containing_type, serialized_start=serialized_start,
            serialized_end=serialized_end)

        self.values = values
        for value in self.values:
            value.type = self
        self.values_by_name = dict((v.name, v) for v in values)
        self.values_by_number = dict((v.number, v) for v in values)
        self._serialized_start = serialized_start
        self._serialized_end = serialized_end

    def CopyToProto(self, proto):
        """Copies this to a descriptor_pb2.EnumDescriptorProto.

        Args:
            proto: An empty descriptor_pb2.EnumDescriptorProto.
        """
        # This function is overridden only to give a better doc comment.
        super(EnumDescriptor, self).CopyToProto(proto)
class EnumValueDescriptor(DescriptorBase):
    """Descriptor for a single value within an enum.

    Attributes:
      name: (str) Name of this value.
      index: (int) Dense, 0-indexed order in which this value appears
        textually within its enum in the .proto file.
      number: (int) Actual number assigned to this enum value.
      type: (EnumDescriptor) The enum this value belongs to; set by
        EnumDescriptor's constructor if we're passed into one.
      options: (descriptor_pb2.EnumValueOptions) Enum value options message,
        or None to use default enum value options.
    """

    def __init__(self, name, index, number, type=None, options=None):
        """Arguments are as described in the attribute description above."""
        super(EnumValueDescriptor, self).__init__(options, 'EnumValueOptions')
        self.name = name
        self.index = index
        self.number = number
        self.type = type
class ServiceDescriptor(_NestedDescriptorBase):
    """Descriptor for a service.

    Attributes:
      name: (str) Name of the service.
      full_name: (str) Full name of the service, including package name.
      index: (int) 0-indexed order in which this service definition appears
        within the .proto file.
      methods: (list of MethodDescriptor) Methods provided by this service.
      options: (descriptor_pb2.ServiceOptions) Service options message, or
        None to use default service options.
      file: (FileDescriptor) Reference to file info.
    """

    def __init__(self, name, full_name, index, methods, options=None, file=None,
                 serialized_start=None, serialized_end=None):
        super(ServiceDescriptor, self).__init__(
            options, 'ServiceOptions', name, full_name, file,
            None, serialized_start=serialized_start,
            serialized_end=serialized_end)
        self.index = index
        self.methods = methods
        # Back-link every method to this service.
        for method in self.methods:
            method.containing_service = self

    def FindMethodByName(self, name):
        """Searches for the specified method, and returns its descriptor.

        Returns None when no method of that name exists.
        """
        return next((method for method in self.methods if method.name == name),
                    None)

    def CopyToProto(self, proto):
        """Copies this to a descriptor_pb2.ServiceDescriptorProto.

        Args:
            proto: An empty descriptor_pb2.ServiceDescriptorProto.
        """
        # This function is overridden only to give a better doc comment.
        super(ServiceDescriptor, self).CopyToProto(proto)
class MethodDescriptor(DescriptorBase):
    """Descriptor for a method in a service.

    Attributes:
      name: (str) Name of the method within the service.
      full_name: (str) Full name of the method.
      index: (int) 0-indexed position of the method inside the service.
      containing_service: (ServiceDescriptor) The service containing this
        method.
      input_type: Descriptor of the message this method accepts.
      output_type: Descriptor of the message this method returns.
      options: (descriptor_pb2.MethodOptions) Method options message, or
        None to use default method options.
    """

    def __init__(self, name, full_name, index, containing_service,
                 input_type, output_type, options=None):
        """Arguments are as described in the attribute description above.

        Note that containing_service may be None, and may be set later if
        necessary.
        """
        super(MethodDescriptor, self).__init__(options, 'MethodOptions')
        self.name = name
        self.full_name = full_name
        self.index = index
        self.containing_service = containing_service
        self.input_type = input_type
        self.output_type = output_type
class FileDescriptor(DescriptorBase):
    """Descriptor for a file.  Mimics descriptor_pb2.FileDescriptorProto.

    Attributes:
      name: (str) Name of the file, relative to the root of the source tree.
      package: (str) Name of the package.
      serialized_pb: (str) Byte string of the serialized
        descriptor_pb2.FileDescriptorProto.
    """

    def __init__(self, name, package, options=None, serialized_pb=None):
        """Constructor."""
        super(FileDescriptor, self).__init__(options, 'FileOptions')
        self.message_types_by_name = {}
        self.name = name
        self.package = package
        self.serialized_pb = serialized_pb

        # With the C++ implementation, register the serialized file in the
        # C++ descriptor pool as well.
        if api_implementation.Type() == 'cpp' and self.serialized_pb is not None:
            if api_implementation.Version() == 2:
                _message.BuildFile(self.serialized_pb)
            else:
                cpp_message.BuildFile(self.serialized_pb)

    def CopyToProto(self, proto):
        """Copies this to a descriptor_pb2.FileDescriptorProto.

        Args:
            proto: An empty descriptor_pb2.FileDescriptorProto.
        """
        proto.ParseFromString(self.serialized_pb)
def _ParseOptions(message, string):
"""Parses serialized options.
This helper function is used to parse serialized options in generated
proto2 files. It must not be used outside proto2.
"""
message.ParseFromString(string)
return message
def MakeDescriptor(desc_proto, package=''):
    """Make a protobuf Descriptor given a DescriptorProto protobuf.

    Args:
        desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
        package: Optional package name for the new message Descriptor (string).

    Returns:
        A Descriptor for protobuf messages.
    """
    name_parts = ([package] if package else []) + [desc_proto.name]
    fields = []
    for field_proto in desc_proto.field:
        full_name = '.'.join(name_parts + [field_proto.name])
        # NOTE(review): index is derived as number - 1, which matches the
        # textual index only when field numbers are consecutive starting at
        # 1 -- confirm whether callers rely on this.
        fields.append(FieldDescriptor(
            field_proto.name, full_name, field_proto.number - 1,
            field_proto.number, field_proto.type,
            FieldDescriptor.ProtoTypeToCppProtoType(field_proto.type),
            field_proto.label, None, None, None, None, False, None,
            has_default_value=False))
    return Descriptor(desc_proto.name, '.'.join(name_parts), None, None,
                      fields, [], [], [])
| gpl-2.0 |
marratj/ansible | lib/ansible/modules/network/cloudengine/ce_netstream_template.py | 27 | 15967 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_netstream_template
version_added: "2.4"
short_description: Manages NetStream template configuration on HUAWEI CloudEngine switches.
description:
- Manages NetStream template configuration on HUAWEI CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present', 'absent']
type:
description:
- Configure the type of netstream record.
required: true
choices: ['ip', 'vxlan']
record_name:
description:
- Configure the name of netstream record.
The value is a string of 1 to 32 case-insensitive characters.
required: false
default: null
match:
description:
- Configure flexible flow statistics template keywords.
required: false
default: null
choices: ['destination-address', 'destination-port', 'tos', 'protocol', 'source-address', 'source-port']
collect_counter:
description:
- Configure the number of packets and bytes that are included in the flexible flow statistics sent to NSC.
required: false
default: null
choices: ['bytes', 'packets']
collect_interface:
description:
- Configure the input or output interface that are included in the flexible flow statistics sent to NSC.
required: false
default: null
choices: ['input', 'output']
description:
description:
- Configure the description of netstream record.
The value is a string of 1 to 80 case-insensitive characters.
required: false
default: null
'''
EXAMPLES = '''
- name: netstream template module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Config ipv4 netstream record
ce_netstream_template:
state: present
type: ip
record_name: test
provider: "{{ cli }}"
- name: Undo ipv4 netstream record
ce_netstream_template:
state: absent
type: ip
record_name: test
provider: "{{ cli }}"
- name: Config ipv4 netstream record collect_counter
ce_netstream_template:
state: present
type: ip
record_name: test
collect_counter: bytes
provider: "{{ cli }}"
- name: Undo ipv4 netstream record collect_counter
ce_netstream_template:
state: absent
type: ip
record_name: test
collect_counter: bytes
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"record_name": "test",
"type": "ip",
"state": "present"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"record_name": "test",
"type": "ip"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["netstream record test ip"]
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
class NetstreamTemplate(object):
    """Manages NetStream flexible-flow template configuration via CLI.

    The workflow (see work()) is: validate arguments, snapshot the existing
    record configuration, apply or remove configuration, snapshot the end
    state, and report everything back through the Ansible module.
    """

    def __init__(self, **kwargs):
        """Initialise the Ansible module and result bookkeeping."""
        argument_spec = kwargs["argument_spec"]
        self.spec = argument_spec
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)

        # Raw CLI section for the record, fetched by cli_get_netstream_config().
        self.netstream_cfg = None

        # Module parameters, normalised so that empty values become None.
        self.state = self.module.params['state'] or None
        self.type = self.module.params['type'] or None
        self.record_name = self.module.params['record_name'] or None
        self.match = self.module.params['match'] or None
        self.collect_counter = self.module.params['collect_counter'] or None
        self.collect_interface = self.module.params['collect_interface'] or None
        self.description = self.module.params['description'] or None

        # Execution state / reporting.
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def _record_cmd(self):
        """Return the CLI command that enters this netstream record view."""
        if self.type == "ip":
            return "netstream record %s ip" % self.record_name
        return "netstream record %s vxlan inner-ip" % self.record_name

    def cli_load_config(self, commands):
        """Push *commands* to the device unless running in check mode."""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_get_netstream_config(self):
        """Fetch the record's config section into self.netstream_cfg."""
        flags = list()
        flags.append("| section include %s" % self._record_cmd())
        self.netstream_cfg = get_config(self.module, flags)

    def check_args(self):
        """Validate module arguments; fail the module on bad input."""
        if not self.type or not self.record_name:
            self.module.fail_json(
                msg='Error: Please input type and record_name.')
        if self.record_name:
            if len(self.record_name) < 1 or len(self.record_name) > 32:
                self.module.fail_json(
                    msg='Error: The len of record_name is out of [1 - 32].')
        if self.description:
            if len(self.description) < 1 or len(self.description) > 80:
                self.module.fail_json(
                    msg='Error: The len of description is out of [1 - 80].')

    def get_proposed(self):
        """Collect the requested (proposed) configuration."""
        self.proposed["state"] = self.state
        if self.type:
            self.proposed["type"] = self.type
        if self.record_name:
            self.proposed["record_name"] = self.record_name
        if self.match:
            self.proposed["match"] = self.match
        if self.collect_counter:
            self.proposed["collect_counter"] = self.collect_counter
        if self.collect_interface:
            self.proposed["collect_interface"] = self.collect_interface
        if self.description:
            self.proposed["description"] = self.description

    def _parse_state(self, target):
        """Fetch and parse the device config into *target*.

        Shared by get_existing() and get_end_state(), which previously
        duplicated this parsing verbatim.
        """
        self.cli_get_netstream_config()
        if not self.netstream_cfg:
            return
        target["type"] = self.type
        target["record_name"] = self.record_name
        if self.description:
            tmp_value = re.findall(r'description (.*)', self.netstream_cfg)
            if tmp_value:
                target["description"] = tmp_value[0]
        if self.match:
            if self.type == "ip":
                tmp_value = re.findall(r'match ip (.*)', self.netstream_cfg)
            else:
                tmp_value = re.findall(r'match inner-ip (.*)', self.netstream_cfg)
            if tmp_value:
                target["match"] = tmp_value
        if self.collect_counter:
            tmp_value = re.findall(r'collect counter (.*)', self.netstream_cfg)
            if tmp_value:
                target["collect_counter"] = tmp_value
        if self.collect_interface:
            tmp_value = re.findall(r'collect interface (.*)', self.netstream_cfg)
            if tmp_value:
                target["collect_interface"] = tmp_value

    def get_existing(self):
        """Snapshot the device configuration before any change."""
        self._parse_state(self.existing)

    def get_end_state(self):
        """Snapshot the device configuration after the change."""
        self._parse_state(self.end_state)

    def present_netstream(self):
        """Create or extend the record to match the requested options."""
        cmds = list()
        need_create_record = False

        cmd = self._record_cmd()
        cmds.append(cmd)
        if not self.netstream_cfg:
            self.updates_cmd.append(cmd)
            need_create_record = True

        if self.description:
            cmd = "description %s" % self.description
            if not self.netstream_cfg or cmd not in self.netstream_cfg:
                cmds.append(cmd)
                self.updates_cmd.append(cmd)

        if self.match:
            if self.type == "ip":
                cmd = "match ip %s" % self.match
                cfg = "match ip"
            else:
                cmd = "match inner-ip %s" % self.match
                cfg = "match inner-ip"
            # Reconfigure when no match is present or a different one is.
            if not self.netstream_cfg or cfg not in self.netstream_cfg or self.match != self.existing["match"][0]:
                cmds.append(cmd)
                self.updates_cmd.append(cmd)

        if self.collect_counter:
            cmd = "collect counter %s" % self.collect_counter
            if not self.netstream_cfg or cmd not in self.netstream_cfg:
                cmds.append(cmd)
                self.updates_cmd.append(cmd)

        if self.collect_interface:
            cmd = "collect interface %s" % self.collect_interface
            if not self.netstream_cfg or cmd not in self.netstream_cfg:
                cmds.append(cmd)
                self.updates_cmd.append(cmd)

        # If the record already exists and no attribute needs changing, the
        # record-view command would be the only one left -- drop it.
        if not need_create_record and len(cmds) == 1:
            cmds.remove(self._record_cmd())

        if cmds:
            self.cli_load_config(cmds)
            self.changed = True

    def absent_netstream(self):
        """Remove the record, or just the requested attributes of it."""
        cmds = list()
        absent_netstream_attr = False

        if not self.netstream_cfg:
            return

        if self.description or self.match or self.collect_counter or self.collect_interface:
            absent_netstream_attr = True

        if absent_netstream_attr:
            # Undo individual attributes inside the record view.
            cmds.append(self._record_cmd())

            if self.description:
                cfg = "description %s" % self.description
                if self.netstream_cfg and cfg in self.netstream_cfg:
                    cmd = "undo description %s" % self.description
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)

            if self.match:
                if self.type == "ip":
                    cfg = "match ip %s" % self.match
                else:
                    cfg = "match inner-ip %s" % self.match
                if self.netstream_cfg and cfg in self.netstream_cfg:
                    if self.type == "ip":
                        cmd = "undo match ip %s" % self.match
                    else:
                        cmd = "undo match inner-ip %s" % self.match
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)

            if self.collect_counter:
                cfg = "collect counter %s" % self.collect_counter
                if self.netstream_cfg and cfg in self.netstream_cfg:
                    cmd = "undo collect counter %s" % self.collect_counter
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)

            if self.collect_interface:
                cfg = "collect interface %s" % self.collect_interface
                if self.netstream_cfg and cfg in self.netstream_cfg:
                    cmd = "undo collect interface %s" % self.collect_interface
                    cmds.append(cmd)
                    self.updates_cmd.append(cmd)

            # Only load if at least one undo command follows the record view.
            if len(cmds) > 1:
                self.cli_load_config(cmds)
                self.changed = True
        else:
            # No attribute given: remove the whole record.
            if self.type == "ip":
                cmd = "undo netstream record %s ip" % self.record_name
            else:
                cmd = "undo netstream record %s vxlan inner-ip" % self.record_name
            cmds.append(cmd)
            self.updates_cmd.append(cmd)
            self.cli_load_config(cmds)
            self.changed = True

    def work(self):
        """Main flow: validate, snapshot, apply, snapshot, report."""
        self.check_args()
        self.get_proposed()
        self.get_existing()

        if self.state == "present":
            self.present_netstream()
        else:
            self.absent_netstream()

        self.get_end_state()

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        self.results['updates'] = self.updates_cmd

        self.module.exit_json(**self.results)
def main():
    """Build the module argument spec and delegate to NetstreamTemplate."""
    argument_spec = dict(
        state=dict(choices=['present', 'absent'], default='present'),
        type=dict(choices=['ip', 'vxlan'], required=True),
        record_name=dict(type='str'),
        match=dict(choices=['destination-address', 'destination-port',
                            'tos', 'protocol', 'source-address', 'source-port']),
        collect_counter=dict(choices=['bytes', 'packets']),
        collect_interface=dict(choices=['input', 'output']),
        description=dict(type='str')
    )
    argument_spec.update(ce_argument_spec)
    NetstreamTemplate(argument_spec=argument_spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
ChawalitK/odoo | openerp/loglevels.py | 50 | 3579 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import sys
# Symbolic log-level names, mirroring the standard logging module's levels.
LOG_NOTSET = 'notset'
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
# TODO: get_encodings, ustr and exception_to_unicode were originally from
# tools.misc.  They are here until we refactor tools so that this module
# doesn't depend on tools.
def get_encodings(hint_encoding='utf-8'):
    """Yield candidate encodings to try when decoding byte strings.

    The optional *hint_encoding* is yielded first (followed by a close
    fallback for a few common charsets), then generic defaults, and finally
    the locale's preferred encoding with its own fallback.
    """
    fallbacks = {
        'latin1': 'latin9',
        'iso-8859-1': 'iso8859-15',
        'cp1252': '1252',
    }
    if hint_encoding:
        yield hint_encoding
        if hint_encoding.lower() in fallbacks:
            yield fallbacks[hint_encoding.lower()]

    # Generic defaults (these also cover pure-ASCII input).
    for charset in ('utf8', 'latin1'):
        if not hint_encoding or charset.lower() != hint_encoding.lower():
            yield charset

    # Finally, the locale's preferred encoding, if it adds anything new.
    from locale import getpreferredencoding
    prefenc = getpreferredencoding()
    if prefenc and prefenc.lower() != 'utf-8':
        yield prefenc
        prefenc = fallbacks.get(prefenc.lower())
        if prefenc:
            yield prefenc
def ustr(value, hint_encoding='utf-8', errors='strict'):
    """Coerce *value* to unicode, trying several encodings if needed.

    Similar to the builtin `unicode`, except that multiple encodings may be
    tried to decode `value`, defaulting to 'utf-8' first.

    :param value: the value to convert
    :param hint_encoding: an optional encoding detected upstream that should
        be tried first to decode ``value``.
    :param str errors: optional `errors` flag passed to the unicode builtin
        ('strict', 'ignore' or 'replace').  Passing anything other than
        'strict' means that the first encoding tried will be used, even if
        it's not the correct one, so be careful!  Ignored if value is not a
        string/unicode.
    :raise: UnicodeError if value cannot be coerced to unicode
    :return: unicode string representing the given value
    """
    # Direct type comparison instead of `isinstance` keeps the most common
    # cases fast (isinstance/issubclass are significantly slower).
    ttype = type(value)
    if ttype is unicode:
        return value

    # Short-circuit for str, still supporting str subclasses such as
    # openerp.tools.unquote.
    if ttype is str or issubclass(ttype, str):
        # get_encodings() yields hint_encoding first, then its fallback and
        # the generic defaults.
        for ln in get_encodings(hint_encoding):
            try:
                return unicode(value, ln, errors=errors)
            except Exception:
                pass

    if isinstance(value, Exception):
        return exception_to_unicode(value)

    # Fallback for non-string values.
    try:
        return unicode(value)
    except Exception:
        raise UnicodeError('unable to convert %r' % (value,))
def exception_to_unicode(e):
    """Best-effort unicode rendering of an exception object."""
    # Python < 2.6 exceptions exposed a `message` attribute.
    if sys.version_info[:2] < (2, 6) and hasattr(e, 'message'):
        return ustr(e.message)
    if hasattr(e, 'args'):
        return "\n".join(ustr(a) for a in e.args)
    try:
        return unicode(e)
    except Exception:
        return u"Unknown message"
| gpl-3.0 |
IhToN/DAW1-PRG | Ejercicios/PrimTrim/Ejercicio37.py | 1 | 3643 | """
Crear utilidades matemáticas:
a. Escribir una función a la que se le pasa un número y devuelve una tupla con sus divisores.
b. Se define un número primo como aquel que no tiene más divisores que él mismo y la unidad.
Escribir una función que nos devuelva un True en caso de que ser un número primo.
c. Crear una función a la que se le pasa un límite y nos devuelve una lista con todos los
números primos por debajo de ese límite.
d. Seguir el método de la Criba de Eratóstenes.
e. Escribir una función a la que le vamos a pasar como parámetro un número que indicará una potencia de 10.
Imprimirá la cantidad de primos y el porcentaje de números primos hasta el límite introducido.
f. Escribir una función segmentos_primos(limite, ancho) y devuelva una lista de tuplas que cuente el número
de primos dentro de un rango que irá de ancho en ancho hasta limite.
"""
from math import ceil, sqrt
import time
def divisores(numero):
    """ Devuelve una tupla con los divisores de numero.

    Recorre los candidatos hasta la mitad del numero (ningun divisor propio
    puede ser mayor) y añade el propio numero al final.
    """
    propios = tuple(d for d in range(1, ceil((numero + 1) / 2)) if numero % d == 0)
    return propios + (numero,)
def es_primo(numero):
    """ Comprueba si numero es primo; devuelve un boolean.

    Basta con probar divisores hasta la raiz cuadrada del numero.
    """
    if numero < 2:
        return False
    return all(numero % divisor != 0
               for divisor in range(2, ceil(sqrt(numero + 1))))
def primos_hasta(numero):
    """ Devuelve una lista con todos los primos menores o iguales que numero.
    """
    return [candidato for candidato in range(2, numero + 1) if es_primo(candidato)]
def criba_eratostenes(numero):
    """ Devuelve una lista con todos los primos menores o iguales que numero,
    usando el metodo de la Criba de Eratostenes: se parte de los candidatos
    2..numero y se van tachando (poniendo a False) los multiplos.
    """
    candidatos = list(range(2, numero + 1))
    for posicion in range((numero + 1) // 2):
        candidatos = criba(posicion, candidatos)
    return [valor for valor in candidatos if valor]
def criba(index, lista_criba):
    """ Tacha (pone a False) los multiplos del valor situado en
    lista_criba[index]; si ya estaba tachado no hace nada.
    Devuelve la misma lista, modificada in situ.
    """
    paso = lista_criba[index]
    if paso:
        posicion = index + paso
        while posicion < len(lista_criba):
            lista_criba[posicion] = False
            posicion += paso
    return lista_criba
def cantidad_primos(incluido, excluido):
    """ Devuelve la cantidad de primos comprendidos entre dos valores,
    el primero incluido y el segundo excluido del intervalo.
    """
    return sum(1 for candidato in range(incluido, excluido) if es_primo(candidato))
def estadistica_primos(potencia_diez):
    """ Imprime la cantidad de primos desde 0 hasta 10**potencia_diez y el
    porcentaje que representan dentro de ese intervalo.
    """
    tope = 10 ** potencia_diez
    total = cantidad_primos(2, tope)
    porcentaje = round(total * 100 / tope, 2)
    print('La cantidad de primos menores que', tope, 'es de', total)
    print('En total hay un ', porcentaje, "% de primos en el intervalo", sep="")
def segmentos_primos(limite, ancho):
    """ Devuelve una lista de tuplas (inicio, fin, cantidad) que cuenta el
    numero de primos dentro de cada rango [inicio, fin), de ancho en ancho
    hasta limite. El primer segmento se muestra empezando en 1.

    Correccion: la version original contaba cantidad_primos(cont - 1,
    cont + ancho - 1), es decir el intervalo [cont-1, cont+ancho-1), con lo
    que un primo como 199 se contaba en el segmento (200, 300) en vez de en
    (100, 200). Ahora se cuenta exactamente [cont, cont + ancho).
    """
    def _es_primo(n):
        # Primalidad por division de prueba hasta la raiz cuadrada.
        if n < 2:
            return False
        return all(n % d != 0 for d in range(2, ceil(sqrt(n + 1))))

    ret = []
    for cont in range(0, limite, ancho):
        izquierda = 1 if cont == 0 else cont
        cuenta = sum(1 for n in range(cont, cont + ancho) if _es_primo(n))
        ret.append((izquierda, cont + ancho, cuenta))
    return ret
"""print(divisores(22))
print(es_primo(13))
t1 = time.time()
print(primos_hasta(3000))
t2 = time.time()
print(t2 - t1)
print(criba_eratostenes(3000))
print(time.time() - t2)"""
estadistica_primos(3)
print(segmentos_primos(1000, 100))
| apache-2.0 |
bzero/statsmodels | statsmodels/sandbox/nonparametric/dgp_examples.py | 37 | 6008 | # -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: Josef Perktold
"""
import numpy as np
## Functions
def fg1(x):
    '''Fan and Gijbels example function 1: linear trend plus a hump.
    '''
    hump = 2 * np.exp(-16 * x**2)
    return hump + x
def fg1eu(x):
    '''Eubank variant of Fan and Gijbels example function 1:
    identity plus a narrow bump centred at 0.5.
    '''
    bump = 0.5 * np.exp(-50 * (x - 0.5)**2)
    return x + bump
def fg2(x):
    '''Fan and Gijbels example function 2: sine plus a hump.
    '''
    hump = 2 * np.exp(-16 * x**2)
    return np.sin(2 * x) + hump
def func1(x):
    '''made up example with sin and quadratic trend
    (note: evaluates sin(5x)/x, so x == 0 divides by zero)
    '''
    ratio = np.sin(5 * x) / x
    return ratio + 2. * x - 1. * x**2
## Classes with Data Generating Processes
# Shared docstring fragments; interpolated into each example class __doc__
# below via '%' formatting (keys: 'description' and 'ref').
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on it's own.
needs additional at least self.func
''',
'ref': ''}
class _UnivariateFunction(object):
    #Base Class for Univariate non-linear example.
    #Does not work on it's own. needs additionally at least self.func
    # __doc__ is a template; subclasses fill it in with __doc__ % doc.
    __doc__ = '''%(description)s
    Parameters
    ----------
    nobs : int
        number of observations to simulate
    x : None or 1d array
        If x is given then it is used for the exogenous variable instead of
        creating a random sample
    distr_x : None or distribution instance
        Only used if x is None. The rvs method is used to create a random
        sample of the exogenous (explanatory) variable.
    distr_noise : None or distribution instance
        The rvs method is used to create a random sample of the errors.
    Attributes
    ----------
    x : ndarray, 1-D
        exogenous or explanatory variable. x is sorted.
    y : ndarray, 1-D
        endogenous or response variable
    y_true : ndarray, 1-D
        expected values of endogenous or response variable, i.e. values of y
        without noise
    func : callable
        underlying function (defined by subclass)
    %(ref)s
    ''' #% doc
    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        # Generate x if not supplied: normal by default, or distr_x.rvs.
        # NOTE(review): only the generated sample is sorted; a user-supplied
        # x is used as-is, despite the class docstring — confirm intent.
        if x is None:
            if distr_x is None:
                x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
            else:
                x = distr_x.rvs(size=nobs)
            x.sort()
        self.x = x
        # Additive noise: normal with subclass-defined scale, or distr_noise.
        if distr_noise is None:
            noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
        else:
            noise = distr_noise.rvs(size=nobs)
        # Optional heteroscedasticity hook provided by some subclasses.
        if hasattr(self, 'het_scale'):
            noise *= self.het_scale(self.x)
        #self.func = fg1
        self.y_true = y_true = self.func(x)
        self.y = y_true + noise
    def plot(self, scatter=True, ax=None):
        '''plot the mean function and optionally the scatter of the sample
        Parameters
        ----------
        scatter: bool
            If true, then add scatterpoints of sample to plot.
        ax : None or matplotlib axis instance
            If None, then a matplotlib.pyplot figure is created, otherwise
            the given axis, ax, is used.
        Returns
        -------
        fig : matplotlib figure
            This is either the created figure instance or the one associated
            with ax if ax is given.
        '''
        if ax is None:
            # Lazy import keeps matplotlib optional for non-plotting use.
            import matplotlib.pyplot as plt
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
        if scatter:
            ax.plot(self.x, self.y, 'o', alpha=0.5)
        # Plot the true mean function on a dense grid over the sample range.
        xx = np.linspace(self.x.min(), self.x.max(), 100)
        ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
        return ax.figure
# Docstring fragments for UnivariateFanGijbels1, including the literature
# reference interpolated into the class __doc__.
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and Irene Gijbels. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFunction):
    __doc__ = _UnivariateFunction.__doc__ % doc

    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        # Scale of the exogenous sample and of the additive noise.
        self.s_x = 1.
        self.s_noise = 0.7
        self.func = fg1
        # Name the class explicitly in super(): the original
        # super(self.__class__, self) recurses infinitely if this class
        # is ever subclassed.
        super(UnivariateFanGijbels1, self).__init__(nobs=nobs, x=x,
                                                    distr_x=distr_x,
                                                    distr_noise=distr_noise)
# Reuse the same 'ref' but swap the description for the second example class.
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFunction):
    __doc__ = _UnivariateFunction.__doc__ % doc

    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        # Scale of the exogenous sample and of the additive noise.
        self.s_x = 1.
        self.s_noise = 0.5
        self.func = fg2
        # Name the class explicitly in super(): the original
        # super(self.__class__, self) recurses infinitely if this class
        # is ever subclassed.
        super(UnivariateFanGijbels2, self).__init__(nobs=nobs, x=x,
                                                    distr_x=distr_x,
                                                    distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFunction):
    '''
    Eubank p.179f: Fan/Gijbels-1-like mean function on a uniform design.
    '''
    def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
        if distr_x is None:
            # Lazy import keeps scipy optional unless the default is used.
            from scipy import stats
            distr_x = stats.uniform
        self.s_noise = 0.15
        self.func = fg1eu
        # Name the class explicitly in super(): the original
        # super(self.__class__, self) recurses infinitely if this class
        # is ever subclassed.
        super(UnivariateFanGijbels1EU, self).__init__(nobs=nobs, x=x,
                                                      distr_x=distr_x,
                                                      distr_noise=distr_noise)
class UnivariateFunc1(_UnivariateFunction):
    '''
    made up, with sin and quadratic trend; heteroscedastic noise.
    '''
    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        if x is None and distr_x is None:
            # Lazy import keeps scipy optional unless the default is used.
            from scipy import stats
            distr_x = stats.uniform(-2, 4)
        elif x is not None:
            # Infer nobs from the supplied data. Guarded with `elif x is not
            # None`: the original plain `else` raised AttributeError on
            # x.shape when only distr_x was passed (x still None).
            nobs = x.shape[0]
        self.s_noise = 2.
        self.func = func1
        super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
                                              distr_x=distr_x,
                                              distr_noise=distr_noise)

    def het_scale(self, x):
        '''Heteroscedastic noise scale: sqrt(|3 + x|).'''
        return np.sqrt(np.abs(3 + x))
| bsd-3-clause |
randy-waterhouse/bitcoin | test/functional/feature_includeconf.py | 4 | 3989 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests the includeconf argument
Verify that:
1. adding includeconf to the configuration file causes the includeconf
file to be loaded in the correct order.
2. includeconf cannot be used as a command line argument.
3. includeconf cannot be used recursively (ie includeconf can only
be used from the base config file).
4. multiple includeconf arguments can be specified in the main config
file.
"""
import os
from test_framework.test_framework import BitcoinTestFramework
class IncludeConfTest(BitcoinTestFramework):
    # Functional test of bitcoind's -includeconf config-file directive:
    # ordering, command-line rejection, recursion rejection, bad content,
    # missing file, and multiple includes.
    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 1
    def setup_chain(self):
        super().setup_chain()
        # Create additional config files
        # - tmpdir/node0/relative.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative\n")
        # - tmpdir/node0/relative2.conf
        with open(os.path.join(self.options.tmpdir, "node0", "relative2.conf"), "w", encoding="utf8") as f:
            f.write("uacomment=relative2\n")
        # Main config includes relative.conf; uacomment order proves load order.
        with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
            f.write("uacomment=main\nincludeconf=relative.conf\n")
    def run_test(self):
        self.log.info("-includeconf works from config file. subversion should end with 'main; relative)/'")
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.log.info("-includeconf cannot be used as command-line arg")
        self.stop_node(0)
        self.nodes[0].assert_start_raises_init_error(extra_args=["-includeconf=relative2.conf"], expected_msg="Error parsing command line arguments: -includeconf cannot be used from commandline; -includeconf=relative2.conf")
        self.log.info("-includeconf cannot be used recursively. subversion should end with 'main; relative)/'")
        # Append a nested include; it must be ignored with a warning.
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "a", encoding="utf8") as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative)/")
        self.stop_node(0, expected_stderr="warning: -includeconf cannot be used from included files; ignoring -includeconf=relative2.conf")
        self.log.info("-includeconf cannot contain invalid arg")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            f.write("foo=bar\n")
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Invalid configuration value foo")
        self.log.info("-includeconf cannot be invalid path")
        os.remove(os.path.join(self.options.tmpdir, "node0", "relative.conf"))
        self.nodes[0].assert_start_raises_init_error(expected_msg="Error reading configuration file: Failed to include configuration file relative.conf")
        self.log.info("multiple -includeconf args can be used from the base config file. subversion should end with 'main; relative; relative2)/'")
        with open(os.path.join(self.options.tmpdir, "node0", "relative.conf"), "w", encoding="utf8") as f:
            # Restore initial file contents
            f.write("uacomment=relative\n")
        with open(os.path.join(self.options.tmpdir, "node0", "bitcoin.conf"), "a", encoding='utf8') as f:
            f.write("includeconf=relative2.conf\n")
        self.start_node(0)
        subversion = self.nodes[0].getnetworkinfo()["subversion"]
        assert subversion.endswith("main; relative; relative2)/")
if __name__ == '__main__':
IncludeConfTest().main()
| mit |
aim16/shadowsocks0 | shadowsocks/crypto/sodium.py | 1032 | 3778 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_ulonglong, byref, \
create_string_buffer, c_void_p
from shadowsocks.crypto import util
__all__ = ['ciphers']
# Lazily-loaded libsodium handle and load flag (set by load_libsodium()).
libsodium = None
loaded = False
# Size of the shared output buffer; grown on demand in SodiumCrypto.update.
buf_size = 2048
# for salsa20 and chacha20
BLOCK_SIZE = 64
def load_libsodium():
    """Locate libsodium, declare ctypes signatures for the salsa20/chacha20
    xor_ic stream functions, and allocate the shared output buffer."""
    global loaded, libsodium, buf
    libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic',
                                  'libsodium')
    if libsodium is None:
        raise Exception('libsodium not found')
    # int crypto_stream_*_xor_ic(out, in, inlen, nonce, ic, key)
    libsodium.crypto_stream_salsa20_xor_ic.restype = c_int
    libsodium.crypto_stream_salsa20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                       c_ulonglong,
                                                       c_char_p, c_ulonglong,
                                                       c_char_p)
    libsodium.crypto_stream_chacha20_xor_ic.restype = c_int
    libsodium.crypto_stream_chacha20_xor_ic.argtypes = (c_void_p, c_char_p,
                                                        c_ulonglong,
                                                        c_char_p, c_ulonglong,
                                                        c_char_p)
    buf = create_string_buffer(buf_size)
    loaded = True
class SodiumCrypto(object):
    """Stream cipher (salsa20/chacha20) backed by libsodium's xor_ic calls.

    Keeps a byte counter across update() calls so that each call resumes
    the keystream at the right block/offset.
    """
    def __init__(self, cipher_name, key, iv, op):
        # `op` (encrypt/decrypt) is irrelevant for a stream cipher; ignored.
        if not loaded:
            load_libsodium()
        self.key = key
        self.iv = iv
        self.key_ptr = c_char_p(key)
        self.iv_ptr = c_char_p(iv)
        if cipher_name == 'salsa20':
            self.cipher = libsodium.crypto_stream_salsa20_xor_ic
        elif cipher_name == 'chacha20':
            self.cipher = libsodium.crypto_stream_chacha20_xor_ic
        else:
            raise Exception('Unknown cipher')
        # byte counter, not block counter
        self.counter = 0
    def update(self, data):
        global buf_size, buf
        l = len(data)
        # we can only prepend some padding to make the encryption align to
        # blocks
        padding = self.counter % BLOCK_SIZE
        # Grow the shared output buffer if needed (doubled to amortize).
        if buf_size < padding + l:
            buf_size = (padding + l) * 2
            buf = create_string_buffer(buf_size)
        if padding:
            data = (b'\0' * padding) + data
        # ic = counter // BLOCK_SIZE: resume keystream at the current block.
        self.cipher(byref(buf), c_char_p(data), padding + l,
                    self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr)
        self.counter += l
        # buf is copied to a str object when we access buf.raw
        # strip off the padding
        return buf.raw[padding:padding + l]
# Registry: cipher name -> (key length, iv/nonce length, implementation).
ciphers = {
    'salsa20': (32, 8, SodiumCrypto),
    'chacha20': (32, 8, SodiumCrypto),
}
def test_salsa20():
    # Round-trip check: util.run_cipher encrypts then decrypts sample data.
    cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
def test_chacha20():
    # Round-trip check: util.run_cipher encrypts then decrypts sample data.
    cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1)
    decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0)
    util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test_chacha20()
test_salsa20()
| apache-2.0 |
jordiclariana/ansible | lib/ansible/plugins/connection/accelerate.py | 39 | 13466 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import socket
import struct
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.connection import ConnectionBase
from ansible.utils.encrypt import key_for_hostname, keyczar_encrypt, keyczar_decrypt
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (8 bytes)
# ((1400-8)/4)*3) = 1044
# which leaves room for the TCP/IP header. We set this to a
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
class Connection(ConnectionBase):
    ''' raw socket accelerated connection

    Speaks a simple length-prefixed, keyczar-encrypted JSON protocol with an
    accelerate daemon on the remote host. Python-2-era code (uses the
    `file()` builtin in put_file).
    '''
    transport = 'accelerate'
    has_pipelining = False
    become_methods = frozenset(C.BECOME_METHODS).difference(['runas'])
    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        self.conn = None
        # Shared secret derived per-host; used for all payload encryption.
        self.key = key_for_hostname(self._play_context.remote_addr)
    def _connect(self):
        ''' activates the connection object '''
        if not self._connected:
            wrong_user = False
            tries = 3
            self.conn = socket.socket()
            self.conn.settimeout(C.ACCELERATE_CONNECT_TIMEOUT)
            display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr, self._play_context.accelerate_port), host=self._play_context.remote_addr)
            # Retry the TCP connect up to 3 times with a short backoff.
            while tries > 0:
                try:
                    self.conn.connect((self._play_context.remote_addr,self._play_context.accelerate_port))
                    break
                except socket.error:
                    display.vvvv("connection to %s failed, retrying..." % self._play_context.remote_addr, host=self._play_context.remote_addr)
                    time.sleep(0.1)
                    tries -= 1
            if tries == 0:
                display.vvv("Could not connect via the accelerated connection, exceeded # of tries", host=self._play_context.remote_addr)
                raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr, self._play_context.accelerate_port))
            elif wrong_user:
                display.vvv("Restarting daemon with a different remote_user", host=self._play_context.remote_addr)
                raise AnsibleError("The accelerated daemon was started on the remote with a different user")
            self.conn.settimeout(C.ACCELERATE_TIMEOUT)
            if not self.validate_user():
                # the accelerated daemon was started with a
                # different remote_user. The above command
                # should have caused the accelerate daemon to
                # shutdown, so we'll reconnect.
                wrong_user = True
        self._connected = True
        return self
    def send_data(self, data):
        # Frame: 8-byte big-endian unsigned length, then the payload.
        packed_len = struct.pack('!Q',len(data))
        return self.conn.sendall(packed_len + data)
    def recv_data(self):
        # Read one length-prefixed frame; returns None if the peer closed.
        header_len = 8 # size of a packed unsigned long long
        data = b""
        try:
            display.vvvv("in recv_data(), waiting for the header", host=self._play_context.remote_addr)
            while len(data) < header_len:
                d = self.conn.recv(header_len - len(data))
                if not d:
                    display.vvvv("received nothing, bailing out", host=self._play_context.remote_addr)
                    return None
                data += d
            display.vvvv("got the header, unpacking", host=self._play_context.remote_addr)
            data_len = struct.unpack('!Q',data[:header_len])[0]
            data = data[header_len:]
            display.vvvv("data received so far (expecting %d): %d" % (data_len, len(data)), host=self._play_context.remote_addr)
            while len(data) < data_len:
                d = self.conn.recv(data_len - len(data))
                if not d:
                    display.vvvv("received nothing, bailing out", host=self._play_context.remote_addr)
                    return None
                display.vvvv("received %d bytes" % (len(d)), host=self._play_context.remote_addr)
                data += d
            display.vvvv("received all of the data, returning", host=self._play_context.remote_addr)
            return data
        except socket.timeout:
            raise AnsibleError("timed out while waiting to receive data")
    def validate_user(self):
        '''
        Checks the remote uid of the accelerated daemon vs. the
        one specified for this play and will cause the accel
        daemon to exit if they don't match
        '''
        display.vvvv("sending request for validate_user", host=self._play_context.remote_addr)
        data = dict(
            mode='validate_user',
            username=self._play_context.remote_user,
        )
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)
        display.vvvv("waiting for validate_user response", host=self._play_context.remote_addr)
        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                display.vvvv("received a keepalive packet", host=self._play_context.remote_addr)
                continue
            else:
                display.vvvv("received the validate_user response: %s" % (response), host=self._play_context.remote_addr)
                break
        if response.get('failed'):
            return False
        else:
            return response.get('rc') == 0
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the remote host '''
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
        if in_data:
            raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
        display.vvv("EXEC COMMAND %s" % cmd, host=self._play_context.remote_addr)
        data = dict(
            mode='command',
            cmd=cmd,
            executable=C.DEFAULT_EXECUTABLE,
        )
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("Failed to send command to %s" % self._play_context.remote_addr)
        while True:
            # we loop here while waiting for the response, because a
            # long running command may cause us to receive keepalive packets
            # ({"pong":"true"}) rather than the response we want.
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)
            if "pong" in response:
                # it's a keepalive, go back to waiting
                display.vvvv("received a keepalive packet", host=self._play_context.remote_addr)
                continue
            else:
                display.vvvv("received the response", host=self._play_context.remote_addr)
                break
        return (response.get('rc', None), response.get('stdout', ''), response.get('stderr', ''))
    def put_file(self, in_path, out_path):
        ''' transfer a file from local to remote '''
        display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        in_path = to_bytes(in_path, errors='surrogate_or_strict')
        if not os.path.exists(in_path):
            raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
        # NOTE: `file()` is the Python 2 builtin; this code path is py2-only.
        fd = file(in_path, 'rb')
        fstat = os.stat(in_path)
        try:
            display.vvv("PUT file is %d bytes" % fstat.st_size, host=self._play_context.remote_addr)
            last = False
            # Send the file in base64-encoded CHUNK_SIZE pieces; each chunk
            # is acknowledged by the daemon before the next is sent.
            while fd.tell() <= fstat.st_size and not last:
                display.vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size), host=self._play_context.remote_addr)
                data = fd.read(CHUNK_SIZE)
                if fd.tell() >= fstat.st_size:
                    last = True
                data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
                if self._play_context.become:
                    data['user'] = self._play_context.become_user
                data = jsonify(data)
                data = keyczar_encrypt(self.key, data)
                if self.send_data(data):
                    raise AnsibleError("failed to send the file to %s" % self._play_context.remote_addr)
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
                response = keyczar_decrypt(self.key, response)
                response = json.loads(response)
                if response.get('failed',False):
                    raise AnsibleError("failed to put the file in the requested location")
        finally:
            fd.close()
            display.vvvv("waiting for final response after PUT", host=self._play_context.remote_addr)
            response = self.recv_data()
            if not response:
                raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
            response = keyczar_decrypt(self.key, response)
            response = json.loads(response)
            if response.get('failed',False):
                raise AnsibleError("failed to put the file in the requested location")
    def fetch_file(self, in_path, out_path):
        ''' save a remote file to the specified path '''
        display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
        data = dict(mode='fetch', in_path=in_path)
        data = jsonify(data)
        data = keyczar_encrypt(self.key, data)
        if self.send_data(data):
            raise AnsibleError("failed to initiate the file fetch with %s" % self._play_context.remote_addr)
        fh = open(to_bytes(out_path, errors='surrogate_or_strict'), "w")
        try:
            bytes = 0
            # Receive base64 chunks until the daemon marks the last one,
            # acking each chunk with an empty encrypted response.
            while True:
                response = self.recv_data()
                if not response:
                    raise AnsibleError("Failed to get a response from %s" % self._play_context.remote_addr)
                response = keyczar_decrypt(self.key, response)
                response = json.loads(response)
                if response.get('failed', False):
                    raise AnsibleError("Error during file fetch, aborting")
                out = base64.b64decode(response['data'])
                fh.write(out)
                bytes += len(out)
                # send an empty response back to signify we
                # received the last chunk without errors
                data = jsonify(dict())
                data = keyczar_encrypt(self.key, data)
                if self.send_data(data):
                    raise AnsibleError("failed to send ack during file fetch")
                if response.get('last', False):
                    break
        finally:
            # we don't currently care about this final response,
            # we just receive it and drop it. It may be used at some
            # point in the future or we may just have the put/fetch
            # operations not send back a final response at all
            response = self.recv_data()
            display.vvv("FETCH wrote %d bytes to %s" % (bytes, out_path), host=self._play_context.remote_addr)
            fh.close()
    def close(self):
        ''' terminate the connection '''
        # Be a good citizen
        try:
            self.conn.close()
        except:
            pass
| gpl-3.0 |
dagwieers/ansible | test/units/modules/network/eos/test_eos_banner.py | 55 | 3617 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.eos import eos_banner
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosBannerModule(TestEosModule):
    # Unit tests for the eos_banner module over both cli and eapi transports;
    # run_commands and load_config are mocked out in setUp.
    module = eos_banner
    def setUp(self):
        super(TestEosBannerModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.eos.eos_banner.run_commands')
        self.run_commands = self.mock_run_commands.start()
        self.mock_load_config = patch('ansible.modules.network.eos.eos_banner.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestEosBannerModule, self).tearDown()
        self.mock_run_commands.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, transport='cli'):
        # cli returns the raw banner text; eapi wraps it in a dict.
        if transport == 'cli':
            self.run_commands.return_value = [load_fixture('eos_banner_show_banner.txt').strip()]
        else:
            self.run_commands.return_value = [{'loginBanner': load_fixture('eos_banner_show_banner.txt').strip()}]
        self.load_config.return_value = dict(diff=None, session='session')
    def test_eos_banner_create_with_cli_transport(self):
        set_module_args(dict(banner='login', text='test\nbanner\nstring',
                             transport='cli'))
        commands = ['banner login', 'test', 'banner', 'string', 'EOF']
        self.execute_module(changed=True, commands=commands)
    def test_eos_banner_remove_with_cli_transport(self):
        set_module_args(dict(banner='login', state='absent', transport='cli'))
        commands = ['no banner login']
        self.execute_module(changed=True, commands=commands)
    def test_eos_banner_create_with_eapi_transport(self):
        set_module_args(dict(banner='login', text='test\nbanner\nstring',
                             transport='eapi'))
        commands = ['banner login']
        inputs = ['test\nbanner\nstring']
        self.execute_module(changed=True, commands=commands, inputs=inputs, transport='eapi')
    def test_eos_banner_remove_with_eapi_transport(self):
        set_module_args(dict(banner='login', state='absent', transport='eapi'))
        commands = ['no banner login']
        self.execute_module(changed=True, commands=commands, transport='eapi')
    def test_eos_banner_nochange_with_cli_transport(self):
        # Banner already matches the fixture -> no change expected.
        banner_text = load_fixture('eos_banner_show_banner.txt').strip()
        set_module_args(dict(banner='login', text=banner_text, transport='cli'))
        self.execute_module()
    def test_eos_banner_nochange_with_eapi_transport(self):
        # Banner already matches the fixture -> no change expected.
        banner_text = load_fixture('eos_banner_show_banner.txt').strip()
        set_module_args(dict(banner='login', text=banner_text, transport='eapi'))
        self.execute_module(transport='eapi')
| gpl-3.0 |
etherkit/OpenBeacon2 | client/win/venv/Lib/site-packages/pip/_vendor/colorama/winterm.py | 83 | 6438 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from . import win32
# from wincon.h
class WinColor(object):
    """Console color indices (wincon.h character-attribute nibble values)."""
    BLACK   = 0
    BLUE    = 1
    GREEN   = 2
    CYAN    = 3
    RED     = 4
    MAGENTA = 5
    YELLOW  = 6
    GREY    = 7
# from wincon.h
class WinStyle(object):
    """Console intensity bits (wincon.h): FOREGROUND/BACKGROUND_INTENSITY."""
    NORMAL              = 0x00 # dim text, dim background
    BRIGHT              = 0x08 # bright text, dim background
    BRIGHT_BACKGROUND   = 0x80 # dim text, bright background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
# In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
# So that LIGHT_EX colors and BRIGHT style do not clobber each other,
# we track them separately, since LIGHT_EX is overwritten by Fore/Back
# and BRIGHT is overwritten by Style codes.
self._light = 0
def get_attrs(self):
return self._fore + self._back * 16 + (self._style | self._light)
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
# Emulate LIGHT_EX with BRIGHT Style
if light:
self._light |= WinStyle.BRIGHT
else:
self._light &= ~WinStyle.BRIGHT
self.set_console(on_stderr=on_stderr)
def back(self, back=None, light=False, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
# Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
if light:
self._light |= WinStyle.BRIGHT_BACKGROUND
else:
self._light &= ~WinStyle.BRIGHT_BACKGROUND
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
# I'm not currently tracking the position, so there is no default.
# position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_adjust(self, x, y, on_stderr=False):
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y + y, position.X + x)
win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
def erase_screen(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen, and move cursor to (1,1)
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
# get number of character cells before current cursor position
cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
if mode == 2:
# put the cursor where needed
win32.SetConsoleCursorPosition(handle, (1, 1))
def erase_line(self, mode=0, on_stderr=False):
# 0 should clear from the cursor to the end of the line.
# 1 should clear from the cursor to the beginning of the line.
# 2 should clear the entire line.
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
csbi = win32.GetConsoleScreenBufferInfo(handle)
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
    def set_title(self, title):
        """Set the console window title via the Win32 API."""
        win32.SetConsoleTitle(title)
| gpl-3.0 |
martinling/imusim | imusim/tests/system/reality_test.py | 2 | 6801 | """
Test simulated outputs against real captured sensor data.
"""
# Copyright (C) 2009-2011 University of Edinburgh
#
# This file is part of IMUSim.
#
# IMUSim is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IMUSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IMUSim. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
from imusim.io.qualisys_tsv import loadQualisysTSVFile
from imusim.capture.marker import SplinedMarkerCapture
from imusim.trajectories.multi_marker import MultiMarkerTrajectory
from imusim.trajectories.offset import OffsetTrajectory
from imusim.capture.sensor import SensorDataCapture
from imusim.trajectories.rigid_body import SampledBodyModel, SampledJoint
from imusim.trajectories.rigid_body import SplinedBodyModel
from imusim.platforms.imus import IdealIMU
from imusim.behaviours.imu import BasicIMUBehaviour
from imusim.environment.base import Environment
from imusim.maths.vector_fields import NaturalNeighbourInterpolatedField
from imusim.utilities.time_series import TimeSeries
from imusim.simulation.base import Simulation
from imusim.testing.vectors import assert_vectors_correlated
import numpy as np
from os import path
def testAgainstReality():
    """Nose-style generator test: simulate IMUs along optically captured
    trajectories and check that the simulated sensor streams correlate
    with the real captured sensor data (threshold 0.8).

    Python 2 code: ``map()`` is relied upon to return lists (see the
    ``r + i`` concatenations below).
    """
    # --- Input files: a "swing" motion capture, a "stand" reference pose,
    # and two magnetic-field sweep captures used to model the environment.
    dir = path.dirname(__file__)
    filebase = path.join(dir, "swing")
    refbase = path.join(dir, "stand")
    magbases = [path.join(dir, f) for f in ['magsweep1', 'magsweep2']]
    # Optical marker name -> Orient sensor device ID for the mag sweeps.
    maglookup = {
        'Upper Leg IMU' : '66',
        'Orient 8' : '8',
        'Orient 43': '43'}
    magSamples = 2000
    refTime = 1.0
    posStdDev = 0.0005
    rotStdDev = 0.004
    # --- Load and spline the reference pose and the motion captures.
    ref3D = SplinedMarkerCapture(
        loadQualisysTSVFile(refbase + "_3D.tsv"), positionStdDev=posStdDev)
    ref6D = SplinedMarkerCapture(
        loadQualisysTSVFile(refbase + "_6D.tsv"), rotationStdDev=rotStdDev)
    capture3D = SplinedMarkerCapture(
        loadQualisysTSVFile(filebase + "_3D.tsv"), positionStdDev=posStdDev)
    captureSD = SensorDataCapture.load(filebase + ".sdc")
    # --- Marker naming scheme for the leg model.
    hip, thigh, knee, shin, ankle = \
        ['Hip', 'Thigh', 'Knee Hinge', 'Shin', 'Ankle']
    jointNames = ['Upper Leg', 'Lower Leg', 'Foot']
    jointAbbrevs = ['femur', 'tibia', 'foot']
    orientIDs = ['66', '43', '8']
    jointMarkerNames = [hip, knee, ankle]
    refMarkerNames = [[thigh, knee], [shin, ankle], []]
    imuMarkerNames = \
        [[j + ' IMU - ' + str(i) for i in range(1,4)] for j in jointNames]
    # Helpers mapping a capture to marker objects (note: py2 map -> list).
    jointMarkerSets = lambda c: [
        map(c.marker, jointMarkerNames),
        [map(c.marker, r) for r in refMarkerNames],
        [map(c.marker, i) for i in imuMarkerNames]]
    imuMarkerSets = lambda c: [
        [c.marker(i[0]) for i in imuMarkerNames],
        [map(c.marker,i[1:]) for i in imuMarkerNames]]
    # --- Build joint and IMU trajectories; reference vectors are taken
    # from the standing pose at refTime.
    jointRefTrajectories = [MultiMarkerTrajectory(j, r + i, refTime=refTime)
        for j, r, i in zip(*(jointMarkerSets(ref3D)))]
    jointTrajectories = [
        MultiMarkerTrajectory(j, r + i, refVectors=m.refVectors) \
        for j, r, i, m in \
        zip(*(jointMarkerSets(capture3D) + [jointRefTrajectories]))]
    imuRefTrajectories = [MultiMarkerTrajectory(p, r, refTime=refTime)
        for p, r in zip(*(imuMarkerSets(ref3D)))]
    imuVecTrajectories = [MultiMarkerTrajectory(p, r, refVectors=m.refVectors)
        for p, r, m in zip(*(imuMarkerSets(capture3D) + [imuRefTrajectories]))]
    imuRefMarkers = [ref6D.marker(j + ' IMU') for j in jointNames]
    # Rigid offsets/rotations from each joint to its mounted IMU, measured
    # in the reference pose.
    imuOffsets = [i.position(refTime) - j.position(refTime)
        for i, j in zip(imuRefTrajectories, jointRefTrajectories)]
    imuRotations = [t.rotation(refTime).conjugate * m.rotation(refTime)
        for t, m in zip(imuRefTrajectories, imuRefMarkers)]
    imuTrajectories = [OffsetTrajectory(v, o, r)
        for v, o, r in zip(imuVecTrajectories, imuOffsets, imuRotations)]
    imuData = [captureSD.device(i) for i in orientIDs]
    # --- Assemble the sampled body model: the first joint is the root.
    joints = []
    for i in range(len(jointNames)):
        name = jointNames[i]
        traj = jointTrajectories[i]
        if i == 0:
            model = SampledBodyModel(name)
            model.positionKeyFrames = traj.posMarker.positionKeyFrames
            joint = model
        else:
            parent = joints[-1]
            refTraj = jointRefTrajectories[i]
            parentRefTraj = jointRefTrajectories[i - 1]
            pos = refTraj.position(refTime)
            parentPos = parentRefTraj.position(refTime)
            joint = SampledJoint(joints[-1],name, offset=(pos - parentPos))
        joint.rotationKeyFrames = traj.rotationKeyFrames
        joints.append(joint)
    model = SplinedBodyModel(model)
    joints = model.joints
    imuJointTrajectories = [OffsetTrajectory(j, o, r)
        for j, o, r in zip(joints, imuOffsets, imuRotations)]
    # --- Build an interpolated magnetic field model from the mag sweeps.
    positionSets = []
    valueSets = []
    for magbase in magbases:
        orient = SensorDataCapture.load(magbase + '.sdc')
        optical = loadQualisysTSVFile(magbase + '_6D.tsv')
        for marker in optical.markers:
            device = orient.device(maglookup[marker.id])
            magData = device.sensorData('magnetometer').values
            positionSets.append(marker.positionKeyFrames.values)
            valueSets.append(
                marker.rotationKeyFrames.values.rotateVector(magData))
    positions = np.hstack(positionSets)
    values = np.hstack(valueSets)
    # Discard samples with NaNs in either positions or values.
    valid = ~np.any(np.isnan(positions),axis=0) & ~np.any(np.isnan(values),axis=0)
    # NOTE(review): 'dev' is computed but never used below — candidate for
    # removal.
    dev = values - np.median(values[:,valid],axis=1).reshape((3,1))
    # NOTE(review): with the __future__ division import in effect, 'step'
    # is a float; older numpy accepted float slice steps, but this should
    # arguably be an integer division (//) — confirm before changing.
    step = np.shape(values[:,valid])[1] / magSamples
    posSamples = positions[:,valid][:,::step]
    valSamples = values[:,valid][:,::step]
    environment = Environment()
    environment.magneticField = \
        NaturalNeighbourInterpolatedField(posSamples, valSamples)
    # --- Run the simulation with ideal IMUs on each joint trajectory.
    sim = Simulation(environment=environment)
    sim.time = model.startTime
    distortIMUs = []
    dt = 1/capture3D.sampled.frameRate
    for traj in imuJointTrajectories:
        platform = IdealIMU(sim, traj)
        distortIMUs.append(BasicIMUBehaviour(platform, dt))
    sim.run(model.endTime)
    # --- Compare simulated raw measurements against the captured data.
    for imu in range(3):
        for sensorName in ['accelerometer', 'magnetometer', 'gyroscope']:
            # NOTE: 'sim' is rebound here from the Simulation object to a
            # measurement TimeSeries; harmless since the run is finished.
            sim = getattr(distortIMUs[imu].imu,sensorName).rawMeasurements
            true = imuData[imu].sensorData(sensorName)(sim.timestamps + model.startTime)
            yield assert_vectors_correlated, sim.values, true, 0.8
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/tornado/wsgi.py | 18 | 13430 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
interface. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIAdapter` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
from __future__ import absolute_import, division, print_function, with_statement
import sys
from io import BytesIO
import tornado
from tornado.concurrent import Future
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str
from tornado.util import unicode_type, PY3
if PY3:
import urllib.parse as urllib_parse # py3
else:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
if str is unicode_type:
    # Python 3: convert between raw bytes and WSGI "native strings".
    def to_wsgi_str(s):
        # bytes -> str, preserving each byte as a latin1 code point.
        assert isinstance(s, bytes)
        return s.decode('latin1')
    def from_wsgi_str(s):
        # str -> bytes, recovering the original byte values.
        assert isinstance(s, str)
        return s.encode('latin1')
else:
    # Python 2: native strings already are byte strings, so both
    # conversions are identity functions (after a type sanity check).
    def to_wsgi_str(s):
        assert isinstance(s, bytes)
        return s
    def from_wsgi_str(s):
        assert isinstance(s, str)
        return s
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.

    .. deprecated:: 4.0
       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        # Delegate each WSGI request to a freshly wrapped adapter.
        adapter = WSGIAdapter(self)
        return adapter(environ, start_response)
# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it.  A single module-level instance is
# shared by all connections; its result is always None and never awaited
# for a meaningful value.
_dummy_future = Future()
_dummy_future.set_result(None)
class _WSGIConnection(httputil.HTTPConnection):
    """Adapts Tornado's `HTTPConnection` interface to a WSGI response.

    Body chunks are buffered in memory (``_write_buffer``) and handed back
    to the WSGI server after the handler finishes synchronously.
    """
    def __init__(self, method, start_response, context):
        self.method = method                      # HTTP method, e.g. 'GET'
        self.start_response = start_response      # WSGI start_response callable
        self.context = context                    # _WSGIRequestContext
        self._write_buffer = []                   # accumulated body chunks
        self._finished = False                    # set by finish()
        self._expected_content_remaining = None   # bytes still owed per Content-Length
        self._error = None                        # first HTTPOutputError, if any
    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
        # so we can simply ignore the callback.
        pass
    def write_headers(self, start_line, headers, chunk=None, callback=None):
        """Start the WSGI response and optionally write the first chunk."""
        # Track how many body bytes the handler is allowed to write, so
        # over- and under-writes can be reported as HTTPOutputError.
        if self.method == 'HEAD':
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        self.start_response(
            '%s %s' % (start_line.code, start_line.reason),
            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
        if chunk is not None:
            self.write(chunk, callback)
        elif callback is not None:
            callback()
        return _dummy_future
    def write(self, chunk, callback=None):
        """Buffer a body chunk, enforcing the declared Content-Length."""
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                self._error = httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
                raise self._error
        self._write_buffer.append(chunk)
        if callback is not None:
            callback()
        return _dummy_future
    def finish(self):
        """Mark the response complete; verify Content-Length was honoured."""
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0):
            self._error = httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
            raise self._error
        self._finished = True
class _WSGIRequestContext(object):
    """Minimal request context: the peer address and the URL scheme."""

    def __init__(self, remote_ip, protocol):
        self.remote_ip = remote_ip
        self.protocol = protocol

    def __str__(self):
        # Logging code stringifies the context; show just the client IP.
        return self.remote_ip
class WSGIAdapter(object):
    """Converts a `tornado.web.Application` instance into a WSGI application.
    Example usage::
        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server
        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")
        if __name__ == "__main__":
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            wsgi_app = tornado.wsgi.WSGIAdapter(application)
            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
            server.serve_forever()
    See the `appengine demo
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.
    In WSGI mode asynchronous methods are not supported. This means
    that it is not possible to use `.AsyncHTTPClient`, or the
    `tornado.auth` or `tornado.websocket` modules.
    .. versionadded:: 4.0
    """
    def __init__(self, application):
        if isinstance(application, WSGIApplication):
            # Legacy WSGIApplication: bypass its __call__ (which would
            # recurse back into WSGIAdapter) and invoke the plain
            # web.Application machinery directly.
            self.application = lambda request: web.Application.__call__(
                application, request)
        else:
            self.application = application
    def __call__(self, environ, start_response):
        """WSGI entry point: build an ``HTTPServerRequest`` from *environ*,
        run the application synchronously and return the buffered body.
        """
        method = environ["REQUEST_METHOD"]
        # Reassemble the request URI from SCRIPT_NAME/PATH_INFO, undoing the
        # WSGI latin1 smuggling before re-quoting (see to_wsgi_str above).
        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        if environ.get("QUERY_STRING"):
            uri += "?" + environ["QUERY_STRING"]
        headers = httputil.HTTPHeaders()
        # CONTENT_TYPE/CONTENT_LENGTH have dedicated environ keys in WSGI.
        if environ.get("CONTENT_TYPE"):
            headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            headers["Content-Length"] = environ["CONTENT_LENGTH"]
        for key in environ:
            if key.startswith("HTTP_"):
                # HTTP_X_FOO -> X-FOO
                headers[key[5:].replace("_", "-")] = environ[key]
        if headers.get("Content-Length"):
            body = environ["wsgi.input"].read(
                int(headers["Content-Length"]))
        else:
            body = b""
        protocol = environ["wsgi.url_scheme"]
        remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            host = environ["HTTP_HOST"]
        else:
            host = environ["SERVER_NAME"]
        connection = _WSGIConnection(method, start_response,
                                     _WSGIRequestContext(remote_ip, protocol))
        request = httputil.HTTPServerRequest(
            method, uri, "HTTP/1.1", headers=headers, body=body,
            host=host, connection=connection)
        request._parse_body()
        self.application(request)
        if connection._error:
            raise connection._error
        if not connection._finished:
            # Asynchronous handlers cannot complete under WSGI.
            raise Exception("request did not finish synchronously")
        return connection._write_buffer
class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    .. warning::
       WSGI is a *synchronous* interface, while Tornado's concurrency model
       is based on single-threaded asynchronous execution.  This means that
       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
       than running the same app in a multi-threaded WSGI server like
       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
       benefits to combining Tornado and WSGI in the same process that
       outweigh the reduced scalability.
    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    run it. For example::
        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]
        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.current().start()
    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop.
    The `tornado.web.FallbackHandler` class is often useful for mixing
    Tornado and WSGI apps in the same server.  See
    https://github.com/bdarnell/django-tornado-demo for a complete example.
    """
    def __init__(self, wsgi_application):
        # The wrapped PEP 3333 application callable.
        self.wsgi_application = wsgi_application
    def __call__(self, request):
        """Handle one Tornado request by invoking the wrapped WSGI app."""
        data = {}
        response = []
        def start_response(status, response_headers, exc_info=None):
            # Capture status/headers for later; per PEP 3333 this must
            # return a write() callable (here: append to the body buffer).
            data["status"] = status
            data["headers"] = response_headers
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # Close the WSGI iterable if it supports it (PEP 3333).
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        if status_code != 304:
            # Fill in standard headers the application did not provide.
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))
        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        # Hand the fully-buffered response back to Tornado's connection.
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
    @staticmethod
    def environ(request):
        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
        """
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(escape.url_unescape(
                request.path, encoding=None, plus=False)),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": BytesIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # Content-Type/Content-Length get dedicated keys; everything else
        # becomes HTTP_* per the WSGI spec.
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        for key, value in request.headers.items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ
    def _log(self, status_code, request):
        # Log level by status class: 2xx/3xx info, 4xx warning, 5xx error.
        if status_code < 400:
            log_method = access_log.info
        elif status_code < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
# Alias for httputil.HTTPServerRequest, presumably kept so that older code
# importing tornado.wsgi.HTTPRequest keeps working — confirm before removing.
HTTPRequest = httputil.HTTPServerRequest
| gpl-3.0 |
bramalingam/openmicroscopy | components/tools/OmeroPy/src/omero_ext/tiltpicker/pyami/weakattr.py | 16 | 1113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import weakref
import threading
# Key -> target object; the WeakValueDictionary drops the entry (and its
# strong reference to the Key) once the target object is garbage collected.
keyobject = weakref.WeakValueDictionary()
# Key -> stored attribute value; the entry dies together with the Key.
keyvalue = weakref.WeakKeyDictionary()
# "id(obj) + attrname" string -> Key, held weakly so unused Keys can die.
keylookup = weakref.WeakValueDictionary()
# Guards all reads/writes of the dictionaries above.
threadlock = threading.RLock()
class Key(object):
    """Unique token standing for one (object, attribute-name) pair."""
    pass
def key(obj, attrname):
    """Return the Key token for (obj, attrname), creating one if needed."""
    token_id = str(id(obj)) + attrname
    # WeakValueDictionary.get returns None for missing or expired entries;
    # stored values are always Key instances, never None.
    existing = keylookup.get(token_id)
    if existing is not None:
        return existing
    fresh = Key()
    keylookup[token_id] = fresh
    return fresh
def set(obj, attrname, attrvalue):
    """Attach *attrvalue* to *obj* under *attrname* via weak references.

    Neither the object nor the value is kept alive by this module once the
    object itself is garbage collected.
    """
    with threadlock:
        k = key(obj, attrname)
        # keyobject ties the Key's lifetime to obj; keyvalue stores the
        # value keyed weakly on the Key.
        keyobject[k] = obj
        keyvalue[k] = attrvalue
def get(obj, attrname):
    """Return the value previously stored for (obj, attrname).

    Raises AttributeError when no value has been set (or it has expired).
    """
    with threadlock:
        k = key(obj, attrname)
        try:
            return keyvalue[k]
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'" % (type(obj), attrname))
def debug():
    """Print the current sizes of the module's weak dictionaries."""
    print 'KEYOBJECT', len(keyobject)
    print 'KEYVALUE', len(keyvalue)
    print 'KEYLOOKUP', len(keylookup)
if __name__ == '__main__':
    # Smoke test: attach a weak attribute to throwaway objects and show via
    # debug() that the bookkeeping dictionaries do not grow without bound.
    class MyThing(object):
        pass
    debug()
    for i in range(45):
        a = MyThing()
        set(a, 'asdf', i)
        print 'ASDF', get(a, 'asdf')
    debug()
| gpl-2.0 |
jamesls/boto | boto/file/bucket.py | 97 | 4075 | # Copyright 2010 Google Inc.
# Copyright (c) 2011, Nexenta Systems Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# File representation of bucket, for use with "file://" URIs.
import os
from key import Key
from boto.file.simpleresultset import SimpleResultSet
from boto.s3.bucketlistresultset import BucketListResultSet
class Bucket(object):
    """File-system stand-in for an S3-style bucket, wrapping a single key
    path for use with "file://" URIs.
    """

    def __init__(self, name, contained_key):
        """Instantiate an anonymous file-based Bucket around a single key."""
        self.name = name
        self.contained_key = contained_key

    def __iter__(self):
        return iter(BucketListResultSet(self))

    def __str__(self):
        return 'anonymous bucket for file://' + self.contained_key

    def delete_key(self, key_name, headers=None,
                   version_id=None, mfa_token=None):
        """Delete the file backing a key.

        :type key_name: string
        :param key_name: The key name (file path) to delete

        :type version_id: string
        :param version_id: Unused in this subclass.

        :type mfa_token: tuple or list of strings
        :param mfa_token: Unused in this subclass.
        """
        os.remove(key_name)

    def get_all_keys(self, headers=None, **params):
        """Return the single key around which this anonymous Bucket
        was instantiated.

        :rtype: SimpleResultSet
        :return: A result set holding the one contained key
        """
        return SimpleResultSet([Key(self.name, self.contained_key)])

    def get_key(self, key_name, headers=None, version_id=None,
                key_type=Key.KEY_REGULAR_FILE):
        """Return a Key for an existing file path.

        The special name '-' yields a stream-readable Key instead of
        opening a file.

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type version_id: string
        :param version_id: Unused in this subclass.

        :rtype: :class:`boto.file.key.Key`
        :returns: A Key object from this bucket.
        """
        if key_name == '-':
            return Key(self.name, '-', key_type=Key.KEY_STREAM_READABLE)
        return Key(self.name, key_name, open(key_name, 'rb'))

    def new_key(self, key_name=None, key_type=Key.KEY_REGULAR_FILE):
        """Create a new key, making parent directories as needed.

        The special name '-' yields a stream-writable Key instead of
        creating a file.

        :type key_name: string
        :param key_name: The name of the key to create

        :rtype: :class:`boto.file.key.Key`
        :returns: An instance of the newly created key object
        """
        if key_name == '-':
            return Key(self.name, '-', key_type=Key.KEY_STREAM_WRITABLE)
        parent = os.path.dirname(key_name)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
        return Key(self.name, key_name, open(key_name, 'wb'))
| mit |
SeNeReKo/textplot-tcf | textplot_tcf/text.py | 1 | 4264 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from collections import OrderedDict
from textplot.text import Text as Text_
from tcflib.tcf import TextCorpus
class Text(Text_):
    """
    Implementation of `textplot.Text` that reads an annotated text in TCF
    format instead of processing plain text.
    """
    @classmethod
    def from_file(cls, path, **kwargs):
        """
        Read corpus from a TCF file.
        """
        # Since it is an XML file, read it in binary format.
        return cls(open(path, 'rb').read(), **kwargs)
    def __init__(self, text, stopwordfile=None, postags=None,
                 disambiguate=False):
        """
        Parse the annotated TCF file into a `TextCorpus`.
        :param stopwordfile: The stopword file, one stopword per line.
            If `stopwordfile` is None, the default stopword file is used.
            If `stopwordfile` is an empty string (``''``), no stopword list is
            used.
        :param postags: List of MAF part-of-speech tags that are taken into
            account.
        :param disambiguate: Use wordsenses information from TCF to
            disambiguate lemmas.
        """
        self.stopwordfile = stopwordfile
        self.postags = postags
        # Request only the TCF layers that are actually needed.
        layers = ['text', 'tokens', 'lemmas']
        if self.postags:
            from tcflib.tagsets import TagSet
            self.tagset = TagSet('DC-1345')
            # Map the given tag names to tagset entries for is_a() tests.
            self.postags = [self.tagset[tag] for tag in postags]
            layers.append('POStags')
        self.disambiguate = disambiguate
        if self.disambiguate:
            layers.append('wsd')
        self.corpus = TextCorpus(text, layers=layers)
        self.text = self.corpus.text.text
        self.tokenize()
    def tokens_from_corpus(self):
        """
        Create a token stream in textplots dict format from a `TextCorpus`.
        """
        for offset, token in enumerate(self.corpus.tokens):
            if self.disambiguate:
                # Append the word senses so that different senses of the
                # same lemma count as distinct terms.
                stemmed = '{} ({})'.format(token.lemma,
                                           ', '.join(token.wordsenses))
            else:
                stemmed = token.lemma
            yield { # Emit the token.
                'stemmed': stemmed,
                'unstemmed': token.text,
                'offset': offset,
                'left': None,
                'right': None,
                'tcftoken': token,
            }
    def stopwords(self, path='stopwords.txt'):
        """
        Load a set of stopwords.
        Copied from textplot so the local stopword file is used.
        """
        if self.stopwordfile == '':
            # NOTE(review): returns a list here but a set() below; callers
            # only do membership tests, so this is harmless, but returning
            # set() would be more consistent.
            return []
        elif self.stopwordfile is None:
            # Fall back to the stopword file shipped next to this module.
            path = os.path.join(os.path.dirname(__file__), path)
        else:
            path = self.stopwordfile
        with open(path) as f:
            return set(f.read().splitlines())
    def test_pos(self, token):
        """
        Test if token has one of the allowed POS tags.
        """
        if not self.postags:
            # Do not test, always return True.
            return True
        token = token['tcftoken']
        for postag in self.postags:
            if token.postag.is_a(postag):
                return True
        return False
    def tokenize(self):
        """
        Tokenize the text and filter the token stream.
        """
        self.tokens = []
        self.terms = OrderedDict()
        # Load stopwords.
        stopwords = self.stopwords()
        # Generate tokens.
        for token in self.tokens_from_corpus():
            # Ignore stopwords.
            if (token['unstemmed'] in stopwords or
                    token['stemmed'] in stopwords or
                    not self.test_pos(token)):
                # Keep a None placeholder so list indices in self.tokens
                # still line up with token offsets.
                self.tokens.append(None)
            else:
                # Token:
                self.tokens.append(token)
                # Term: record every offset at which the term occurs.
                offsets = self.terms.setdefault(token['stemmed'], [])
                offsets.append(token['offset'])
    def unstem(self, term):
        """
        Since this implementation uses lemmas instead of stems, it is not
        necessary to unstem terms.
        """
        return term
mancoast/CPythonPyc_test | cpython/252_list_tests.py | 7 | 15951 | """
Tests common to list and UserList.UserList
"""
import sys
import os
import unittest
from test import test_support, seq_tests
class CommonTest(seq_tests.CommonTest):
    def test_init(self):
        """Constructor: optional iterable arg, re-init semantics, copying."""
        # Iterable arg is optional
        self.assertEqual(self.type2test([]), self.type2test())
        # Init clears previous values
        a = self.type2test([1, 2, 3])
        a.__init__()
        self.assertEqual(a, self.type2test([]))
        # Init overwrites previous values
        a = self.type2test([1, 2, 3])
        a.__init__([4, 5, 6])
        self.assertEqual(a, self.type2test([4, 5, 6]))
        # Mutables always return a new object
        b = self.type2test(a)
        self.assertNotEqual(id(a), id(b))
        self.assertEqual(a, b)
    def test_repr(self):
        """repr()/str() match list's, including self-referential lists."""
        l0 = []
        l2 = [0, 1, 2]
        a0 = self.type2test(l0)
        a2 = self.type2test(l2)
        self.assertEqual(str(a0), str(l0))
        self.assertEqual(repr(a0), repr(l0))
        self.assertEqual(`a2`, `l2`)
        self.assertEqual(str(a2), "[0, 1, 2]")
        self.assertEqual(repr(a2), "[0, 1, 2]")
        # A list containing itself must print "[...]" instead of recursing.
        a2.append(a2)
        a2.append(3)
        self.assertEqual(str(a2), "[0, 1, 2, [...], 3]")
        self.assertEqual(repr(a2), "[0, 1, 2, [...], 3]")
    def test_print(self):
        """print of a (self-referential) list writes exactly its repr()."""
        d = self.type2test(xrange(200))
        d.append(d)
        d.extend(xrange(200,400))
        d.append(d)
        d.append(400)
        try:
            fo = open(test_support.TESTFN, "wb")
            print >> fo, d,
            fo.close()
            fo = open(test_support.TESTFN, "rb")
            self.assertEqual(fo.read(), repr(d))
        finally:
            fo.close()
            os.remove(test_support.TESTFN)
    def test_set_subscript(self):
        """Extended-slice assignment: validation and element placement."""
        a = self.type2test(range(20))
        # Step 0 is invalid; RHS must be iterable; an extended slice needs
        # exactly as many items as the slice selects.
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 0), [1,2,3])
        self.assertRaises(TypeError, a.__setitem__, slice(0, 10), 1)
        self.assertRaises(ValueError, a.__setitem__, slice(0, 10, 2), [1,2])
        self.assertRaises(TypeError, a.__getitem__, 'x', 1)
        a[slice(2,10,3)] = [1,2,3]
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 4, 2, 6, 7, 3,
                                            9, 10, 11, 12, 13, 14, 15,
                                            16, 17, 18, 19]))
    def test_reversed(self):
        """reversed() yields items back-to-front, then raises StopIteration."""
        a = self.type2test(range(20))
        r = reversed(a)
        self.assertEqual(list(r), self.type2test(range(19, -1, -1)))
        self.assertRaises(StopIteration, r.next)
        # Reversing an empty sequence yields nothing.
        self.assertEqual(list(reversed(self.type2test())),
                         self.type2test())
    def test_setitem(self):
        """Item assignment: positive/negative indices, int and long forms."""
        a = self.type2test([0, 1])
        a[0] = 0
        a[1] = 100
        self.assertEqual(a, self.type2test([0, 100]))
        a[-1] = 200
        self.assertEqual(a, self.type2test([0, 200]))
        a[-2] = 100
        self.assertEqual(a, self.type2test([100, 200]))
        # Out-of-range indices and missing arguments must raise.
        self.assertRaises(IndexError, a.__setitem__, -3, 200)
        self.assertRaises(IndexError, a.__setitem__, 2, 200)
        a = self.type2test([])
        self.assertRaises(IndexError, a.__setitem__, 0, 200)
        self.assertRaises(IndexError, a.__setitem__, -1, 200)
        self.assertRaises(TypeError, a.__setitem__)
        # Long (0L) indices must behave exactly like plain ints.
        a = self.type2test([0,1,2,3,4])
        a[0L] = 1
        a[1L] = 2
        a[2L] = 3
        self.assertEqual(a, self.type2test([1,2,3,3,4]))
        a[0] = 5
        a[1] = 6
        a[2] = 7
        self.assertEqual(a, self.type2test([5,6,7,3,4]))
        a[-2L] = 88
        a[-1L] = 99
        self.assertEqual(a, self.type2test([5,6,7,88,99]))
        a[-2] = 8
        a[-1] = 9
        self.assertEqual(a, self.type2test([5,6,7,8,9]))
    def test_delitem(self):
        """del of single items, positive and negative, with bounds checks."""
        a = self.type2test([0, 1])
        del a[1]
        self.assertEqual(a, [0])
        del a[0]
        self.assertEqual(a, [])
        a = self.type2test([0, 1])
        del a[-2]
        self.assertEqual(a, [1])
        del a[-1]
        self.assertEqual(a, [])
        # Out-of-range indices and missing arguments must raise.
        a = self.type2test([0, 1])
        self.assertRaises(IndexError, a.__delitem__, -3)
        self.assertRaises(IndexError, a.__delitem__, 2)
        a = self.type2test([])
        self.assertRaises(IndexError, a.__delitem__, 0)
        self.assertRaises(TypeError, a.__delitem__)
    def test_setslice(self):
        """Simple-slice assignment, including self-assignment and growth."""
        l = [0, 1]
        a = self.type2test(l)
        # Exhaustively assign every simple slice from/to an equal list.
        for i in range(-3, 4):
            a[:i] = l[:i]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[:i] = a[:i]
            self.assertEqual(a2, a)
            a[i:] = l[i:]
            self.assertEqual(a, l)
            a2 = a[:]
            a2[i:] = a[i:]
            self.assertEqual(a2, a)
            for j in range(-3, 4):
                a[i:j] = l[i:j]
                self.assertEqual(a, l)
                a2 = a[:]
                a2[i:j] = a[i:j]
                self.assertEqual(a2, a)
        aa2 = a2[:]
        aa2[:0] = [-2, -1]
        self.assertEqual(aa2, [-2, -1, 0, 1])
        aa2[0:] = []
        self.assertEqual(aa2, [])
        # Assigning a list into a slice of itself must not loop or corrupt.
        a = self.type2test([1, 2, 3, 4, 5])
        a[:-1] = a
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5]))
        a = self.type2test([1, 2, 3, 4, 5])
        a[1:-1] = a
        self.assertEqual(a, self.type2test([1, 1, 2, 3, 4, 5, 5]))
        a = self.type2test([])
        a[:] = tuple(range(10))
        self.assertEqual(a, self.type2test(range(10)))
        # __setslice__ requires an iterable value and all its arguments.
        self.assertRaises(TypeError, a.__setslice__, 0, 1, 5)
        self.assertRaises(TypeError, a.__setslice__)
    def test_delslice(self):
        """del of simple slices with int and long (1L) bounds."""
        a = self.type2test([0, 1])
        del a[1:2]
        del a[0:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[1L:2L]
        del a[0L:1L]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[-2:-1]
        self.assertEqual(a, self.type2test([1]))
        a = self.type2test([0, 1])
        del a[-2L:-1L]
        self.assertEqual(a, self.type2test([1]))
        a = self.type2test([0, 1])
        del a[1:]
        del a[:1]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[1L:]
        del a[:1L]
        self.assertEqual(a, self.type2test([]))
        a = self.type2test([0, 1])
        del a[-1:]
        self.assertEqual(a, self.type2test([0]))
        a = self.type2test([0, 1])
        del a[-1L:]
        self.assertEqual(a, self.type2test([0]))
        a = self.type2test([0, 1])
        del a[:]
        self.assertEqual(a, self.type2test([]))
    def test_append(self):
        """append adds one item at the end; requires exactly one argument."""
        a = self.type2test([])
        a.append(0)
        a.append(1)
        a.append(2)
        self.assertEqual(a, self.type2test([0, 1, 2]))
        self.assertRaises(TypeError, a.append)
    def test_extend(self):
        """extend with sequences, self, and strings; rejects non-iterables."""
        a1 = self.type2test([0])
        a2 = self.type2test((0, 1))
        a = a1[:]
        a.extend(a2)
        self.assertEqual(a, a1 + a2)
        a.extend(self.type2test([]))
        self.assertEqual(a, a1 + a2)
        # Extending with self must snapshot the contents first.
        a.extend(a)
        self.assertEqual(a, self.type2test([0, 0, 1, 0, 0, 1]))
        a = self.type2test("spam")
        a.extend("eggs")
        self.assertEqual(a, list("spameggs"))
        self.assertRaises(TypeError, a.extend, None)
        self.assertRaises(TypeError, a.extend)
    def test_insert(self):
        """insert() supports negative positions and clamps out-of-range
        positions (±200) to the nearest end."""
        a = self.type2test([0, 1, 2])
        a.insert(0, -2)
        a.insert(1, -1)
        a.insert(2, 0)
        self.assertEqual(a, [-2, -1, 0, 0, 1, 2])
        b = a[:]
        b.insert(-2, "foo")
        b.insert(-200, "left")   # clamps to position 0
        b.insert(200, "right")   # clamps to the end
        self.assertEqual(b, self.type2test(["left",-2,-1,0,0,"foo",1,2,"right"]))
        self.assertRaises(TypeError, a.insert)
    def test_pop(self):
        """pop() defaults to the last element, accepts an index, and raises
        IndexError when out of range or when the sequence is empty."""
        a = self.type2test([-1, 0, 1])
        a.pop()
        self.assertEqual(a, [-1, 0])
        a.pop(0)
        self.assertEqual(a, [0])
        self.assertRaises(IndexError, a.pop, 5)
        a.pop(0)
        self.assertEqual(a, [])
        self.assertRaises(IndexError, a.pop)
        self.assertRaises(TypeError, a.pop, 42, 42)
        # NOTE(review): this trailing assignment is never used afterwards —
        # looks like a leftover from a removed assertion; confirm before
        # deleting.
        a = self.type2test([0, 10, 20, 30, 40])
    def test_remove(self):
        """remove() deletes the first equal element, raises ValueError when
        absent, and must not corrupt the sequence if __eq__ raises."""
        a = self.type2test([0, 0, 1])
        a.remove(1)
        self.assertEqual(a, [0, 0])
        a.remove(0)
        self.assertEqual(a, [0])
        a.remove(0)
        self.assertEqual(a, [])
        self.assertRaises(ValueError, a.remove, 0)
        self.assertRaises(TypeError, a.remove)
        class BadExc(Exception):
            pass
        # Comparison object that blows up when compared against 2.
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.remove, BadCmp())
        # Comparison object that always raises.
        class BadCmp2:
            def __eq__(self, other):
                raise BadExc()
        d = self.type2test('abcdefghcij')
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghcij'))
        d.remove('c')
        self.assertEqual(d, self.type2test('abdefghij'))
        self.assertRaises(ValueError, d.remove, 'c')
        self.assertEqual(d, self.type2test('abdefghij'))
        # Handle comparison errors
        d = self.type2test(['a', 'b', BadCmp2(), 'c'])
        e = self.type2test(d)
        self.assertRaises(BadExc, d.remove, 'c')
        for x, y in zip(d, e):
            # verify that original order and values are retained.
            self.assert_(x is y)
    def test_count(self):
        """count() tallies equal elements and propagates exceptions raised
        by the elements' __eq__."""
        a = self.type2test([0, 1, 2])*3
        self.assertEqual(a.count(0), 3)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(3), 0)
        self.assertRaises(TypeError, a.count)
        class BadExc(Exception):
            pass
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        self.assertRaises(BadExc, a.count, BadCmp())
    def test_index(self):
        """index() with start/stop windows, negative bounds, huge bounds
        (±4*sys.maxint), comparison errors, and mutation during search."""
        u = self.type2test([0, 1])
        self.assertEqual(u.index(0), 0)
        self.assertEqual(u.index(1), 1)
        self.assertRaises(ValueError, u.index, 2)
        u = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(u.count(0), 2)
        self.assertEqual(u.index(0), 2)
        self.assertEqual(u.index(0, 2), 2)
        self.assertEqual(u.index(-2, -10), 0)
        self.assertEqual(u.index(0, 3), 3)
        self.assertEqual(u.index(0, 3, 4), 3)
        self.assertRaises(ValueError, u.index, 2, 0, -10)
        self.assertRaises(TypeError, u.index)
        class BadExc(Exception):
            pass
        # Element whose __eq__ raises when compared against 2.
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.index, BadCmp())
        a = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(a.index(0), 2)
        self.assertEqual(a.index(0, 2), 2)
        self.assertEqual(a.index(0, -4), 2)
        self.assertEqual(a.index(-2, -10), 0)
        self.assertEqual(a.index(0, 3), 3)
        self.assertEqual(a.index(0, -3), 3)
        self.assertEqual(a.index(0, 3, 4), 3)
        self.assertEqual(a.index(0, -3, -2), 3)
        # Bounds far outside the sequence must be clamped, not overflow.
        self.assertEqual(a.index(0, -4*sys.maxint, 4*sys.maxint), 2)
        self.assertRaises(ValueError, a.index, 0, 4*sys.maxint,-4*sys.maxint)
        self.assertRaises(ValueError, a.index, 2, 0, -10)
        a.remove(0)
        self.assertRaises(ValueError, a.index, 2, 0, 4)
        self.assertEqual(a, self.type2test([-2, -1, 0, 1, 2]))
        # Test modifying the list during index's iteration
        class EvilCmp:
            def __init__(self, victim):
                self.victim = victim
            def __eq__(self, other):
                del self.victim[:]
                return False
        a = self.type2test()
        a[:] = [EvilCmp(a) for _ in xrange(100)]
        # This used to seg fault before patch #1005778
        self.assertRaises(ValueError, a.index, None)
    def test_reverse(self):
        """reverse() flips in place; reversing twice round-trips; extra
        arguments are a TypeError."""
        u = self.type2test([-2, -1, 0, 1, 2])
        u2 = u[:]
        u.reverse()
        self.assertEqual(u, [2, 1, 0, -1, -2])
        u.reverse()
        self.assertEqual(u, u2)
        self.assertRaises(TypeError, u.reverse, 42)
    def test_sort(self):
        """sort() with the Python-2 cmp-function argument, including a
        comparison that mutates the list mid-sort (must raise ValueError)."""
        u = self.type2test([1, 0])
        u.sort()
        self.assertEqual(u, [0, 1])
        u = self.type2test([2,1,0,-1,-2])
        u.sort()
        self.assertEqual(u, self.type2test([-2,-1,0,1,2]))
        self.assertRaises(TypeError, u.sort, 42, 42)
        # Reverse comparison function (Python 2 cmp-style).
        def revcmp(a, b):
            return cmp(b, a)
        u.sort(revcmp)
        self.assertEqual(u, self.type2test([2,1,0,-1,-2]))
        # The following dumps core in unpatched Python 1.5:
        def myComparison(x,y):
            return cmp(x%3, y%7)
        z = self.type2test(range(12))
        z.sort(myComparison)
        self.assertRaises(TypeError, z.sort, 2)
        # Mutating the list from inside the comparison must be detected.
        def selfmodifyingComparison(x,y):
            z.append(1)
            return cmp(x, y)
        self.assertRaises(ValueError, z.sort, selfmodifyingComparison)
        self.assertRaises(TypeError, z.sort, lambda x, y: 's')
        self.assertRaises(TypeError, z.sort, 42, 42, 42, 42)
    def test_slice(self):
        """Slice assignment from an arbitrary iterable (a string)."""
        u = self.type2test("spam")
        u[:2] = "h"
        self.assertEqual(u, list("ham"))
    def test_iadd(self):
        """+= must mutate in place (same object) and accept any iterable;
        None is a TypeError."""
        super(CommonTest, self).test_iadd()
        u = self.type2test([0, 1])
        u2 = u
        u += [2, 3]
        self.assert_(u is u2)   # in-place: identity preserved
        u = self.type2test("spam")
        u += "eggs"
        self.assertEqual(u, self.type2test("spameggs"))
        self.assertRaises(TypeError, u.__iadd__, None)
    def test_imul(self):
        """*= repeats in place; *= 0 empties; the object identity is kept
        even when multiplying an empty sequence."""
        u = self.type2test([0, 1])
        u *= 3
        self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
        u *= 0
        self.assertEqual(u, self.type2test([]))
        s = self.type2test([])
        oldid = id(s)
        s *= 10
        self.assertEqual(id(s), oldid)
    def test_extendedslicing(self):
        """Extended (stepped) slicing: deletion and assignment with
        positive, negative and oversized steps."""
        # subscript
        a = self.type2test([0,1,2,3,4])
        # deletion
        del a[::2]
        self.assertEqual(a, self.type2test([1,3]))
        a = self.type2test(range(5))
        del a[1::2]
        self.assertEqual(a, self.type2test([0,2,4]))
        a = self.type2test(range(5))
        del a[1::-2]
        self.assertEqual(a, self.type2test([0,2,3,4]))
        a = self.type2test(range(10))
        del a[::1000]   # step larger than the sequence: only item 0 goes
        self.assertEqual(a, self.type2test([1, 2, 3, 4, 5, 6, 7, 8, 9]))
        # assignment
        a = self.type2test(range(10))
        a[::2] = [-1]*5
        self.assertEqual(a, self.type2test([-1, 1, -1, 3, -1, 5, -1, 7, -1, 9]))
        a = self.type2test(range(10))
        a[::-4] = [10]*3
        self.assertEqual(a, self.type2test([0, 10, 2, 3, 4, 10, 6, 7, 8 ,10]))
        a = self.type2test(range(4))
        a[::-1] = a   # reverse via extended-slice self-assignment
        self.assertEqual(a, self.type2test([3, 2, 1, 0]))
        # a[2:3], a[slice(2,3)] and a[2:3:] must behave identically.
        a = self.type2test(range(10))
        b = a[:]
        c = a[:]
        a[2:3] = self.type2test(["two", "elements"])
        b[slice(2,3)] = self.type2test(["two", "elements"])
        c[2:3:] = self.type2test(["two", "elements"])
        self.assertEqual(a, b)
        self.assertEqual(a, c)
        a = self.type2test(range(10))
        a[::2] = tuple(range(5))
        self.assertEqual(a, self.type2test([0, 1, 1, 3, 2, 5, 3, 7, 4, 9]))
    def test_constructor_exception_handling(self):
        """Bug #1242657: an exception from __len__ during construction must
        propagate, not be swallowed while iterating __iter__."""
        class F(object):
            def __iter__(self):
                yield 23
            def __len__(self):
                raise KeyboardInterrupt
        self.assertRaises(KeyboardInterrupt, list, F())
| gpl-3.0 |
mahinthjoe/bedrock | py3env/lib/python3.4/site-packages/pip/_vendor/requests/utils.py | 618 | 21334 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import platform
import re
import sys
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mapping-like objects (anything with ``items``) are converted to an
    item sequence; every other value is passed through untouched.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort length of *o*.

    Tries, in order: ``__len__``, a ``len`` attribute, the size of the
    backing file descriptor, and finally an in-memory buffer's
    ``getvalue``.  Returns None when no strategy applies.
    """
    if hasattr(o, '__len__'):
        return len(o)

    if hasattr(o, 'len'):
        return o.len

    if hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # In-memory file-likes (BytesIO/StringIO) expose fileno() but
            # raise when it is called; fall through to getvalue below.
            pass
        else:
            return os.fstat(fd).st_size

    if hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        return len(o.getvalue())
def get_netrc_auth(url):
    """Returns the Requests tuple auth for a given url from netrc."""
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None

        for fname in NETRC_FILES:
            try:
                candidate = os.path.expanduser('~/{0}'.format(fname))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return

            if os.path.exists(candidate):
                netrc_path = candidate
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        # Strip port numbers from netloc before the netrc lookup.
        host = urlparse(url).netloc.split(':')[0]

        try:
            entry = netrc(netrc_path).authenticators(host)
            if entry:
                # Entries are (login, account, password); fall back to the
                # account field when login is empty.
                login_i = 0 if entry[0] else 1
                return (entry[login_i], entry[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading
            # the file, we'll just skip netrc auth.
            pass

    # AppEngine hackiness.
    except (ImportError, AttributeError):
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if not name or not isinstance(name, basestring):
        return None
    # Pseudo-files such as <stdin> carry angle-bracketed names; those are
    # not real paths, so no filename can be guessed.
    if name.startswith('<') or name.endswith('>'):
        return None
    return os.path.basename(name)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as a sequence of key/value pairs.
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: None, a scalar (rejected), a mapping, or an iterable of
        2-tuples.
    :raises ValueError: when *value* is a scalar type.
    """
    # ``collections.Mapping`` moved to ``collections.abc`` in Python 3.3
    # and the old alias was removed in Python 3.10; import lazily so this
    # keeps working across both lineages (the module also targets Py2).
    try:
        from collections.abc import Mapping
    except ImportError:  # Python 2
        from collections import Mapping

    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Parses comma-separated lists whose elements may be quoted-strings
    (which may themselves contain commas); surrounding quotes are removed
    after parsing.  Unlike :func:`parse_set_header`, duplicates and case
    are preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    """
    items = []
    for element in _parse_list_header(value):
        # Strip surrounding quotes and undo backslash escaping.
        if element[:1] == element[-1:] == '"':
            element = unquote_header_value(element[1:-1])
        items.append(element)
    return items
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2
    and convert them into a python dict.  Keys that carry no value map to
    ``None``:

    >>> parse_dict_header('foo="is a fish", bar="as well"') == \
        {'foo': 'is a fish', 'bar': 'as well'}
    True
    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    """
    result = {}
    for item in _parse_list_header(value):
        if '=' not in item:
            result[item] = None
            continue
        name, val = item.split('=', 1)
        # Unquote quoted-string values.
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    Mirrors what browsers actually do rather than the letter of the RFC.

    :param value: the header value to unquote.
    :param is_filename: treat the value as a filename and keep UNC paths
        intact.
    """
    if not (value and value[0] == value[-1] == '"'):
        return value

    # Strip the surrounding quotes.  Real RFC unquoting would break IE,
    # which uploads files with names like "C:\foo\bar.txt".
    value = value[1:-1]

    if is_filename and value[:2] == '\\\\':
        # A UNC path: collapsing the leading double backslash would
        # corrupt it (see #458), so return it untouched.
        return value

    return value.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    """
    # Later cookies with the same name overwrite earlier ones, matching
    # plain dict-assignment semantics.
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    """
    # Build a throwaway jar from the plain dict, then merge it in.
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    # Look for <meta charset=...>, the http-equiv pragma form, and the
    # XML declaration — in that order, preserving duplicates.
    finders = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for finder in finders:
        found.extend(finder.findall(content))
    return found
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :return: the charset parameter of the Content-Type header, the HTTP
        default 'ISO-8859-1' for text/* types, or None.
    """
    # The ``cgi`` module (cgi.parse_header) was deprecated in Python 3.11
    # and removed in 3.13; email.message.Message is the documented
    # replacement for parsing Content-Type parameters.
    from email.message import Message

    content_type = headers.get('content-type')

    if not content_type:
        return None

    msg = Message()
    msg['content-type'] = content_type

    charset = msg.get_param('charset')
    if charset is not None:
        return str(charset).strip("'\"")

    if 'text' in msg.get_content_type():
        # RFC 2616: text/* defaults to ISO-8859-1 when no charset given.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator."""

    if r.encoding is None:
        # No declared encoding: pass the raw chunks straight through.
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded

    # Flush whatever the incremental decoder is still buffering.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    pos = 0
    total = len(string)
    while pos < total:
        end = pos + slice_length
        yield string[pos:end]
        pos = end
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back: decode with replacement characters.  When no encoding was
    # found at all str() raises TypeError and the raw bytes are returned.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
    """
    pieces = uri.split('%')
    # pieces[0] precedes the first '%'; each later piece starts with the
    # two hex digits of a %XX escape (or is malformed).
    for i, piece in enumerate(pieces):
        if i == 0:
            continue
        hex_digits = piece[0:2]
        if len(hex_digits) != 2 or not hex_digits.isalnum():
            pieces[i] = '%' + piece
            continue
        try:
            char = chr(int(hex_digits, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_digits)
        if char in UNRESERVED_SET:
            # Safe to decode: unreserved characters never need escaping.
            pieces[i] = char + piece[2:]
        else:
            pieces[i] = '%' + piece
    return ''.join(pieces)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.
    """
    safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
    safe_without_percent = "!#$&'()*+,/:;=?@[]~"
    try:
        # Unquote only the unreserved characters, then re-quote anything
        # illegal while leaving reserved/unreserved chars and '%' alone.
        return quote(unquote_unreserved(uri), safe=safe_with_percent)
    except InvalidURL:
        # The URI contains a malformed percent sequence and cannot go
        # through the unquote cycle; quote it as-is, escaping bare '%'s
        # so they do not cause issues elsewhere.
        return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
    """
    This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
    """
    netaddr, bits = net.split('/')
    # Native-endian 32-bit views of the mask, network and candidate IP.
    netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
    ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
    return (ipaddr & netmask) == network
def dotted_netmask(mask):
    """
    Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0
    """
    # Clear the low (32 - mask) host bits of an all-ones 32-bit value and
    # render it as a dotted quad.
    host_bits = (1 << (32 - mask)) - 1
    return socket.inet_ntoa(struct.pack('>I', 0xffffffff ^ host_bits))
def is_ipv4_address(string_ip):
    """Return True when *string_ip* is accepted by ``inet_aton`` as an
    IPv4 address (dotted-quad and its shorthand forms)."""
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """Very simple check of the cidr format in no_proxy variable"""
    # Exactly one '/' separating address and prefix length is required.
    if string_network.count('/') != 1:
        return False

    address, _, prefix = string_network.partition('/')

    try:
        mask = int(prefix)
    except ValueError:
        return False

    # Prefix length /0 is deliberately rejected, same as out-of-range.
    if not 1 <= mask <= 32:
        return False

    try:
        socket.inet_aton(address)
    except socket.error:
        return False

    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.
    """
    get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    netloc = urlparse(url).netloc

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        hosts = no_proxy.replace(' ', '').split(',')
        ip = netloc.split(':')[0]

        if is_ipv4_address(ip):
            # Bare-IP URL: compare against any CIDR blocks in no_proxy.
            for proxy_ip in hosts:
                if is_valid_cidr(proxy_ip) and address_in_network(ip, proxy_ip):
                    return True
        else:
            for host in hosts:
                if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
                    # The URL does match something in no_proxy, so we don't
                    # want to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    #
    # The proxy_bypass function is incredibly buggy on OS X in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        return bool(proxy_bypass(netloc))
    except (TypeError, socket.gaierror):
        return False
def get_environ_proxies(url):
    """Return a dict of environment proxies."""
    # An empty dict means "no proxies" for URLs that bypass them.
    return {} if should_bypass_proxies(url) else getproxies()
def default_user_agent(name="python-requests"):
    """Return a string representing the default user agent."""
    _implementation = platform.python_implementation()

    if _implementation == 'CPython':
        _implementation_version = platform.python_version()
    elif _implementation == 'PyPy':
        info = sys.pypy_version_info
        _implementation_version = '%s.%s.%s' % (info.major, info.minor, info.micro)
        if info.releaselevel != 'final':
            # Append e.g. 'beta0' for pre-release PyPy builds.
            _implementation_version = ''.join([_implementation_version, info.releaselevel])
    elif _implementation in ('Jython', 'IronPython'):
        _implementation_version = platform.python_version()  # Complete Guess
    else:
        _implementation_version = 'Unknown'

    try:
        p_system = platform.system()
        p_release = platform.release()
    except IOError:
        p_system = 'Unknown'
        p_release = 'Unknown'

    return " ".join(['%s/%s' % (name, __version__),
                     '%s/%s' % (_implementation, _implementation_version),
                     '%s/%s' % (p_system, p_release)])
def default_headers():
    """Build the CaseInsensitiveDict of headers Requests sends by default."""
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
    """
    links = []
    strip_chars = " '\""

    for segment in re.split(", *<", value):
        # Each segment looks like "<url>; key=val; key2=val2 ...".
        try:
            url, params = segment.split(";", 1)
        except ValueError:
            url, params = segment, ''

        link = {"url": url.strip("<> '\"")}

        for param in params.split(";"):
            # A parameter whose value contains '=' (or is missing) stops
            # parameter parsing for this segment, as in the original.
            try:
                key, val = param.split("=")
            except ValueError:
                break
            link[key.strip(strip_chars)] = val.strip(strip_chars)

        links.append(link)

    return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the UTF encoding of a JSON byte string.

    JSON always starts with two ASCII characters, so the encoding can be
    deduced from the location and count of NUL bytes among the first four
    bytes; an explicit BOM is honoured first.

    :param data: the raw JSON bytes.
    :return: a codec name, or None when no UTF variant is recognised.
    """
    sample = data[:4]
    # BUG FIX: this previously tested ``codecs.BOM32_BE``, which despite
    # its name is an alias for the UTF-16 big-endian BOM, so UTF-32-BE
    # input carrying a BOM was never detected.  ``codecs.BOM_UTF32_BE``
    # is the correct constant.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'     # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'     # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:   # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    '''Given a URL that may or may not have a scheme, prepend the given
    scheme.  A scheme already present in the URL is left untouched.'''
    parts = list(urlparse(url, new_scheme))

    # urlparse is a finicky beast and sometimes decides there is no netloc,
    # parking the host in the path component instead.  Assume it is being
    # over-cautious and swap the two.
    if not parts[1]:
        parts[1], parts[2] = parts[2], parts[1]

    return urlunparse(parts)
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password."""
    parsed = urlparse(url)

    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # unquote raises TypeError when no auth component is present.
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """
    Given a string object, regardless of type, returns a representation of that
    string in the native string type, encoding and decoding where necessary.
    This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string

    # Native type is bytes on Python 2 and unicode on Python 3, so convert
    # in the appropriate direction.
    if is_py2:
        return string.encode(encoding)
    return string.decode(encoding)
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part
    """
    parts = list(urlparse(url))

    # see func:`prepend_scheme_if_needed`
    if not parts[1]:
        parts[1], parts[2] = parts[2], parts[1]

    parts[1] = parts[1].rsplit('@', 1)[-1]  # strip any user:pass@ prefix
    parts[5] = ''                           # drop the fragment
    return urlunparse(parts)
| mpl-2.0 |
nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/tensor_array_ops_test.py | 3 | 44035 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
class TensorArrayCPUTest(tf.test.TestCase):
_use_gpu = False
  def testTensorArrayWriteRead(self):
    """Write tensors of differing shapes (infer_shape=False allows the
    mismatch) into a size-3 TensorArray and read them back."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      # Each write returns a new flow-carrying TensorArray object.
      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, -3.0)
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
  def _testTensorArrayWritePack(self, tf_dtype, legacy):
    """Write three equal-shape rows and pack() them into one tensor,
    covering both the current and the legacy pack implementation."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      if tf_dtype == tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        convert = lambda x: np.asarray(x).astype("|S")
      else:
        convert = lambda x: np.asarray(x).astype(dtype)
      w0 = ta.write(0, convert([[4.0, 5.0]]))
      w1 = w0.write(1, convert([[6.0, 7.0]]))
      w2 = w1.write(2, convert([[8.0, 9.0]]))
      if legacy:
        c0 = w2._legacy_pack()
      else:
        c0 = w2.pack()
      # Packing stacks the three [1, 2] writes into a [3, 1, 2] tensor.
      self.assertAllEqual(
          convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
  def _testTensorArrayWritePackMaybeLegacy(self, legacy):
    # Run the write/pack check across every supported dtype.
    self._testTensorArrayWritePack(tf.float32, legacy)
    self._testTensorArrayWritePack(tf.float64, legacy)
    self._testTensorArrayWritePack(tf.int32, legacy)
    self._testTensorArrayWritePack(tf.int64, legacy)
    self._testTensorArrayWritePack(tf.complex64, legacy)
    self._testTensorArrayWritePack(tf.complex128, legacy)
    self._testTensorArrayWritePack(tf.string, legacy)
  def testTensorArrayWritePack(self):
    # Current pack() API.
    self._testTensorArrayWritePackMaybeLegacy(legacy=False)
  def testTensorArrayWritePackLegacy(self):
    # Deprecated _legacy_pack() API.
    self._testTensorArrayWritePackMaybeLegacy(legacy=True)
  def _testTensorArrayWriteConcat(self, tf_dtype):
    """Write rows of different leading sizes and concat() them along
    axis 0 (infer_shape=False permits the ragged leading dimension)."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
      if tf_dtype == tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        convert = lambda x: np.asarray(x).astype("|S")
      else:
        convert = lambda x: np.asarray(x).astype(dtype)
      # Writes have shapes [3, 2], [2, 2] and [1, 2]; concat gives [6, 2].
      w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
      w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
      w2 = w1.write(2, convert([[8.0, 9.0]]))
      c0 = w2.concat()
      self.assertAllEqual(
          convert([[4.0, 5.0],
                   [104.0, 105.0],
                   [204.0, 205.0],
                   [6.0, 7.0],
                   [106.0, 107.0],
                   [8.0, 9.0]]), c0.eval())
  def testTensorArrayWriteConcat(self):
    # Run the write/concat check across every supported dtype.
    self._testTensorArrayWriteConcat(tf.float32)
    self._testTensorArrayWriteConcat(tf.float64)
    self._testTensorArrayWriteConcat(tf.int32)
    self._testTensorArrayWriteConcat(tf.int64)
    self._testTensorArrayWriteConcat(tf.complex64)
    self._testTensorArrayWriteConcat(tf.complex128)
    self._testTensorArrayWriteConcat(tf.string)
  def testTensorArrayLegacyUnpackWrongMajorSizeFails(self):
    """_legacy_unpack must reject input whose first dimension does not
    equal the TensorArray size."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      with self.assertRaisesOpError(
          r"Input value must have first dimension "
          r"equal to the array size \(2 vs. 3\)"):
        ta._legacy_unpack([1.0, 2.0]).flow.eval()
  def _testTensorArrayPackNotAllValuesAvailableFails(self, legacy):
    """pack() must fail when some indices were never written."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      with self.assertRaisesOpError(
          "Could not read from TensorArray index 1 "
          "because it has not yet been written to."):
        # Only index 0 is written; packing reads index 1 and fails.
        if legacy:
          ta.write(0, [[4.0, 5.0]])._legacy_pack().eval()
        else:
          ta.write(0, [[4.0, 5.0]]).pack().eval()
  def testTensorArrayPackNotAllValuesAvailableFails(self):
    # Current pack() API.
    self._testTensorArrayPackNotAllValuesAvailableFails(legacy=False)
  def testTensorArrayPackNotAllValuesAvailableFailsLegacy(self):
    # Deprecated _legacy_pack() API.
    self._testTensorArrayPackNotAllValuesAvailableFails(legacy=True)
  def _testTensorArrayUnpackRead(self, tf_dtype, legacy):
    """Unpack a tensor along axis 0 into a TensorArray and read elements
    back: vector -> scalars, matrix -> vectors, and an empty matrix."""
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      if tf_dtype is tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        convert = lambda x: np.asarray(x).astype("|S")
      else:
        convert = lambda x: np.asarray(x).astype(dtype)
      # Unpack a vector into scalars
      if legacy:
        w0 = ta._legacy_unpack(convert([1.0, 2.0, 3.0]))
      else:
        w0 = ta.unpack(convert([1.0, 2.0, 3.0]))
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert(1.0), d0)
      self.assertAllEqual(convert(2.0), d1)
      self.assertAllEqual(convert(3.0), d2)
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Unpack a matrix into vectors
      if legacy:
        w1 = ta._legacy_unpack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
      else:
        w1 = ta.unpack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
      r0 = w1.read(0)
      r1 = w1.read(1)
      r2 = w1.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert([1.0, 1.1]), d0)
      self.assertAllEqual(convert([2.0, 2.1]), d1)
      self.assertAllEqual(convert([3.0, 3.1]), d2)
      # Reset ta because we're going to change the shape, else shape
      # inference will throw an error.
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3)
      # Try unpacking an empty matrix, which should not cause an error.
      if legacy:
        w2 = ta._legacy_unpack(convert([[], [], []]))
      else:
        w2 = ta.unpack(convert([[], [], []]))
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert([]), d0)
      self.assertAllEqual(convert([]), d1)
      self.assertAllEqual(convert([]), d2)
  def _testTensorArrayUnpackReadMaybeLegacy(self, legacy):
    # Run the unpack/read check across every supported dtype.
    self._testTensorArrayUnpackRead(tf.float32, legacy)
    self._testTensorArrayUnpackRead(tf.float64, legacy)
    self._testTensorArrayUnpackRead(tf.int32, legacy)
    self._testTensorArrayUnpackRead(tf.int64, legacy)
    self._testTensorArrayUnpackRead(tf.complex64, legacy)
    self._testTensorArrayUnpackRead(tf.complex128, legacy)
    self._testTensorArrayUnpackRead(tf.string, legacy)
  def testTensorArrayUnpackRead(self):
    # Current unpack() API.
    self._testTensorArrayUnpackReadMaybeLegacy(legacy=False)
  def testTensorArrayUnpackReadLegacy(self):
    # Deprecated _legacy_unpack() API.
    self._testTensorArrayUnpackReadMaybeLegacy(legacy=True)
  def _testTensorArraySplitRead(self, tf_dtype):
    """Split empty/vector/matrix values into a TensorArray and read them back.

    Covers zero-length splits, uneven lengths, and preservation of the inner
    (non-split) dimension for matrices, for the given `tf_dtype`.
    """
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
      if tf_dtype == tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        convert = lambda x: np.asarray(x).astype("|S")
      else:
        convert = lambda x: np.asarray(x).astype(dtype)
      # Split an empty vector
      lengths = tf.constant([0, 0, 0])
      w0 = ta.split(convert([]), lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert([]), d0)
      self.assertAllEqual(convert([]), d1)
      self.assertAllEqual(convert([]), d2)
      # Split a vector
      lengths = tf.constant([2, 0, 1])
      w0 = ta.split(
          convert([1.0, 2.0, 3.0]), lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert([1.0, 2.0]), d0)
      self.assertAllEqual(convert([]), d1)
      self.assertAllEqual(convert([3.0]), d2)
      # Split a matrix
      lengths = tf.constant([2, 0, 1])
      w0 = ta.split(
          convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
      r0 = w0.read(0)
      r1 = w0.read(1)
      r2 = w0.read(2)
      d0, d1, d2 = session.run([r0, r1, r2])
      self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
      # Zero-length split of a matrix keeps the inner dimension (0, 2).
      self.assertAllEqual(convert([]).reshape(0, 2), d1)
      self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(tf.float32)
self._testTensorArraySplitRead(tf.float64)
self._testTensorArraySplitRead(tf.int32)
self._testTensorArraySplitRead(tf.int64)
self._testTensorArraySplitRead(tf.complex64)
self._testTensorArraySplitRead(tf.complex128)
self._testTensorArraySplitRead(tf.string)
  def testTensorGradArrayWriteRead(self):
    """Write/read a TensorArray and its gradient array independently.

    Checks that the primary array and the array obtained via `grad("grad")`
    hold distinct values of differing shapes per index.
    """
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      g_ta = ta.grad("grad")
      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, -3.0)
      g_w0 = g_ta.write(0, [[5.0, 6.0]])
      g_w1 = g_w0.write(1, [[2.0]])
      g_w2 = g_w1.write(2, -2.0)
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      g_r0 = g_w2.read(0)
      g_r1 = g_w2.read(1)
      g_r2 = g_w2.read(2)
      d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
      self.assertAllEqual([[5.0, 6.0]], g_d0)
      self.assertAllEqual([[2.0]], g_d1)
      self.assertAllEqual(-2.0, g_d2)
  def testTensorGradArrayDynamicWriteRead(self):
    """Gradient array of a dynamically-sized TensorArray tracks its size."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True,
          infer_shape=False)
      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [[1.0]])
      w2 = w1.write(2, -3.0)
      g_ta = w2.grad("grad")  # Get gradient array here so we know the shape
      s = w2.size()
      g_s = g_ta.size()
      g_w0 = g_ta.write(0, [[5.0, 6.0]])
      g_w1 = g_w0.write(1, [[2.0]])
      g_w2 = g_w1.write(2, -2.0)
      r0 = w2.read(0)
      r1 = w2.read(1)
      r2 = w2.read(2)
      g_r0 = g_w2.read(0)
      g_r1 = g_w2.read(1)
      g_r2 = g_w2.read(2)
      d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run([
          r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
      self.assertAllEqual([[4.0, 5.0]], d0)
      self.assertAllEqual([[1.0]], d1)
      self.assertAllEqual(-3.0, d2)
      self.assertAllEqual([[5.0, 6.0]], g_d0)
      self.assertAllEqual([[2.0]], g_d1)
      self.assertAllEqual(-2.0, g_d2)
      # Both the source and gradient arrays grew to size 3.
      self.assertAllEqual(3, vs)
      self.assertAllEqual(3, g_vs)
  def testTensorGradAccessTwiceReceiveSameObject(self):
    """Two `grad("grad")` calls on one TensorArray alias the same handle."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      g_ta_0 = ta.grad("grad")
      g_ta_1 = ta.grad("grad")
      with tf.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
        # Write with one gradient handle, read with another copy of it
        r1_0 = g_ta_1.read(0)
      t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
          [g_ta_0.handle, g_ta_1.handle, r1_0])
      self.assertAllEqual(t_g_ta_0, t_g_ta_1)
      self.assertAllEqual([[4.0, 5.0]], d_r1_0)
  def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
    """Writes with a wrong dtype or an out-of-range index raise op errors."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      # Test writing the wrong datatype
      with self.assertRaisesOpError(
          "TensorArray dtype is float but Op is trying to write dtype string"):
        ta.write(-1, "wrong_type_scalar").flow.eval()
      # Test writing to a negative index
      with self.assertRaisesOpError(
          "Tried to write to index -1 but array is not "
          "resizeable and size is: 3"):
        ta.write(-1, 3.0).flow.eval()
      # Test writing to too large an index
      with self.assertRaisesOpError(
          "Tried to write to index 3 but array is not "
          "resizeable and size is: 3"):
        ta.write(3, 3.0).flow.eval()
  def testTensorArrayReadWrongIndexOrDataTypeFails(self):
    """Reads with a wrong dtype, an unwritten index, or an out-of-range
    index raise op errors."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      w0 = ta.write(0, [[4.0, 5.0]])
      # Test reading wrong datatype (bypasses the Python wrapper's dtype
      # check by calling the generated op directly).
      r0_bad = gen_data_flow_ops._tensor_array_read(
          handle=w0.handle, index=0, dtype=tf.int64, flow_in=w0.flow)
      with self.assertRaisesOpError(
          "TensorArray dtype is float but Op requested dtype int64."):
        r0_bad.eval()
      # Test reading from a different index than the one we wrote to
      r1 = w0.read(1)
      with self.assertRaisesOpError(
          "Could not read from TensorArray index 1 because "
          "it has not yet been written to."):
        r1.eval()
      # Test reading from a negative index
      with self.assertRaisesOpError(
          r"Tried to read from index -1 but array size is: 3"):
        ta.read(-1).eval()
      # Test reading from too large an index
      with self.assertRaisesOpError(
          "Tried to read from index 3 but array size is: 3"):
        ta.read(3).eval()
  def testTensorArrayWriteMultipleFails(self):
    """Writing the same index twice raises an op error."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      with self.assertRaisesOpError(
          "Could not write to TensorArray index 2 because "
          "it has already been written to."):
        ta.write(2, 3.0).write(2, 3.0).flow.eval()
  def testTensorArrayConcatIncompatibleShapesFails(self):
    """`concat` rejects scalar elements and mismatched inner shapes."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      w1 = ta.write(0, 3.0)
      w2 = w1.write(1, 4.0)
      w3 = w2.write(2, [3.0])
      with self.assertRaisesOpError(
          "Concat saw a scalar shape at index 0 but requires at least vectors"):
        w3.concat().eval()
      # Fresh array: elements whose shapes (excluding dim 0) disagree.
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      w1 = ta.write(0, [3.0])
      w2 = w1.write(1, [4.0])
      w3 = w2.write(2, [[3.0]])
      with self.assertRaisesOpError(
          r"TensorArray has inconsistent shapes. Index 0 has "
          r"\(excepting dimension 0\) shape: \[\] but index 2 has \(excepting "
          r"dimension 0\) shape: \[1\]"):
        w3.concat().eval()
  def testTensorArraySplitIncompatibleShapesFails(self):
    """`split` rejects bad `lengths` shapes/sums, scalar values, and a
    lengths count that disagrees with a non-resizeable array's size."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      with self.assertRaisesOpError(
          r"Expected lengths to be a vector, received shape: \[\]"):
        # Feeding a scalar where a lengths vector is expected.
        lengths = tf.placeholder(tf.int64)
        ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
      with self.assertRaisesOpError(
          r"Expected sum of lengths to be equal to values.shape\[0\], "
          r"but sum of lengths is 1 and value's shape is: \[3\]"):
        ta.split([1.0, 2.0, 3.0], [1]).flow.eval()
      with self.assertRaisesOpError(
          r"Expected value to be at least a vector, but received shape: \[\]"):
        ta.split(1.0, [1]).flow.eval()
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2, infer_shape=False)
      with self.assertRaisesOpError(
          r"TensorArray's size is not equal to the size of lengths "
          r"\(2 vs. 1\), and the TensorArray is not marked as "
          r"dynamically resizeable"):
        ta.split([1.0], [1]).flow.eval()
  def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
    """Gradient arrays aggregate repeated writes; source arrays do not.

    Verifies 3.0 + 4.0 + 5.0 accumulates to 12.0 in the gradient array,
    that re-writing the source array raises, and that aggregating values
    of differing shapes raises.
    """
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
      ta_grad = ta.grad("grad")
      c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
      w0 = ta.write(2, c(3.0))
      w1 = w0.write(2, c(4.0))
      w0_grad = ta_grad.write(2, c(3.0))
      w1_grad = w0_grad.write(2, c(4.0))
      w2_grad = w1_grad.write(2, c(5.0))
      # Assert that aggregation works correctly
      self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
      # Assert that if multiple_writes_aggregate is not enabled,
      # multiple writes raise an exception.
      with self.assertRaisesOpError(
          r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
          r"it has already been written to."):
        w1.flow.eval()
      # Using differing shapes causes an exception
      wb0_grad = ta_grad.write(1, c(1.0))
      wb1_grad = wb0_grad.write(1, c([1.0]))
      with self.assertRaisesOpError(
          r"Could not aggregate to TensorArray index 1 because the "
          r"existing shape is \[\] but the new input shape is \[1\]"):
        wb1_grad.flow.eval()
  def testTensorArrayWriteGradientAddMultipleAdds(self):
    """Run the gradient-aggregation test across all numeric dtypes."""
    for dtype in (tf.int32, tf.int64, tf.float32,
                  tf.float64, tf.complex64, tf.complex128):
      self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
  def testMultiTensorArray(self):
    """Two independently-named TensorArrays coexist without interference."""
    with self.test_session(use_gpu=self._use_gpu):
      h1 = tensor_array_ops.TensorArray(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      w1 = h1.write(0, 4.0)
      r1 = w1.read(0)
      h2 = tensor_array_ops.TensorArray(
          size=1, dtype=tf.float32, tensor_array_name="bar")
      w2 = h2.write(0, 5.0)
      r2 = w2.read(0)
      r = r1 + r2
      self.assertAllClose(9.0, r.eval())
  def testDuplicateTensorArrayHasDifferentName(self):
    """Two arrays created with the same name get uniquified handles."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      h1 = tensor_array_ops.TensorArray(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      c1 = h1.write(0, 4.0)
      h2 = tensor_array_ops.TensorArray(
          size=1, dtype=tf.float32, tensor_array_name="foo")
      c2 = h2.write(0, 5.0)
      _, _, c1h, c2h = session.run([c1.flow, c2.flow, c1.handle, c2.handle])
      # Handles come back as byte strings: [container, unique name].
      c1h = [x.decode("ascii") for x in c1h]
      c2h = [x.decode("ascii") for x in c2h]
      self.assertEqual(c1h[0], "_tensor_arrays")
      self.assertEqual(c2h[0], "_tensor_arrays")
      self.assertTrue(c1h[1].startswith("foo_"))
      self.assertTrue(c2h[1].startswith("foo_"))
      self.assertNotEqual(c1h[1], c2h[1])
  def _testTensorArrayGradientWriteReadType(self, dtype):
    """Gradients of write-then-read flow back to the written values.

    Also checks that two reads of the same index sum their incoming
    gradients (e.g. [[2,3]] + [[1,-1]] == [[3,2]]).
    """
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.as_dtype(dtype), tensor_array_name="foo", size=3,
          infer_shape=False)
      c = lambda x: np.array(x, dtype=dtype)
      value_0 = tf.constant(c([[4.0, 5.0]]))
      value_1 = tf.constant(c(3.0))
      w0 = ta.write(0, value_0)
      w1 = w0.write(1, value_1)
      r0 = w1.read(0)
      r1 = w1.read(1)
      r0_2 = w1.read(0)
      # Test individual components' gradients
      grad_just_r0 = tf.gradients(
          ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
      grad_just_r0_vals = session.run(grad_just_r0)
      self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
      grad_r0_r0_2 = tf.gradients(
          ys=[r0, r0_2], xs=[value_0],
          grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
      grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
      self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
      grad_just_r1 = tf.gradients(
          ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
      grad_just_r1_vals = session.run(grad_just_r1)
      self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
      # Test combined gradients
      grad = tf.gradients(
          ys=[r0, r0_2, r1], xs=[value_0, value_1],
          grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
      grad_vals = session.run(grad)
      self.assertEqual(len(grad_vals), 2)
      self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
      self.assertAllEqual(c(-2.0), grad_vals[1])
  def testTensorArrayGradientWriteRead(self):
    """Run the write/read gradient test across all numeric numpy dtypes."""
    for dtype in (np.float32, np.float64, np.int32,
                  np.int64, np.complex64, np.complex128):
      self._testTensorArrayGradientWriteReadType(dtype)
  def _testTensorArrayGradientWritePackConcatAndRead(self, legacy):
    """Gradients from pack(), read(), and concat() of one array accumulate."""
    with self.test_session(use_gpu=self._use_gpu) as sess:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2,
          clear_after_read=False)
      value_0 = tf.constant([-1.0, 1.0])
      value_1 = tf.constant([-10.0, 10.0])
      w0 = ta.write(0, value_0)
      w1 = w0.write(1, value_1)
      if legacy:
        p0 = w1._legacy_pack()
      else:
        p0 = w1.pack()
      r0 = w1.read(0)
      s0 = w1.concat()
      # Test gradient accumulation between read(0), pack(), and concat()
      with tf.control_dependencies([p0, r0, s0]):
        grad_r = tf.gradients(
            ys=[p0, r0, s0], xs=[value_0, value_1],
            grad_ys=[
                [[2.0, 3.0], [4.0, 5.0]],  # pack gradient
                [-0.5, 1.5],  # read(0) gradient
                [20.0, 30.0, 40.0, 50.0]])  # concat gradient
      grad_vals = sess.run(grad_r)  # 2 + 2 entries
      self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
      self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
  def testTensorArrayGradientWritePackConcatAndRead(self):
    """Gradient accumulation test via the current pack path."""
    self._testTensorArrayGradientWritePackConcatAndRead(legacy=False)
  def testTensorArrayGradientWritePackConcatAndReadLegacy(self):
    """Gradient accumulation test via the deprecated `_legacy_pack` path."""
    self._testTensorArrayGradientWritePackConcatAndRead(legacy=True)
  def testTensorArrayReadTwice(self):
    """Second read of an index fails unless clear_after_read=False."""
    with self.test_session(use_gpu=self._use_gpu):
      value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
      ta_readonce = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2)
      w_readonce = ta_readonce.unpack(value)
      r0_readonce = w_readonce.read(0)
      # Force the second read to run after the first has cleared the slot.
      with tf.control_dependencies([r0_readonce]):
        r1_readonce = w_readonce.read(0)
      with self.assertRaisesOpError(
          r"Could not read index 0 twice because it was cleared after a "
          r"previous read \(perhaps try setting clear_after_read = false\?\)"):
        r1_readonce.eval()
      ta_readtwice = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2,
          clear_after_read=False)
      w_readtwice = ta_readtwice.unpack(value)
      r0_readtwice = w_readtwice.read(0)
      with tf.control_dependencies([r0_readtwice]):
        r1_readtwice = w_readtwice.read(0)
      self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
  def _testTensorArrayGradientUnpackRead(self, legacy):
    """Gradients of unpack-then-read aggregate over repeated reads."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2,
          clear_after_read=False)
      value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
      if legacy:
        w = ta._legacy_unpack(value)
      else:
        w = ta.unpack(value)
      r0 = w.read(0)
      r0_1 = w.read(0)
      r1 = w.read(1)
      # Test combined gradients + aggregation of read(0)
      grad = tf.gradients(
          ys=[r0, r0_1, r1], xs=[value],
          grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
      grad_vals = session.run(grad)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
  def testTensorArrayGradientUnpackRead(self):
    """Unpack/read gradient test via the current unpack path."""
    self._testTensorArrayGradientUnpackRead(legacy=False)
  def testTensorArrayGradientUnpackReadLegacy(self):
    """Unpack/read gradient test via the deprecated `_legacy_unpack` path."""
    self._testTensorArrayGradientUnpackRead(legacy=True)
  def testTensorArrayGradientSplitConcat(self):
    """Gradient of split-then-concat is the identity on the input."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=2)
      value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
      w = ta.split(value, [2, 1])
      r = w.concat()
      # Test combined gradients
      grad = tf.gradients(
          ys=[r], xs=[value],
          grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
      grad_vals = session.run(grad)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual(
          [[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]], grad_vals[0])
  def _testTensorArrayGradientDynamicUnpackRead(self, legacy):
    """Gradients flow through unpack/read on a dynamically-sized array."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
      value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
      if legacy:
        w = ta._legacy_unpack(value)
      else:
        w = ta.unpack(value)
      r0 = w.read(0)
      r1 = w.read(1)
      # Test combined gradients + aggregation of read(0)
      grad = tf.gradients(
          ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
      grad_vals = session.run(grad)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
  def testTensorArrayGradientDynamicUnpackRead(self):
    """Dynamic unpack/read gradient test via the current unpack path."""
    self._testTensorArrayGradientDynamicUnpackRead(legacy=False)
  def testTensorArrayGradientDynamicUnpackReadLegacy(self):
    """Dynamic unpack/read gradient test via the legacy unpack path."""
    self._testTensorArrayGradientDynamicUnpackRead(legacy=True)
  def testCloseTensorArray(self):
    """`close()` on a fresh TensorArray runs without error."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      c1 = ta.close()
      session.run(c1)
  def testSizeTensorArray(self):
    """`size()` reports the static size of a fixed-size array."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      s = ta.size()
      self.assertAllEqual(3, s.eval())
  def testWriteCloseTensorArray(self):
    """Closing after partial writes runs without error."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=False)
      w0 = ta.write(0, [[4.0, 5.0]])
      w1 = w0.write(1, [3.0])
      w1.close().run()  # Expected to run without problems
  def _testWhileLoopWritePackGradients(self, dynamic_size, dtype, legacy):
    """Forward and backward prop through a while_loop that writes an array.

    The loop body accumulates `state` and writes `sliced + var + state` at
    each time step; the packed output's gradients w.r.t. the input matrix,
    the variable, and the initial state are checked against hand-derived
    values (see the comment block below for the derivation).
    """
    np_dtype = dtype.as_numpy_dtype
    with self.test_session(use_gpu=self._use_gpu) as session:
      v0 = tf.identity(np.arange(3*5, dtype=np_dtype).reshape(3, 5))
      var = tf.Variable(np.arange(100, 105, dtype=np_dtype))
      state0 = tf.identity(np.array([1] * 5, dtype=np_dtype))
      ta = tensor_array_ops.TensorArray(
          dtype=dtype, tensor_array_name="foo",
          size=0 if dynamic_size else 3, dynamic_size=dynamic_size)
      time_0 = tf.identity(0)
      def body(time, ta_t, state):
        # Take row `time` of v0, add var and the running state, write it out.
        sliced = tf.slice(v0, begin=tf.stack([time, 0]), size=[1, -1])
        sliced = tf.squeeze(sliced)
        out = sliced + var + state
        state += sliced
        ta_t = ta_t.write(time, out)
        return (time+1, ta_t, state)
      (unused_0, h_final, unused_2) = tf.while_loop(
          cond=lambda time, unused_1, unused_2: time < 3,
          body=body,
          loop_vars=(time_0, ta, state0),
          shape_invariants=(time_0.get_shape(),
                            tensor_shape.unknown_shape(),
                            tensor_shape.unknown_shape()),
          parallel_iterations=3)
      if legacy:
        vout = h_final._legacy_pack()
      else:
        vout = h_final.pack()
      grad_val = -np.arange(3*5, dtype=np_dtype).reshape(3, 5)
      v0_grad = tf.gradients([vout], [v0], [grad_val])[0]
      state0_grad = tf.gradients([vout], [state0], [grad_val])[0]
      var_grad = tf.gradients([vout], [var], [grad_val])[0]
      tf.global_variables_initializer().run()
      state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
          session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad]))
      just_v0_grad_t, = session.run([v0_grad])
      # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
      # vout = [ v0[0] + var + state[0] |
      #          v0[1] + var + state[1] |
      #          v0[2] + var + state[2] ]
      #      = [ v0[0] + var + state0 |
      #          v0[1] + var + state0 + v0[0] |
      #          v0[2] + var + state0 + v0[0] + v0[1] ]
      #
      # d(vout[0])/d(v0) = [1 | 0 | 0 ]
      # d(vout[1])/d(v0) = [1 | 1 | 0 ]
      # d(vout[2])/d(v0) = [1 | 1 | 1 ]
      # d(vout)/d(var) = [1 | 1 | 1]
      # d(vout)/d(state0) = [ 1 | 1 | 1 ]
      state_per_time = np.array([
          state0_t,
          state0_t + v0_t[0, :],
          state0_t + v0_t[0, :] + v0_t[1, :]])
      # Compare forward prop
      self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
      # Compare backward prop
      expected_v0_grad_t = np.array([
          grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
          grad_val[1, :] + grad_val[2, :],
          grad_val[2, :]])
      self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
      self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
      self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
      self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
  def testWhileLoopWritePackGradients(self):
    """Static-size while-loop gradient test, current pack path."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=False, dtype=tf.float32, legacy=False)
    # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
    # self._testWhileLoopWritePackGradients(
    #     dynamic_size=False, dtype=tf.int64)
  def testWhileLoopWritePackGradientsLegacy(self):
    """Static-size while-loop gradient test, legacy pack path."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=False, dtype=tf.float32, legacy=True)
    # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
    # self._testWhileLoopWritePackGradients(
    #     dynamic_size=False, dtype=tf.int64)
  def testWhileLoopDynamicWritePackGradients(self):
    """Dynamic-size while-loop gradient test, current pack path."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=True, dtype=tf.float32, legacy=False)
  def testWhileLoopDynamicWritePackGradientsLegacy(self):
    """Dynamic-size while-loop gradient test, legacy pack path."""
    self._testWhileLoopWritePackGradients(
        dynamic_size=True, dtype=tf.float32, legacy=True)
  def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
    """d(a+b)/da == d(a+b)/db == upstream grad, individually and jointly."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      a = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
      b = tf.identity(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1 + 3*5)
      ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
      ta = ta.write(0, a, name="write_a")
      ta = ta.write(1, b, name="write_b")
      c = (ta.read(0, name="read_a_0") +  # a + b
           ta.read(1, name="read_b_0"))
      g0 = -(np.arange(3*5, dtype=np.float32).reshape(3, 5) + 1)
      grad_a = tf.gradients([c], [a], [g0])[0]  # d(a+b)/da = 1
      grad_b = tf.gradients([c], [b], [g0])[0]  # d(a+b)/db = 1
      # Test gradients calculated individually
      grad_a_t, = session.run([grad_a])
      self.assertAllEqual(grad_a_t, g0)
      grad_b_t, = session.run([grad_b])
      self.assertAllEqual(grad_b_t, g0)
      # Test gradients calculated jointly
      joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
      self.assertAllEqual(joint_grad_a_t, g0)
      self.assertAllEqual(joint_grad_b_t, g0)
  def _grad_source_for_name(self, name):
    """Return the gradient-source scope inferred from an op named `name`."""
    return tensor_array_grad._GetGradSource(tf.constant(0, name=name))
  def testGetGradSource_Invalid(self):
    """Names with no 'gradients' component raise ValueError."""
    with self.assertRaises(ValueError):
      self._grad_source_for_name("")
    with self.assertRaises(ValueError):
      self._grad_source_for_name("foo")
    with self.assertRaises(ValueError):
      self._grad_source_for_name("foo/bar")
  def testGetGradSource_NoEnclosingScope(self):
    """Top-level 'gradients*' scopes are identified as the grad source."""
    self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
    self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
    self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
    self.assertEqual(
        "gradients_0", self._grad_source_for_name("gradients_0/foo"))
    self.assertEqual(
        "gradients", self._grad_source_for_name("gradients/foo/bar"))
    self.assertEqual(
        "gradients_0", self._grad_source_for_name("gradients_0/foo/bar"))
  def testGetGradSource_EnclosingScope(self):
    """'gradients*' scopes nested under other scopes are found intact."""
    self.assertEqual(
        "foo/gradients:0", self._grad_source_for_name("foo/gradients"))
    self.assertEqual(
        "foo/gradients_0:0", self._grad_source_for_name("foo/gradients_0"))
    self.assertEqual(
        "foo/gradients", self._grad_source_for_name("foo/gradients/bar"))
    self.assertEqual(
        "foo/gradients_0", self._grad_source_for_name("foo/gradients_0/bar"))
    self.assertEqual(
        "foo/bar/gradients",
        self._grad_source_for_name("foo/bar/gradients/baz"))
    self.assertEqual(
        "foo/bar/gradients_0",
        self._grad_source_for_name("foo/bar/gradients_0/baz"))
  def testGetGradSource_NestedUsesInnermost(self):
    """With nested 'gradients' scopes, the innermost one wins."""
    self.assertEqual(
        "foo/gradients/bar/gradients_0",
        self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
  def testWriteShape(self):
    """With infer_shape (the default), reads get the written static shape
    and a mismatched write raises ValueError."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      c0 = tf.constant([4.0, 5.0])
      w0 = ta.write(0, c0)
      r0 = w0.read(0)
      self.assertAllEqual(c0.get_shape(), r0.get_shape())
      # NOTE(review): the writes below chain off `w0` (the first array),
      # not the freshly-created `ta` — presumably intentional since the
      # shape constraint lives on the write chain; confirm.
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      c1 = tf.constant([6.0, 7.0])
      w1 = w0.write(1, c1)
      r0 = w1.read(0)
      r1 = w1.read(1)
      self.assertAllEqual(c0.get_shape(), r0.get_shape())
      self.assertAllEqual(c1.get_shape(), r1.get_shape())
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3)
      c2 = tf.constant([4.0, 5.0, 6.0])
      with self.assertRaises(ValueError):
        w0.write(0, c2)
  def _testUnpackShape(self, legacy):
    """Unpacking establishes the element shape used for later reads/writes."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo",
          size=0, dynamic_size=True, infer_shape=True)
      value = tf.constant([[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
      if legacy:
        w0 = ta._legacy_unpack(value)
      else:
        w0 = ta.unpack(value)
      r0 = w0.read(0)
      self.assertAllEqual((2,), r0.get_shape())
      c1 = tf.constant([4.0, 5.0])
      w1 = w0.write(3, c1)
      r1 = w1.read(0)
      self.assertAllEqual(c1.get_shape(), r1.get_shape())
      # Writing a differently-shaped element violates the inferred shape.
      c2 = tf.constant([4.0, 5.0, 6.0])
      with self.assertRaises(ValueError):
        w1.write(4, c2)
  def testUnpackShape(self):
    """Shape-inference-on-unpack test via the current unpack path."""
    self._testUnpackShape(legacy=False)
  def testUnpackShapeLegacy(self):
    """Shape-inference-on-unpack test via the legacy unpack path."""
    self._testUnpackShape(legacy=True)
  def testSplitShape(self):
    """Split with uniform lengths yields a static element shape; with
    non-uniform lengths the element shape is unknown."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo",
          size=0, dynamic_size=True, infer_shape=True)
      value = tf.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
      w0 = ta.split(value, [1, 1, 1])
      r0 = w0.read(0)
      self.assertAllEqual((1, 2), r0.get_shape())
      ta1 = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo1",
          size=0, dynamic_size=True, infer_shape=True)
      w0 = ta1.split(value, [1, 2])
      r0 = w0.read(0)
      self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
  def testWriteUnknownShape(self):
    """Writing a placeholder of unknown shape leaves reads unknown-shaped."""
    with self.test_session():
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=3, infer_shape=True)
      c0 = tf.placeholder(tf.float32)
      w0 = ta.write(0, c0)
      r0 = w0.read(0)
      self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
  def _testGradientWhenNotAllComponentsRead(self, legacy):
    """Unread array slots contribute zero gradient to the unpacked input."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=2)
      x = tf.constant([2.0, 3.0])
      if legacy:
        w = ta._legacy_unpack(x)
      else:
        w = ta.unpack(x)
      r0 = w.read(0)
      # calculate (dr0/dx0, dr0/dx1).  since r0 = x0, gradients are (1, 0).
      grad_r0 = tf.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
      grad_r0_vals = session.run(grad_r0)[0]
      self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
  def testGradientWhenNotAllComponentsRead(self):
    """Partial-read gradient test via the current unpack path."""
    self._testGradientWhenNotAllComponentsRead(legacy=False)
  def testGradientWhenNotAllComponentsReadLegacy(self):
    """Partial-read gradient test via the legacy unpack path."""
    self._testGradientWhenNotAllComponentsRead(legacy=True)
  def _testTensorArrayUnpackDynamic(self, legacy):
    """Unpack into a dynamic array, grow it with a write, pack, and check
    that gradients only flow back to the original unpacked elements."""
    with self.test_session(use_gpu=self._use_gpu) as sess:
      ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=3,
                                        dynamic_size=True)
      x = tf.constant([1.0, 2.0, 3.0])
      if legacy:
        w0 = ta._legacy_unpack(x)
      else:
        w0 = ta.unpack(x)
      w1 = w0.write(3, 4.0)
      if legacy:
        r = w1._legacy_pack()
      else:
        r = w1.pack()
      self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
      grad = tf.gradients(ys=[r], xs=[x])
      self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
                          sess.run(grad)[0])
  def testTensorArrayUnpackDynamic(self):
    """Dynamic unpack/pack test via the current paths."""
    self._testTensorArrayUnpackDynamic(legacy=False)
  def testTensorArrayUnpackDynamicLegacy(self):
    """Dynamic unpack/pack test via the legacy paths."""
    self._testTensorArrayUnpackDynamic(legacy=True)
  def testTensorArraySplitDynamic(self):
    """Split into a dynamic array, grow it, concat, and check gradients."""
    with self.test_session(use_gpu=self._use_gpu) as sess:
      ta = tensor_array_ops.TensorArray(dtype=tf.float32, size=3,
                                        dynamic_size=True)
      x = tf.constant([1.0, 2.0, 3.0])
      w0 = ta.split(x, [1, 1, 1])
      w1 = w0.write(3, [4.0])
      r = w1.concat()
      self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), r.eval())
      # Gradient flows only to the elements that came from `x`.
      grad = tf.gradients(ys=[r], xs=[x])
      self.assertAllEqual(np.array([1.0, 1.0, 1.0]),
                          sess.run(grad)[0])
  def _testTensorArrayEvalEmpty(self, legacy):
    """Packing a size-0 array with no known element shape raises."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(dtype=tf.float32,
                                        size=0,
                                        dynamic_size=False,
                                        infer_shape=False)
      with self.assertRaisesOpError(
          "TensorArray has size zero, but element shape <unknown> is not fully "
          "defined. Currently only static shapes are supported when packing "
          "zero-size TensorArrays."):
        if legacy:
          ta._legacy_pack().eval()
        else:
          ta.pack().eval()
  def testTensorArrayEvalEmpty(self):
    """Empty-pack failure test via the current pack path."""
    self._testTensorArrayEvalEmpty(legacy=False)
  def testTensorArrayEvalEmptyLegacy(self):
    """Empty-pack failure test via the legacy pack path."""
    self._testTensorArrayEvalEmpty(legacy=True)
  def _testTensorArrayEvalEmptyWithDefault(self, legacy):
    """With infer_shape, a size-0 array packs/concats to zero-length output
    once an element shape has been recorded by an (unexecuted) unpack."""
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(dtype=tf.float32,
                                        size=0,
                                        dynamic_size=False,
                                        infer_shape=True)
      self.assertEqual(0, ta.size().eval())
      if legacy:
        # Don't actually perform the pack.  This stores the static shape.
        ta._legacy_unpack(tf.zeros([1, 3, 5]))
        packed = ta._legacy_pack()
      else:
        # Don't actually perform the pack.  This stores the static shape.
        ta.unpack(tf.zeros([0, 3, 5]))
        packed = ta.pack()
      self.assertAllEqual([0, 3, 5], packed.eval().shape)
      # Concatenating zero tensors along their first dimension gives a
      # first dimension of zero
      self.assertAllEqual([0, 5], ta.concat().eval().shape)
  def testTensorArrayEvalEmptyWithDefault(self):
    """Empty pack with inferred shape, current paths."""
    self._testTensorArrayEvalEmptyWithDefault(legacy=False)
  def testTensorArrayEvalEmptyWithDefaultLegacy(self):
    """Empty pack with inferred shape, legacy paths."""
    self._testTensorArrayEvalEmptyWithDefault(legacy=True)
  def testTensorArrayScatterReadAndGradients(self):
    """Scatter to arbitrary indices, read back, and check gradients."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
      indices = tf.constant([1, 8])
      value = tf.constant([[1.0, -1.0], [10.0, -10.0]])
      w = ta.scatter(indices, value)
      r0 = w.read(1)
      r1 = w.read(8)
      # Test combined gradients + aggregation of read(0)
      grad = tf.gradients(
          ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
      read_vals, grad_vals = session.run([[r0, r1], grad])
      self.assertEqual(len(read_vals), 2)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual([1.0, -1.0], read_vals[0])
      self.assertAllEqual([10.0, -10.0], read_vals[1])
      self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
  def testTensorArrayWriteGatherAndGradients(self):
    """Gather selected indices after unpack; ungathered rows get zero grad."""
    with self.test_session(use_gpu=self._use_gpu) as session:
      ta = tensor_array_ops.TensorArray(
          dtype=tf.float32, tensor_array_name="foo", size=0, dynamic_size=True)
      values = tf.constant([[1.0*x, -1.0*x] for x in range(10)])
      indices = tf.constant([1, 8])
      w = ta.unpack(values)
      g = w.gather(indices)
      # Test combined gradients + aggregation of read(0)
      grad = tf.gradients(
          ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
      g_vals, grad_vals = session.run([[g], grad])
      # Gradients for 8 of the 10 unread components are zero.
      expected_grad = np.zeros((10, 2))
      expected_grad[1] = [2.0, 3.0]
      expected_grad[8] = [4.0, 5.0]
      self.assertEqual(len(g_vals), 1)
      self.assertEqual(len(grad_vals), 1)
      self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
      self.assertAllEqual(expected_grad, grad_vals[0])
class TensorArrayGPUTest(TensorArrayCPUTest):
  """Re-runs the entire CPU test suite with GPU sessions enabled."""
  _use_gpu = True
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| apache-2.0 |
mpurzynski/MozDef | alerts/geomodel/execution.py | 3 | 2879 | from datetime import datetime
from typing import Callable, NamedTuple, Optional
from mozdef_util.elasticsearch_client import ElasticsearchClient as ESClient
from mozdef_util.query_models import SearchQuery, TermMatch
from mozdef_util.utilities.toUTC import toUTC
_TYPE_NAME = 'execution_state'
class ExecutionState(NamedTuple):
    '''A record of an alert's execution at a particular time, used to create a
    sliding window through which an alert can query for relevant events and
    not run the risk of missing any due to relying only on searching some
    configured amount of time in the past.
    '''

    # Record tag; always `_TYPE_NAME` ('execution_state') for documents
    # written by this module.
    type_: str
    # alert_name: str
    # When the alert last executed (UTC, via `toUTC`).
    execution_time: datetime

    # NOTE(review): defined without `self` or `@staticmethod`; it only works
    # when called on the class, i.e. `ExecutionState.new(...)` — confirm
    # callers use it that way.
    def new(executed_at: Optional[datetime]=None) -> 'ExecutionState':
        '''Construct a new `ExecutionState` representing the execution of an
        alert at a specific time.

        By default, the execution time will be set to when this function is
        called if not explicitly provided.
        '''

        if executed_at is None:
            executed_at = toUTC(datetime.now())

        return ExecutionState(_TYPE_NAME, executed_at)
class Record(NamedTuple):
    '''A container for data identifying an `ExecutionState` in ElasticSearch.
    '''

    # ElasticSearch document id; empty string for a not-yet-persisted record.
    identifier: Optional[str]
    # The execution state payload itself.
    state: ExecutionState

    # NOTE(review): defined without `self` or `@staticmethod`; call as
    # `Record.new(state)` — confirm callers use it that way.
    def new(state: ExecutionState) -> 'Record':
        '''Construct a new `Record` that, when stored, will result in a new
        document being inserted into ElasticSearch.
        '''

        return Record('', state)
# Name of an ElasticSearch index.
Index = str

# Persist a `Record` into the given index.
StoreInterface = Callable[[Record, Index], None]
# Retrieve the stored `Record` from the given index, if any.
LoadInterface = Callable[[Index], Optional[Record]]
def _dict_take(dictionary, keys):
return {key: dictionary[key] for key in keys}
def store(client: ESClient) -> StoreInterface:
    '''Wrap an `ElasticsearchClient` in a `StoreInterface` closure to be
    invoked without requiring direct access to the client in order to
    persist an `ExecutionState`.
    '''

    def _save(record: Record, esindex: Index):
        # NamedTuple -> plain dict so the ES client can serialize it.
        document = dict(record.state._asdict())
        client.save_object(
            index=esindex, body=document, doc_id=record.identifier)

    return _save
def load(client: ESClient) -> LoadInterface:
    '''Wrap an `ElasticsearchClient` in a `LoadInterface` closure to be
    invoked without requiring direct access to the client in order to retrieve
    an `ExecutionState`.
    '''

    def wrapper(esindex: Index=None) -> Optional[Record]:
        # Only one execution-state document is expected per index; take the
        # first hit if any exist.
        query = SearchQuery()
        query.add_must(TermMatch('type_', _TYPE_NAME))

        results = query.execute(client, indices=[esindex])

        if len(results['hits']) == 0:
            return None

        eid = results['hits'][0]['_id']
        # Rebuild the NamedTuple from only the fields it declares, ignoring
        # any extra keys the stored document may carry.
        state = ExecutionState(**_dict_take(
            results['hits'][0].get('_source', {}),
            ExecutionState._fields))

        return Record(eid, state)

    return wrapper
| mpl-2.0 |
CHBMB/LazyLibrarian | lib/apscheduler/threadpool.py | 138 | 3982 | """
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
import atexit
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)

# Registry of weak references to every live pool so that _shutdown_all() can
# stop them at interpreter exit without keeping the pools themselves alive.
_threadpools = set()


# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
    """Shut down every thread pool that is still alive."""
    for pool_ref in tuple(_threadpools):
        pool = pool_ref()
        if pool:
            pool.shutdown()

atexit.register(_shutdown_all)


class ThreadPool(object):
    """Thread pool with a "core" of persistent workers plus on-demand extra
    workers that exit after being idle for ``keepalive`` seconds.

    Tasks are queued with :meth:`submit`; a new worker is started (up to
    ``max_threads``) whenever a task is submitted and capacity remains.
    """

    def __init__(self, core_threads=0, max_threads=20, keepalive=1):
        """
        :param core_threads: maximum number of persistent threads in the pool
        :param max_threads: maximum number of total threads in the pool
        :param keepalive: seconds to keep non-core worker threads waiting
            for new tasks
        """
        self.core_threads = core_threads
        # always allow at least one thread, and never fewer than the core size
        self.max_threads = max(max_threads, core_threads, 1)
        self.keepalive = keepalive
        self._queue = Queue()
        self._threads_lock = Lock()
        self._threads = set()
        self._shutdown = False

        _threadpools.add(ref(self))
        logger.info('Started thread pool with %d core threads and %s maximum '
                    'threads', core_threads, max_threads or 'unlimited')

    def _adjust_threadcount(self):
        """Start one more worker if we are still below the thread limit."""
        self._threads_lock.acquire()
        try:
            if self.num_threads < self.max_threads:
                # Workers started while below core_threads are "core" workers
                # and block on the queue forever; later ones may time out.
                self._add_thread(self.num_threads < self.core_threads)
        finally:
            self._threads_lock.release()

    def _add_thread(self, core):
        """Spawn a daemonic worker thread; ``core`` workers never expire."""
        t = Thread(target=self._run_jobs, args=(core,))
        t.setDaemon(True)
        t.start()
        self._threads.add(t)

    def _run_jobs(self, core):
        """Worker main loop: pop (func, args, kwargs) tuples and run them."""
        logger.debug('Started worker thread')
        block = True
        timeout = None
        if not core:
            # non-core workers give up after `keepalive` idle seconds
            block = self.keepalive > 0
            timeout = self.keepalive

        while True:
            try:
                func, args, kwargs = self._queue.get(block, timeout)
            except Empty:
                break

            if self._shutdown:
                # shutdown() queues (None, None, None) sentinels after
                # setting the flag; drop them (and any raced task) here.
                break

            try:
                func(*args, **kwargs)
            except:
                logger.exception('Error in worker thread')

        self._threads_lock.acquire()
        self._threads.remove(currentThread())
        self._threads_lock.release()

        logger.debug('Exiting worker thread')

    @property
    def num_threads(self):
        """Number of currently live worker threads."""
        return len(self._threads)

    def submit(self, func, *args, **kwargs):
        """Queue ``func(*args, **kwargs)`` for execution by a worker thread.

        :raises RuntimeError: if the pool has already been shut down
        """
        if self._shutdown:
            raise RuntimeError('Cannot schedule new tasks after shutdown')

        self._queue.put((func, args, kwargs))
        self._adjust_threadcount()

    def shutdown(self, wait=True):
        """Stop all workers; safe to call more than once.

        :param wait: if True, block until every worker thread has exited
        """
        if self._shutdown:
            return

        # BUGFIX: this previously called ``logging.info`` (the root logger)
        # while every other message in this module goes through the module
        # logger, bypassing any handler/level configured for it.
        logger.info('Shutting down thread pool')
        self._shutdown = True
        _threadpools.remove(ref(self))

        self._threads_lock.acquire()
        for _ in range(self.num_threads):
            # one wake-up sentinel per live worker
            self._queue.put((None, None, None))
        self._threads_lock.release()

        if wait:
            self._threads_lock.acquire()
            threads = tuple(self._threads)
            self._threads_lock.release()
            for thread in threads:
                thread.join()

    def __repr__(self):
        if self.max_threads:
            threadcount = '%d/%d' % (self.num_threads, self.max_threads)
        else:
            threadcount = '%d' % self.num_threads

        return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)
| gpl-3.0 |
alex/django-debug-toolbar | debug_toolbar/panels/timer.py | 1 | 3006 | import time
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.debug.timer import DebugTimer
from debug_toolbar.panels import DebugPanel
class TimerDebugPanel(DebugPanel):
    """
    Panel that displays the time a response took in milliseconds.
    """
    name = 'Timer'

    def __init__(self, context=None):
        # BUGFIX: the signature used a mutable default (``context={}``), so
        # every instance created with the default shared one dict.  Use a
        # None sentinel and build a fresh dict per instance instead.
        if context is None:
            context = {}
        super(TimerDebugPanel, self).__init__(context)
        self.timer = DebugTimer()
        # Only advertise panel content when resource usage can be measured.
        self.has_content = self.timer.has_resource

    def process_request(self, request):
        """Start timing when the request arrives."""
        self.timer.start(request)

    def process_response(self, request, response):
        """Stop timing once the response has been generated."""
        self.timer.stop(request, response)

    def nav_title(self):
        return _('Time')

    def nav_subtitle(self):
        """One-line summary shown in the toolbar navigation."""
        # TODO l10n
        if self.timer.has_resource:
            utime = self.timer._end_rusage.ru_utime - self.timer._start_rusage.ru_utime
            stime = self.timer._end_rusage.ru_stime - self.timer._start_rusage.ru_stime
            return 'CPU: %0.2fms (%0.2fms)' % ((utime + stime) * 1000.0, self.timer.total_time)
        else:
            return 'TOTAL: %0.2fms' % (self.timer.total_time)

    def title(self):
        return _('Resource Usage')

    def url(self):
        return ''

    def content(self):
        """Render the resource-usage table for the panel body."""
        utime = 1000 * self.timer.elapsed_ru('ru_utime')
        stime = 1000 * self.timer.elapsed_ru('ru_stime')
        vcsw = self.timer.elapsed_ru('ru_nvcsw')
        ivcsw = self.timer.elapsed_ru('ru_nivcsw')
        # minflt/majflt feed the commented-out "Page faults" row below; they
        # are kept so the row can be re-enabled on platforms where the
        # numbers are meaningful.
        minflt = self.timer.elapsed_ru('ru_minflt')
        majflt = self.timer.elapsed_ru('ru_majflt')

        # these are documented as not meaningful under Linux.  If you're running BSD
        # feel free to enable them, and add any others that I hadn't gotten to before
        # I noticed that I was getting nothing but zeroes and that the docs agreed. :-(
        #
        #        blkin = self._elapsed_ru('ru_inblock')
        #        blkout = self._elapsed_ru('ru_oublock')
        #        swap = self._elapsed_ru('ru_nswap')
        #        rss = self._end_rusage.ru_maxrss
        #        srss = self._end_rusage.ru_ixrss
        #        urss = self._end_rusage.ru_idrss
        #        usrss = self._end_rusage.ru_isrss

        # TODO l10n on values
        rows = (
            (_('User CPU time'), '%0.3f msec' % utime),
            (_('System CPU time'), '%0.3f msec' % stime),
            (_('Total CPU time'), '%0.3f msec' % (utime + stime)),
            (_('Elapsed time'), '%0.3f msec' % self.timer.total_time),
            (_('Context switches'), '%d voluntary, %d involuntary' % (vcsw, ivcsw)),
            #            ('Memory use', '%d max RSS, %d shared, %d unshared' % (rss, srss, urss + usrss)),
            #            ('Page faults', '%d no i/o, %d requiring i/o' % (minflt, majflt)),
            #            ('Disk operations', '%d in, %d out, %d swapout' % (blkin, blkout, swap)),
        )

        context = self.context.copy()
        context.update({
            'rows': rows,
        })

        return render_to_string('debug_toolbar/panels/timer.html', context)
| bsd-3-clause |
jontrulson/mraa | src/doxy2swig.py | 12 | 15284 | #!/usr/bin/env python2
"""Doxygen XML to SWIG docstring converter.
Usage:
doxy2swig.py [options] input.xml output.i
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
#
#
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# Thanks:
# Johan Hake: the include_function_definition feature
# Bill Spotz: bug reports and testing.
# Sebastian Henschel: Misc. enhancements.
#
#
from xml.dom import minidom
import re
import textwrap
import sys
import os.path
import optparse
def my_open_read(source):
    """Return `source` unchanged if it is already file-like, otherwise
    treat it as a filename and open it for reading."""
    if hasattr(source, "read"):
        return source
    return open(source)
def my_open_write(dest):
    """Return `dest` unchanged if it is already file-like, otherwise
    treat it as a filename and open it for writing."""
    if hasattr(dest, "write"):
        return dest
    return open(dest, 'w')
class Doxy2SWIG:
    """Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x that have support for
    feature("docstring"). Once the data is parsed it is stored in
    self.pieces.

    NOTE: this class is Python 2 only (see the shebang); it relies on
    dict.has_key() and on str/bytes semantics from Python 2.
    """

    def __init__(self, src, include_function_definition=True, quiet=False):
        """Initialize the instance given a source object. `src` can
        be a file or filename. If you do not want to include function
        definitions from doxygen then set
        `include_function_definition` to `False`. This is handy since
        this allows you to use the swig generated function definition
        using %feature("autodoc", [0,1]).

        """
        f = my_open_read(src)
        self.my_dir = os.path.dirname(f.name)
        self.xmldoc = minidom.parse(f).documentElement
        f.close()

        # Output is accumulated as a list of string fragments and joined
        # (after cleanup) in write().
        self.pieces = []
        self.pieces.append('\n// File: %s\n' %
                           os.path.basename(f.name))

        self.space_re = re.compile(r'\s+')
        self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
        # multi is set to 1 while processing a doxygen index that spans
        # several XML files; it changes how write() joins the pieces.
        self.multi = 0
        # XML tags whose entire subtrees are skipped during parsing.
        self.ignores = ['inheritancegraph', 'param', 'listofallmembers',
                        'innerclass', 'name', 'declname', 'incdepgraph',
                        'invincdepgraph', 'programlisting', 'type',
                        'references', 'referencedby', 'location',
                        'collaborationgraph', 'reimplements',
                        'reimplementedby', 'derivedcompoundref',
                        'basecompoundref']
        #self.generics = []
        self.include_function_definition = include_function_definition
        if not include_function_definition:
            self.ignores.append('argsstring')

        self.quiet = quiet

    def generate(self):
        """Parses the file set in the initialization.  The resulting
        data is stored in `self.pieces`.

        """
        self.parse(self.xmldoc)

    def parse(self, node):
        """Parse a given node.  This function in turn calls the
        `parse_<nodeType>` functions which handle the respective
        nodes.

        """
        # Dispatch on the DOM node's class name (Document, Text, Element,
        # Comment).
        pm = getattr(self, "parse_%s" % node.__class__.__name__)
        pm(node)

    def parse_Document(self, node):
        self.parse(node.documentElement)

    def parse_Text(self, node):
        txt = node.data
        # Escape backslashes and double quotes so the text survives inside
        # the generated %feature("docstring") "..." literal.
        txt = txt.replace('\\', r'\\\\')
        txt = txt.replace('"', r'\"')
        # ignore pure whitespace
        m = self.space_re.match(txt)
        if m and len(m.group()) == len(txt):
            # do nothing
            pass
        else:
            self.add_text(textwrap.fill(txt, break_long_words=False))

    def parse_Element(self, node):
        """Parse an `ELEMENT_NODE`.  This calls specific
        `do_<tagName>` handlers for different elements.  If no handler
        is available the `generic_parse` method is called.  All
        tagNames specified in `self.ignores` are simply ignored.

        """
        name = node.tagName
        ignores = self.ignores
        if name in ignores:
            return
        attr = "do_%s" % name
        if hasattr(self, attr):
            handlerMethod = getattr(self, attr)
            handlerMethod(node)
        else:
            self.generic_parse(node)
            #if name not in self.generics: self.generics.append(name)

    def parse_Comment(self, node):
        """Parse a `COMMENT_NODE`.  This does nothing for now."""
        return

    def add_text(self, value):
        """Adds text corresponding to `value` into `self.pieces`."""
        if isinstance(value, (list, tuple)):
            self.pieces.extend(value)
        else:
            self.pieces.append(value)

    def get_specific_nodes(self, node, names):
        """Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and child
        `ELEMENT_NODEs`, that have a `tagName` equal to the name.

        """
        nodes = [(x.tagName, x) for x in node.childNodes
                 if x.nodeType == x.ELEMENT_NODE and
                 x.tagName in names]
        return dict(nodes)

    def generic_parse(self, node, pad=0):
        """A Generic parser for arbitrary tags in a node.

        Parameters:

         - node:  A node in the DOM.
         - pad: `int` (default: 0)

           If 0 the node data is not padded with newlines.  If 1 it
           appends a newline after parsing the childNodes.  If 2 it
           pads before and after the nodes are processed.  Defaults to
           0.

        """
        npiece = 0
        if pad:
            npiece = len(self.pieces)
            if pad == 2:
                self.add_text('\n')
        for n in node.childNodes:
            self.parse(n)
        if pad:
            # Only append the trailing newline when the children actually
            # produced some output.
            if len(self.pieces) > npiece:
                self.add_text('\n')

    def space_parse(self, node):
        self.add_text(' ')
        self.generic_parse(node)

    # Inline tags: emit a separating space, then recurse normally.
    do_ref = space_parse
    do_emphasis = space_parse
    do_bold = space_parse
    do_computeroutput = space_parse
    do_formula = space_parse

    def do_compoundname(self, node):
        self.add_text('\n\n')
        data = node.firstChild.data
        self.add_text('%%feature("docstring") %s "\n' % data)

    def do_compounddef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('class', 'struct'):
            prot = node.attributes['prot'].value
            if prot != 'public':
                return
            names = ('compoundname', 'briefdescription',
                     'detaileddescription', 'includes')
            first = self.get_specific_nodes(node, names)
            for n in names:
                # dict.has_key() is Python 2 only (matches the shebang).
                if first.has_key(n):
                    self.parse(first[n])
            self.add_text(['";', '\n'])
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
        elif kind in ('file', 'namespace'):
            nodes = node.getElementsByTagName('sectiondef')
            for n in nodes:
                self.parse(n)

    def do_includes(self, node):
        self.add_text('C++ includes: ')
        self.generic_parse(node, pad=1)

    def do_parameterlist(self, node):
        # Map the doxygen "kind" attribute to a human-readable section
        # heading; unknown kinds are used verbatim.
        text = 'unknown'
        for key, val in node.attributes.items():
            if key == 'kind':
                if val == 'param':
                    text = 'Parameters'
                elif val == 'exception':
                    text = 'Exceptions'
                elif val == 'retval':
                    text = 'Returns'
                else:
                    text = val
                break
        self.add_text(['\n', '\n', text, ':', '\n'])
        self.generic_parse(node, pad=1)

    def do_para(self, node):
        self.add_text('\n')
        self.generic_parse(node, pad=1)

    def do_parametername(self, node):
        self.add_text('\n')
        try:
            data = node.firstChild.data
        except AttributeError:  # perhaps a <ref> tag in it
            data = node.firstChild.firstChild.data
        if data.find('Exception') != -1:
            self.add_text(data)
        else:
            self.add_text("%s: " % data)

    def do_parameterdefinition(self, node):
        self.generic_parse(node, pad=1)

    def do_detaileddescription(self, node):
        self.generic_parse(node, pad=1)

    def do_briefdescription(self, node):
        self.generic_parse(node, pad=1)

    def do_memberdef(self, node):
        prot = node.attributes['prot'].value
        id = node.attributes['id'].value
        kind = node.attributes['kind'].value
        tmp = node.parentNode.parentNode.parentNode
        compdef = tmp.getElementsByTagName('compounddef')[0]
        cdef_kind = compdef.attributes['kind'].value

        if prot == 'public':
            first = self.get_specific_nodes(node, ('definition', 'name'))
            name = first['name'].firstChild.data
            if name[:8] == 'operator':  # Don't handle operators yet.
                return

            if not 'definition' in first or \
                   kind in ['variable', 'typedef']:
                return

            if self.include_function_definition:
                defn = first['definition'].firstChild.data
            else:
                defn = ""
            self.add_text('\n')
            self.add_text('%feature("docstring") ')

            anc = node.parentNode.parentNode
            if cdef_kind in ('file', 'namespace'):
                # Qualify free functions with their namespace when one is
                # available.
                ns_node = anc.getElementsByTagName('innernamespace')
                if not ns_node and cdef_kind == 'namespace':
                    ns_node = anc.getElementsByTagName('compoundname')
                if ns_node:
                    ns = ns_node[0].firstChild.data
                    self.add_text(' %s::%s "\n%s' % (ns, name, defn))
                else:
                    self.add_text(' %s "\n%s' % (name, defn))
            elif cdef_kind in ('class', 'struct'):
                # Get the full function name.
                anc_node = anc.getElementsByTagName('compoundname')
                cname = anc_node[0].firstChild.data
                self.add_text(' %s::%s "\n%s' % (cname, name, defn))

            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
            self.add_text(['";', '\n'])

    def do_definition(self, node):
        data = node.firstChild.data
        self.add_text('%s "\n%s' % (data, data))

    def do_sectiondef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('public-func', 'func', 'user-defined', ''):
            self.generic_parse(node)

    def do_header(self, node):
        """For a user defined section def a header field is present
        which should not be printed as such, so we comment it in the
        output."""
        data = node.firstChild.data
        self.add_text('\n/*\n %s \n*/\n' % data)
        # If our immediate sibling is a 'description' node then we
        # should comment that out also and remove it from the parent
        # node's children.
        parent = node.parentNode
        idx = parent.childNodes.index(node)
        if len(parent.childNodes) >= idx + 2:
            nd = parent.childNodes[idx + 2]
            if nd.nodeName == 'description':
                nd = parent.removeChild(nd)
                self.add_text('\n/*')
                self.generic_parse(nd)
                self.add_text('\n*/\n')

    def do_simplesect(self, node):
        kind = node.attributes['kind'].value
        if kind == 'warning':
            self.add_text(['\n', 'WARNING: '])
            self.generic_parse(node)
        elif kind == 'see':
            self.add_text('\n')
            self.add_text('See: ')
            self.generic_parse(node)
        else:
            self.generic_parse(node)

    def do_argsstring(self, node):
        self.generic_parse(node, pad=1)

    def do_member(self, node):
        kind = node.attributes['kind'].value
        refid = node.attributes['refid'].value
        if kind == 'function' and refid[:9] == 'namespace':
            self.generic_parse(node)

    def do_doxygenindex(self, node):
        # An index document references one XML file per compound; recurse
        # into each with a fresh converter and collect its cleaned output.
        self.multi = 1
        comps = node.getElementsByTagName('compound')
        for c in comps:
            refid = c.attributes['refid'].value
            fname = refid + '.xml'
            if not os.path.exists(fname):
                fname = os.path.join(self.my_dir, fname)
            if not self.quiet:
                print("parsing file: %s" % fname)
            p = Doxy2SWIG(fname, self.include_function_definition, self.quiet)
            p.generate()
            self.pieces.extend(self.clean_pieces(p.pieces))

    def write(self, fname):
        o = my_open_write(fname)
        if self.multi:
            # Pieces were already cleaned per-file in do_doxygenindex.
            # NOTE: encode('utf-8') yields str under Python 2.
            o.write("".join(x.encode('utf-8') for x in self.pieces))
        else:
            o.write("".join(self.clean_pieces(self.pieces)))
        o.close()

    def clean_pieces(self, pieces):
        """Cleans the list of strings given as `pieces`.  It replaces
        multiple newlines by a maximum of 2 and returns a new list.
        It also wraps the paragraphs nicely.

        """
        ret = []
        count = 0
        for i in pieces:
            if i == '\n':
                count = count + 1
            else:
                if i == '";':
                    if count:
                        ret.append('\n')
                elif count > 2:
                    ret.append('\n\n')
                elif count:
                    ret.append('\n' * count)
                count = 0
                ret.append(i)

        _data = "".join(ret)
        ret = []
        for i in _data.split('\n\n'):
            if i == 'Parameters:' or i == 'Exceptions:' or i == 'Returns:':
                # Underline section headings reST-style.
                ret.extend([i, '\n' + '-' * len(i), '\n\n'])
            elif i.find('// File:') > -1:  # leave comments alone.
                ret.extend([i, '\n'])
            else:
                _tmp = textwrap.fill(i.strip(), break_long_words=False)
                _tmp = self.lead_spc.sub(r'\1"\2', _tmp)
                ret.extend([_tmp, '\n\n'])
        return ret
def convert(input, output, include_function_definition=True, quiet=False):
    """Parse the doxygen XML file `input` and write SWIG docstrings to `output`."""
    converter = Doxy2SWIG(input, include_function_definition, quiet)
    converter.generate()
    converter.write(output)
def main():
    """Command-line entry point: parse the options and run the converter."""
    parser = optparse.OptionParser(__doc__)
    parser.add_option("-n", '--no-function-definition',
                      action='store_true',
                      default=False,
                      dest='func_def',
                      help='do not include doxygen function definitions')
    parser.add_option("-q", '--quiet',
                      action='store_true',
                      default=False,
                      dest='quiet',
                      help='be quiet and minimize output')

    options, args = parser.parse_args()
    if len(args) != 2:
        parser.error("error: no input and output specified")

    input_xml, output_i = args
    convert(input_xml, output_i, not options.func_def, options.quiet)


if __name__ == '__main__':
    main()
| mit |
rocky/python2-trepan | trepan/lib/file.py | 1 | 3972 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2013, 2015-2017
# Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Things related to file/module status"""
import os, pyficache, stat, sys
def file_list():
    """Return the (deduplicated) union of files pyficache has cached and
    files it knows a remapping for."""
    known = set(pyficache.cached_files())
    known.update(pyficache.file2file_remap.keys())
    return list(known)
def is_compiled_py(filename):
    """
    Given a file name, return True if the suffix is pyo or pyc (an
    optimized bytecode file).  The comparison is case-insensitive.
    """
    # `in` already yields a bool; the previous ``True if ... else False``
    # wrapper was redundant.
    return filename[-4:].lower() in ('.pyc', '.pyo')
# Any of: readable by owner, by group, or by others.
READABLE_MASK = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)


def readable(path):
    """Test whether a path exists and is readable. Returns None for
    broken symbolic links or a failing stat() and False if
    the file exists but does not have read permission. True is returned
    if the file is readable."""
    try:
        st = os.stat(path)
    except os.error:
        # Nonexistent path, broken symlink, or any other stat() failure.
        return None
    # NOTE: the original version also had an unreachable ``return True``
    # after the try/except (both paths already returned); it has been
    # removed.
    return 0 != st.st_mode & READABLE_MASK
def lookupmodule(name):
    """lookupmodule()->(module, file) translates a possibly incomplete
    file or module name into an absolute file name. None can be
    returned for either of the values positions of module or file when
    no module or file is found.
    """
    # 1. An already-imported module wins outright.
    if sys.modules.get(name):
        return (sys.modules[name], sys.modules[name].__file__)
    # 2. An absolute, readable path is taken as-is.
    if os.path.isabs(name) and readable(name):
        return (None, name)
    # 3. Try relative to the script directory (sys.path[0]).
    f = os.path.join(sys.path[0], name)
    if readable(f):
        return (None, f)
    root, ext = os.path.splitext(name)
    if ext == '':
        # No extension given: assume a Python source file.
        name = name + '.py'
        pass
    if os.path.isabs(name):
        return (None, name)
    # 4. Search every sys.path entry, resolving symlinked directories.
    for dirname in sys.path:
        while os.path.islink(dirname):
            dirname = os.readlink(dirname)
            pass
        fullname = os.path.join(dirname, name)
        if readable(fullname):
            return (None, fullname)
        pass
    return (None, None)
def parse_position(errmsg, arg):
    """parse_position(errmsg, arg)->(fn, name, lineno)

    Parse arg as [filename|module:]lineno.  rfind() is used so that a
    Windows path like C:\\foo\\bar.py:12 keeps the drive letter with the
    file name.  On failure `errmsg` is called with a description and a
    tuple containing None entries is returned.
    """
    colon = arg.rfind(':')
    if colon >= 0:
        filename = arg[:colon].rstrip()
        m, f = lookupmodule(filename)
        if not f:
            errmsg("'%s' not found using sys.path" % filename)
            return (None, None, None)
        else:
            filename = pyficache.resolve_name_to_path(f)
            arg = arg[colon+1:].lstrip()
            pass
        try:
            lineno = int(arg)
        except (TypeError, ValueError):
            # BUGFIX: int() of a non-numeric string (e.g. "foo.py:bar")
            # raises ValueError, which the original ``except TypeError``
            # never caught, so bad input crashed instead of reporting.
            # Also use %-interpolation: errmsg() is called with a single
            # pre-formatted string everywhere else in this function.
            errmsg("Bad line number: %s" % str(arg))
            return (None, filename, None)
        return (None, filename, lineno)
    return (None, None, None)
# Demo it: exercise readable() and lookupmodule() when run as a script.
if __name__=='__main__':
    import tempfile
    print('readable("fdafsa"): %s' % readable('fdafdsa'))
    # NOTE(review): `can_read` is unpacked but never checked; the loop only
    # prints what readable() reports for each permission mode.
    for mode, can_read in [(stat.S_IRUSR, True), (stat.S_IWUSR, False)]:
        f = tempfile.NamedTemporaryFile()
        os.chmod(f.name, mode)
        print("readable('%s'): %s" % (f.name, readable(f.name)))
        f.close()
        pass
    print("lookupmodule('os.path'): %s" % repr(lookupmodule('os.path')))
    print("lookupmodule(__file__): %s" % repr(lookupmodule(__file__)))
    print("lookupmodule('fafdsadsa'): %s" % repr(lookupmodule('fafdsafdsa')))
    pass
| gpl-3.0 |
jmolina116/kaldi-yesno-tutorial | steps/diagnostic/analyze_phone_length_stats.py | 3 | 12895 | #!/usr/bin/env python
# Copyright 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0.
from __future__ import print_function
import argparse
import sys, os
from collections import defaultdict
parser = argparse.ArgumentParser(description="This script reads stats created in analyze_alignments.sh "
                                 "to print information about phone lengths in alignments. It's principally "
                                 "useful in order to see whether there is a reasonable amount of silence "
                                 "at the beginning and ends of segments. The normal output of this script "
                                 "is written to the standard output and is human readable (on crashes, "
                                 "we'll print an error to stderr.")
parser.add_argument("--frequency-cutoff-percentage", type = float,
                    default = 0.5, help="Cutoff, expressed as a percentage "
                    "(between 0 and 100), of frequency at which we print stats "
                    "for a phone.")
parser.add_argument("lang",
                    help="Language directory, e.g. data/lang.")

args = parser.parse_args()

# set up phone_int2text to map from phone to printed form.
phone_int2text = {}
try:
    f = open(args.lang + "/phones.txt", "r");
    for line in f.readlines():
        [ word, number] = line.split()
        phone_int2text[int(number)] = word
    f.close()
except:
    sys.exit("analyze_phone_length_stats.py: error opening or reading {0}/phones.txt".format(
        args.lang))

# this is a special case... for begin- and end-of-sentence stats,
# we group all nonsilence phones together.
phone_int2text[0] = 'nonsilence'

# populate the set 'nonsilence', which will contain the integer phone-ids of
# nonsilence phones (and disambig phones, which won't matter).
nonsilence = set(phone_int2text.keys())
nonsilence.remove(0)

try:
    # open lang/phones/silence.csl-- while there are many ways of obtaining the
    # silence/nonsilence phones, we read this because it's present in graph
    # directories as well as lang directories.
    filename = "{0}/phones/silence.csl".format(args.lang)
    f = open(filename, "r")
    line = f.readline()
    f.close()
    for silence_phone in line.split(":"):
        nonsilence.remove(int(silence_phone))
except Exception as e:
    sys.exit("analyze_phone_length_stats.py: error processing {0}/phones/silence.csl: {1}".format(
        args.lang, str(e)))

# phone_lengths is a dict of dicts of dicts:
# phone_lengths[boundary_type][phone][length] is a count of occurrences,
# for boundary_type in [ 'begin', 'end', 'all' ], where phone is an integer
# phone-id and length is the length of the phone instance in frames.
# note: for the 'begin' and 'end' boundary-types, we group all nonsilence phones
# into phone-id zero.
phone_lengths = dict()
for boundary_type in [ 'begin', 'end', 'all' ]:
    phone_lengths[boundary_type] = dict()
    for p in phone_int2text.keys():
        phone_lengths[boundary_type][p] = defaultdict(int)

# total_phones is a dict from boundary_type to total count [of phone occurrences]
total_phones = defaultdict(int)
# total_frames is a dict from boundary_type to total number of frames.
total_frames = defaultdict(int)

# Each stdin line has the form: <count> <boundary-type> <phone-id> <length>
while True:
    line = sys.stdin.readline()
    if line == '':
        break
    a = line.split()
    if len(a) != 4:
        sys.exit("analyze_phone_length_stats.py: reading stdin, could not interpret line: " + line)
    try:
        count, boundary_type, phone, length = a
        total_phones[boundary_type] += int(count)
        total_frames[boundary_type] += int(count) * int(length)
        phone_lengths[boundary_type][int(phone)][int(length)] += int(count)
        if int(phone) in nonsilence:
            # accumulate the same stat under the grouped 'nonsilence' id 0.
            nonsilence_phone = 0
            phone_lengths[boundary_type][nonsilence_phone][int(length)] += int(count)
    except Exception as e:
        sys.exit("analyze_phone_length_stats.py: unexpected phone {0} "
                 "seen (lang directory mismatch?): {1}".format(phone, str(e)))

# NOTE(review): phone_lengths is pre-populated above, so this length check
# can never fire; it presumably predates that initialization.
if len(phone_lengths) == 0:
    sys.exit("analyze_phone_length_stats.py: read no input")

# work out the optional-silence phone
try:
    f = open(args.lang + "/phones/optional_silence.int", "r")
    optional_silence_phone = int(f.readline())
    optional_silence_phone_text = phone_int2text[optional_silence_phone]
    f.close()
    if optional_silence_phone in nonsilence:
        print("analyze_phone_length_stats.py: was expecting the optional-silence phone to "
              "be a member of the silence phones, it is not. This script won't work correctly.")
except:
    # fallback: guess that the silence phone with the most total frames is
    # the optional-silence phone.
    largest_count = 0
    optional_silence_phone = 1
    for p in phone_int2text.keys():
        if p > 0 and not p in nonsilence:
            this_count = sum([ l * c for l,c in phone_lengths['all'][p].items() ])
            if this_count > largest_count:
                largest_count = this_count
                optional_silence_phone = p
    optional_silence_phone_text = phone_int2text[optional_silence_phone]
    print("analyze_phone_length_stats.py: could not get optional-silence phone from "
          "{0}/phones/optional_silence.int, guessing that it's {1} from the stats. ".format(
              args.lang, optional_silence_phone_text))
def GetPercentile(length_to_count, fraction):
    """Given `length_to_count`, a map from length-in-frames to count, return
    the length-in-frames at the (fraction * 100)'th percentile of the
    distribution.  Returns 0 for an empty distribution."""
    total_phones = sum(length_to_count.values())
    if total_phones == 0:
        return 0
    else:
        items = sorted(length_to_count.items())
        count_cutoff = int(fraction * total_phones)
        cur_count_total = 0
        for length, count in items:
            assert count >= 0
            cur_count_total += count
            if cur_count_total >= count_cutoff:
                return length
        # Unreachable: cur_count_total ends at total_phones >= count_cutoff.
        # BUGFIX: the original had ``assert false`` (lowercase), which would
        # raise NameError instead of AssertionError if ever reached.
        raise AssertionError("unreachable: percentile cutoff not met")
def GetMean(length_to_count):
    """Return the mean length of the distribution given as a map from
    length-in-frames to count, or 0.0 if the distribution is empty."""
    num_phones = sum(length_to_count.values())
    if num_phones == 0:
        return 0.0
    weighted_sum = 0.0
    for length, count in length_to_count.items():
        weighted_sum += float(length * count)
    return weighted_sum / num_phones
# Analyze frequency, median and mean of optional-silence at beginning and end of utterances.
# The next block will print something like
# "At utterance begin, SIL is seen 15.0% of the time; when seen, duration (median, mean) is (5, 7.6) frames."
# "At utterance end, SIL is seen 14.6% of the time; when seen, duration (median, mean) is (4, 6.1) frames."
# This block will print warnings if silence is seen less than 80% of the time at utterance
# beginning and end.
for boundary_type in 'begin', 'end':
    phone_to_lengths = phone_lengths[boundary_type]
    num_utterances = total_phones[boundary_type]
    assert num_utterances > 0
    opt_sil_lengths = phone_to_lengths[optional_silence_phone]
    frequency_percentage = sum(opt_sil_lengths.values()) * 100.0 / num_utterances
    # The reason for this warning is that the tradition in speech recognition is
    # to supply a little silence at the beginning and end of utterances... up to
    # maybe half a second.  If your database is not like this, you should know;
    # you may want to mess with the segmentation to add more silence.
    if frequency_percentage < 80.0:
        print("analyze_phone_length_stats.py: WARNING: optional-silence {0} is seen only {1}% "
              "of the time at utterance {2}. This may not be optimal.".format(
                  optional_silence_phone_text, frequency_percentage, boundary_type))

# this will control a sentence that we print..
boundary_to_text = { }
boundary_to_text['begin'] = 'At utterance begin'
boundary_to_text['end'] = 'At utterance end'
boundary_to_text['all'] = 'Overall'

# the next block prints lines like (to give some examples):
# At utterance begin, SIL accounts for 98.4% of phone occurrences, with duration (median, mean, 95-percentile) is (57,59.9,113) frames.
# ...
# At utterance end, nonsilence accounts for 4.2% of phone occurrences, with duration (median, mean, 95-percentile) is (13,13.3,22) frames.
# ...
# Overall, R_I accounts for 3.2% of phone occurrences, with duration (median, mean, 95-percentile) is (6,6.9,12) frames.
for boundary_type in 'begin', 'end', 'all':
    phone_to_lengths = phone_lengths[boundary_type]
    tot_num_phones = total_phones[boundary_type]
    # sort the phones in decreasing order of count.
    for phone,lengths in sorted(phone_to_lengths.items(), key = lambda x : -sum(x[1].values())):
        frequency_percentage = sum(lengths.values()) * 100.0 / tot_num_phones
        if frequency_percentage < args.frequency_cutoff_percentage:
            continue
        duration_median = GetPercentile(lengths, 0.5)
        duration_percentile_95 = GetPercentile(lengths, 0.95)
        duration_mean = GetMean(lengths)

        text = boundary_to_text[boundary_type]  # e.g. 'At utterance begin'.
        try:
            phone_text = phone_int2text[phone]
        except:
            sys.exit("analyze_phone_length_stats.py: phone {0} is not covered on phones.txt "
                     "(lang/alignment mismatch?)".format(phone))
        print("{text}, {phone_text} accounts for {percent}% of phone occurrences, with "
              "duration (median, mean, 95-percentile) is ({median},{mean},{percentile95}) frames.".format(
                  text = text, phone_text = phone_text,
                  percent = "%.1f" % frequency_percentage,
                  median = duration_median, mean = "%.1f" % duration_mean,
                  percentile95 = duration_percentile_95))

## Print stats on frequency and average length of word-internal optional-silences.
## For optional-silence only, subtract the begin and end-utterance stats from the 'all'
## stats, to get the stats excluding initial and final phones.
total_frames['internal'] = total_frames['all'] - total_frames['begin'] - total_frames['end']
total_phones['internal'] = total_phones['all'] - total_phones['begin'] - total_phones['end']
internal_opt_sil_phone_lengths = dict(phone_lengths['all'][optional_silence_phone])
for length in internal_opt_sil_phone_lengths.keys():
    # subtract the counts for begin and end from the overall counts to get the
    # word-internal count.
    internal_opt_sil_phone_lengths[length] -= (phone_lengths['begin'][optional_silence_phone][length] +
                                               phone_lengths['end'][optional_silence_phone][length])

if total_phones['internal'] != 0.0:
    total_internal_optsil_frames = sum([ float(l * c) for l,c in internal_opt_sil_phone_lengths.items() ])
    total_optsil_frames = sum([ float(l * c)
                                for l,c in phone_lengths['all'][optional_silence_phone].items() ])
    opt_sil_internal_frame_percent = total_internal_optsil_frames * 100.0 / total_frames['internal']
    opt_sil_total_frame_percent = total_optsil_frames * 100.0 / total_frames['all']
    internal_frame_percent = total_frames['internal'] * 100.0 / total_frames['all']
    print("The optional-silence phone {0} occupies {1}% of frames overall ".format(
        optional_silence_phone_text, "%.1f" % opt_sil_total_frame_percent))
    # 360000 frames per hour, assuming 100 frames (of 10ms each) per second.
    hours_total = total_frames['all'] / 360000.0;
    hours_nonsil = (total_frames['all'] - total_optsil_frames) / 360000.0
    print("Limiting the stats to the {0}% of frames not covered by an utterance-[begin/end] phone, "
          "optional-silence {1} occupies {2}% of frames.".format("%.1f" % internal_frame_percent,
                                                                 optional_silence_phone_text,
                                                                 "%.1f" % opt_sil_internal_frame_percent))
    print("Assuming 100 frames per second, the alignments represent {0} hours of data, "
          "or {1} hours if {2} frames are excluded.".format(
              "%.1f" % hours_total, "%.1f" % hours_nonsil, optional_silence_phone_text))
    opt_sil_internal_phone_percent = (sum(internal_opt_sil_phone_lengths.values()) *
                                      100.0 / total_phones['internal'])
    duration_median = GetPercentile(internal_opt_sil_phone_lengths, 0.5)
    duration_mean = GetMean(internal_opt_sil_phone_lengths)
    duration_percentile_95 = GetPercentile(internal_opt_sil_phone_lengths, 0.95)
    print("Utterance-internal optional-silences {0} comprise {1}% of utterance-internal phones, with duration "
          "(median, mean, 95-percentile) = ({2},{3},{4})".format(
              optional_silence_phone_text, "%.1f" % opt_sil_internal_phone_percent,
              duration_median, "%0.1f" % duration_mean, duration_percentile_95))
| apache-2.0 |
burzillibus/RobHome | venv/lib/python2.7/site-packages/zmq/sugar/poll.py | 21 | 5324 | """0MQ polling related functions and classes."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.backend import zmq_poll
from .constants import POLLIN, POLLOUT, POLLERR
#-----------------------------------------------------------------------------
# Polling related methods
#-----------------------------------------------------------------------------
class Poller(object):
    """A stateful poll interface that mirrors Python's built-in poll."""
    sockets = None
    _map = {}

    def __init__(self):
        # Parallel structures: `sockets` is an ordered list of (socket, flags)
        # pairs handed to zmq_poll; `_map` gives each socket's index in it.
        self.sockets = []
        self._map = {}

    def __contains__(self, socket):
        return socket in self._map

    def register(self, socket, flags=POLLIN|POLLOUT):
        """p.register(socket, flags=POLLIN|POLLOUT)

        Register a 0MQ socket or native fd for I/O monitoring.

        register(s,0) is equivalent to unregister(s).

        Parameters
        ----------
        socket : zmq.Socket or native socket
            A zmq.Socket or any Python object having a ``fileno()``
            method that returns a valid file descriptor.
        flags : int
            The events to watch for.  Can be POLLIN, POLLOUT or POLLIN|POLLOUT.
            If `flags=0`, socket will be unregistered.
        """
        if not flags:
            # flags=0 means "unregister"; silently ignore sockets we never saw.
            if socket in self._map:
                self.unregister(socket)
            return
        try:
            position = self._map[socket]
        except KeyError:
            # New socket: append and remember its position.
            self._map[socket] = len(self.sockets)
            self.sockets.append((socket, flags))
        else:
            # Already registered: just replace the event mask in place.
            self.sockets[position] = (socket, flags)

    def modify(self, socket, flags=POLLIN|POLLOUT):
        """Modify the flags for an already registered 0MQ socket or native fd."""
        self.register(socket, flags)

    def unregister(self, socket):
        """Remove a 0MQ socket or native fd for I/O monitoring.

        Parameters
        ----------
        socket : Socket
            The socket instance to stop polling.
        """
        position = self._map.pop(socket)
        del self.sockets[position]
        # Every entry after the removed one shifted left by one slot.
        for entry, _ in self.sockets[position:]:
            self._map[entry] -= 1

    def poll(self, timeout=None):
        """Poll the registered 0MQ or native fds for I/O.

        Parameters
        ----------
        timeout : float, int
            The timeout in milliseconds. If None, no `timeout` (infinite). This
            is in milliseconds to be compatible with ``select.poll()``.

        Returns
        -------
        events : list of tuples
            The list of events that are ready to be processed.
            This is a list of tuples of the form ``(socket, event)``, where the
            0MQ Socket or integer fd is the first element, and the poll event
            mask (POLLIN, POLLOUT) is the second.  It is common to call
            ``events = dict(poller.poll())``, which turns the list of tuples
            into a mapping of ``socket : event``.
        """
        # Normalize timeout: None / negative => block forever; floats are
        # truncated because zmq_poll expects integer milliseconds.
        if timeout is None or timeout < 0:
            timeout = -1
        elif isinstance(timeout, float):
            timeout = int(timeout)
        return zmq_poll(self.sockets, timeout=timeout)
def select(rlist, wlist, xlist, timeout=None):
    """select(rlist, wlist, xlist, timeout=None) -> (rlist, wlist, xlist)

    Return the result of poll as a lists of sockets ready for r/w/exception.

    This has the same interface as Python's built-in ``select.select()``
    function.

    Parameters
    ----------
    timeout : float, int, optional
        The timeout in seconds. If None, no timeout (infinite). This is in
        seconds to be compatible with ``select.select()``.
    rlist : list of sockets/FDs
        sockets/FDs to be polled for read events
    wlist : list of sockets/FDs
        sockets/FDs to be polled for write events
    xlist : list of sockets/FDs
        sockets/FDs to be polled for error events

    Returns
    -------
    (rlist, wlist, xlist) : tuple of lists of sockets (length 3)
        Lists correspond to sockets available for read/write/error events
        respectively.
    """
    # select() takes seconds, zmq_poll() takes integer milliseconds; any
    # negative value (including None) means "block forever".
    if timeout is None:
        timeout = -1
    timeout = int(timeout * 1000.0)
    if timeout < 0:
        timeout = -1

    # Build one (socket, event-mask) entry per distinct socket, OR-ing in a
    # bit for each request list the socket appears in.
    poll_entries = []
    for sock in set(rlist + wlist + xlist):
        mask = 0
        if sock in rlist:
            mask |= POLLIN
        if sock in wlist:
            mask |= POLLOUT
        if sock in xlist:
            mask |= POLLERR
        poll_entries.append((sock, mask))

    # Fan the poll results back out into the three select()-style lists.
    ready_r, ready_w, ready_x = [], [], []
    for sock, mask in zmq_poll(poll_entries, timeout):
        if mask & POLLIN:
            ready_r.append(sock)
        if mask & POLLOUT:
            ready_w.append(sock)
        if mask & POLLERR:
            ready_x.append(sock)
    return ready_r, ready_w, ready_x
# ----------------------------------------------------------------------------
# Public API
# ----------------------------------------------------------------------------
__all__ = ['Poller', 'select']
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.