repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
s-hertel/ansible | lib/ansible/modules/wait_for_connection.py | 31 | 3017 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: wait_for_connection
short_description: Waits until remote system is reachable/usable
description:
- Waits for a total of C(timeout) seconds.
- Retries the transport connection after a timeout of C(connect_timeout).
- Tests the transport connection every C(sleep) seconds.
- This module makes use of internal ansible transport (and configuration) and the ping/win_ping module to guarantee correct end-to-end functioning.
- This module is also supported for Windows targets.
version_added: '2.3'
options:
connect_timeout:
description:
- Maximum number of seconds to wait for a connection to happen before closing and retrying.
type: int
default: 5
delay:
description:
- Number of seconds to wait before starting to poll.
type: int
default: 0
sleep:
description:
- Number of seconds to sleep between checks.
type: int
default: 1
timeout:
description:
- Maximum number of seconds to wait for.
type: int
default: 600
notes:
- This module is also supported for Windows targets.
seealso:
- module: ansible.builtin.wait_for
- module: ansible.windows.win_wait_for
- module: community.windows.win_wait_for_process
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Wait 600 seconds for target connection to become reachable/usable
wait_for_connection:
- name: Wait 300 seconds, but only start checking after 60 seconds
wait_for_connection:
delay: 60
timeout: 300
# Wake desktops, wait for them to become ready and continue playbook
- hosts: all
gather_facts: no
tasks:
- name: Send magic Wake-On-Lan packet to turn on individual systems
wakeonlan:
mac: '{{ mac }}'
broadcast: 192.168.0.255
delegate_to: localhost
- name: Wait for system to become reachable
wait_for_connection:
- name: Gather facts for first time
setup:
# Build a new VM, wait for it to become ready and continue playbook
- hosts: all
gather_facts: no
tasks:
- name: Clone new VM, if missing
vmware_guest:
hostname: '{{ vcenter_ipaddress }}'
name: '{{ inventory_hostname_short }}'
template: Windows 2012R2
customization:
hostname: '{{ vm_shortname }}'
runonce:
- powershell.exe -ExecutionPolicy Unrestricted -File C:\Windows\Temp\ConfigureRemotingForAnsible.ps1 -ForceNewSSLCert -EnableCredSSP
delegate_to: localhost
- name: Wait for system to become reachable over WinRM
wait_for_connection:
timeout: 900
- name: Gather facts for first time
setup:
'''
RETURN = r'''
elapsed:
description: The number of seconds that elapsed waiting for the connection to appear.
returned: always
type: float
sample: 23.1
'''
| gpl-3.0 |
Chilledheart/chromium | tools/telemetry/third_party/gsutilz/third_party/boto/boto/sns/__init__.py | 131 | 2117 | # Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# this is here for backward compatibility
# originally, the SNSConnection class was defined here
from boto.sns.connection import SNSConnection
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the SNS service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return get_regions('sns', connection_cls=SNSConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.sns.connection.SNSConnection`.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.sns.connection.SNSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
| bsd-3-clause |
Zanzibar82/plugin.video.pelisalacarta_ui.pureita | core/unpackerjs3.py | 4 | 3745 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Descifra el empaquetado javascript PACK de Dean Edwards
# No está bien probado, así que no garantizo que funcione aunque en los casos de este plugin va muy bien :)
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os.path
import sys
import os
from core import scrapertools
from core import config
from core import logger
def unpackjs(texto,tipoclaves=1):
logger.info("unpackjs")
patron = "return p\}(.*?)\.split"
matches = re.compile(patron,re.DOTALL).findall(texto)
scrapertools.printMatches(matches)
if len(matches)>0:
data = matches[0]
logger.info("[unpackerjs3.py] bloque funcion="+data)
else:
patron = "return p; }(.*?)\.split"
matches = re.compile(patron,re.DOTALL).findall(texto)
scrapertools.printMatches(matches)
if len(matches)>0:
data = matches[0]
else:
return ""
patron = "(.*)'([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
cifrado = matches[0][0]
logger.info("[unpackerjs.py] cifrado="+cifrado)
logger.info("[unpackerjs.py] palabras="+matches[0][1])
descifrado = ""
# Crea el dicionario con la tabla de conversion
claves = []
if tipoclaves==1:
claves.extend(["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"])
claves.extend(["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"])
else:
claves.extend(["0","1","2","3","4","5","6","7","8","9","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"])
claves.extend(["10","11","12","13","14","15","16","17","18","19","1a","1b","1c","1d","1e","1f","1g","1h","1i","1j","1k","1l","1m","1n","1o","1p","1q","1r","1s","1t","1u","1v","1w","1x","1y","1z"])
claves.extend(["20","21","22","23","24","25","26","27","28","29","2a","2b","2c","2d","2e","2f","2g","2h","2i","2j","2k","2l","2m","2n","2o","2p","2q","2r","2s","2t","2u","2v","2w","2x","2y","2z"])
claves.extend(["30","31","32","33","34","35","36","37","38","39","3a","3b","3c","3d","3e","3f","3g","3h","3i","3j","3k","3l","3m","3n","3o","3p","3q","3r","3s","3t","3u","3v","3w","3x","3y","3z"])
palabras = matches[0][1].split("|")
diccionario = {}
i=0
for palabra in palabras:
#logger.info("i=%d" % i)
#logger.info("claves_i="+claves[i])
if palabra!="":
diccionario[claves[i]]=palabra
else:
diccionario[claves[i]]=claves[i]
logger.info(claves[i]+"="+palabra)
i=i+1
# Sustituye las palabras de la tabla de conversion
# Obtenido de http://rc98.net/multiple_replace
def lookup(match):
try:
return diccionario[match.group(0)]
except:
logger.info("[unpackerjs.py] Error al encontrar la clave "+match.group(0))
return ""
#lista = map(re.escape, diccionario)
# Invierte las claves, para que tengan prioridad las más largas
claves.reverse()
cadenapatron = '|'.join(claves)
#logger.info("[unpackerjs.py] cadenapatron="+cadenapatron)
compiled = re.compile(cadenapatron)
descifrado = compiled.sub(lookup, cifrado)
logger.info("descifrado="+descifrado);
descifrado = descifrado.replace("\\","")
logger.info("descifrado="+descifrado);
return descifrado
| gpl-3.0 |
encukou/freeipa | ipatests/test_cmdline/test_help.py | 4 | 5658 | # Authors: Petr Viktorin <pviktori@redhat.com>
#
# Copyright (C) 2012 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
from io import StringIO
import shutil
import errno
import six
from ipalib import api, errors
from ipaserver.plugins.user import user_add
import pytest
if six.PY3:
unicode = str
pytestmark = pytest.mark.needs_ipaapi
@pytest.mark.tier0
class CLITestContext:
"""Context manager that replaces stdout & stderr, and catches SystemExit
Whatever was printed to the streams is available in ``stdout`` and
``stderr`` attrributes once the with statement finishes.
When exception is given, asserts that exception is raised. The exception
will be available in the ``exception`` attribute.
"""
def __init__(self, exception=None):
self.exception = exception
def __enter__(self):
self.old_streams = sys.stdout, sys.stderr
self.stdout_fileobj = sys.stdout = StringIO()
self.stderr_fileobj = sys.stderr = StringIO()
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout, sys.stderr = self.old_streams
self.stdout = self.stdout_fileobj.getvalue()
self.stderr = self.stderr_fileobj.getvalue()
self.stdout_fileobj.close()
self.stderr_fileobj.close()
if self.exception:
if not isinstance(exc_value, self.exception):
return False
self.exception = exc_value
return True
else:
return None
def test_ipa_help():
"""Test that `ipa help` only writes to stdout"""
with CLITestContext() as ctx:
return_value = api.Backend.cli.run(['help'])
assert return_value == 0
assert ctx.stderr == ''
def test_ipa_help_without_cache():
"""Test `ipa help` without schema cache"""
cache_dir = os.path.expanduser('~/.cache/ipa/schema/')
backup_dir = os.path.expanduser('~/.cache/ipa/schema.bak/')
shutil.rmtree(backup_dir, ignore_errors=True)
if os.path.isdir(cache_dir):
os.rename(cache_dir, backup_dir)
try:
with CLITestContext() as ctx:
return_value = api.Backend.cli.run(['help'])
assert return_value == 0
assert ctx.stderr == ''
finally:
shutil.rmtree(cache_dir, ignore_errors=True)
try:
os.rename(backup_dir, cache_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def test_ipa_without_arguments():
"""Test that `ipa` errors out, and prints the help to stderr"""
with CLITestContext(exception=SystemExit) as ctx:
api.Backend.cli.run([])
assert ctx.exception.code == 2
assert ctx.stdout == ''
assert 'Error: Command not specified' in ctx.stderr
with CLITestContext() as help_ctx:
api.Backend.cli.run(['help'])
assert help_ctx.stdout in ctx.stderr
def test_bare_topic():
"""Test that `ipa user` errors out, and prints the help to stderr
This is because `user` is a topic, not a command, so `ipa user` doesn't
match our usage string. The help should be accessed using `ipa help user`.
"""
with CLITestContext(exception=errors.CommandError) as ctx:
api.Backend.cli.run(['user'])
assert ctx.exception.name == 'user'
assert ctx.stdout == ''
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'user'])
assert return_value == 0
assert help_ctx.stdout in ctx.stderr
def test_command_help():
"""Test that `help user-add` & `user-add -h` are equivalent and contain doc
"""
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'user-add'])
assert return_value == 0
assert help_ctx.stderr == ''
with CLITestContext(exception=SystemExit) as h_ctx:
api.Backend.cli.run(['user-add', '-h'])
assert h_ctx.exception.code == 0
assert h_ctx.stderr == ''
assert h_ctx.stdout == help_ctx.stdout
assert unicode(user_add.doc) in help_ctx.stdout
def test_ambiguous_command_or_topic():
"""Test that `help ping` & `ping -h` are NOT equivalent
One is a topic, the other is a command
"""
with CLITestContext() as help_ctx:
return_value = api.Backend.cli.run(['help', 'ping'])
assert return_value == 0
assert help_ctx.stderr == ''
with CLITestContext(exception=SystemExit) as h_ctx:
api.Backend.cli.run(['ping', '-h'])
assert h_ctx.exception.code == 0
assert h_ctx.stderr == ''
assert h_ctx.stdout != help_ctx.stdout
def test_multiline_description():
"""Test that all of a multi-line command description appears in output
"""
# This assumes trust_add has multiline doc. Ensure it is so.
assert '\n\n' in unicode(api.Command.trust_add.doc).strip()
with CLITestContext(exception=SystemExit) as help_ctx:
api.Backend.cli.run(['trust-add', '-h'])
assert unicode(api.Command.trust_add.doc).strip() in help_ctx.stdout
| gpl-3.0 |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/web/iweb.py | 18 | 14513 | # -*- test-case-name: twisted.web.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface definitions for L{twisted.web}.
@var UNKNOWN_LENGTH: An opaque object which may be used as the value of
L{IBodyProducer.length} to indicate that the length of the entity
body is not known in advance.
"""
from zope.interface import Interface, Attribute
from twisted.internet.interfaces import IPushProducer
from twisted.cred.credentials import IUsernameDigestHash
class IRequest(Interface):
"""
An HTTP request.
@since: 9.0
"""
method = Attribute("A C{str} giving the HTTP method that was used.")
uri = Attribute(
"A C{str} giving the full encoded URI which was requested (including "
"query arguments).")
path = Attribute(
"A C{str} giving the encoded query path of the request URI.")
args = Attribute(
"A mapping of decoded query argument names as C{str} to "
"corresponding query argument values as C{list}s of C{str}. "
"For example, for a URI with C{'foo=bar&foo=baz&quux=spam'} "
"for its query part, C{args} will be C{{'foo': ['bar', 'baz'], "
"'quux': ['spam']}}.")
received_headers = Attribute(
"Backwards-compatibility access to C{requestHeaders}. Use "
"C{requestHeaders} instead. C{received_headers} behaves mostly "
"like a C{dict} and does not provide access to all header values.")
requestHeaders = Attribute(
"A L{http_headers.Headers} instance giving all received HTTP request "
"headers.")
headers = Attribute(
"Backwards-compatibility access to C{responseHeaders}. Use"
"C{responseHeaders} instead. C{headers} behaves mostly like a "
"C{dict} and does not provide access to all header values nor "
"does it allow multiple values for one header to be set.")
responseHeaders = Attribute(
"A L{http_headers.Headers} instance holding all HTTP response "
"headers to be sent.")
def getHeader(key):
"""
Get an HTTP request header.
@type key: C{str}
@param key: The name of the header to get the value of.
@rtype: C{str} or C{NoneType}
@return: The value of the specified header, or C{None} if that header
was not present in the request.
"""
def getCookie(key):
"""
Get a cookie that was sent from the network.
"""
def getAllHeaders():
"""
Return dictionary mapping the names of all received headers to the last
value received for each.
Since this method does not return all header information,
C{requestHeaders.getAllRawHeaders()} may be preferred.
"""
def getRequestHostname():
"""
Get the hostname that the user passed in to the request.
This will either use the Host: header (if it is available) or the
host we are listening on if the header is unavailable.
@returns: the requested hostname
@rtype: C{str}
"""
def getHost():
"""
Get my originally requesting transport's host.
@return: An L{IAddress}.
"""
def getClientIP():
"""
Return the IP address of the client who submitted this request.
@returns: the client IP address or C{None} if the request was submitted
over a transport where IP addresses do not make sense.
@rtype: C{str} or L{NoneType}
"""
def getClient():
"""
Return the hostname of the IP address of the client who submitted this
request, if possible.
This method is B{deprecated}. See L{getClientIP} instead.
@rtype: L{NoneType} or L{str}
@return: The canonical hostname of the client, as determined by
performing a name lookup on the IP address of the client.
"""
def getUser():
"""
Return the HTTP user sent with this request, if any.
If no user was supplied, return the empty string.
@returns: the HTTP user, if any
@rtype: C{str}
"""
def getPassword():
"""
Return the HTTP password sent with this request, if any.
If no password was supplied, return the empty string.
@returns: the HTTP password, if any
@rtype: C{str}
"""
def isSecure():
"""
Return True if this request is using a secure transport.
Normally this method returns True if this request's HTTPChannel
instance is using a transport that implements ISSLTransport.
This will also return True if setHost() has been called
with ssl=True.
@returns: True if this request is secure
@rtype: C{bool}
"""
def getSession(sessionInterface=None):
"""
Look up the session associated with this request or create a new one if
there is not one.
@return: The L{Session} instance identified by the session cookie in
the request, or the C{sessionInterface} component of that session
if C{sessionInterface} is specified.
"""
def URLPath():
"""
@return: A L{URLPath} instance which identifies the URL for which this
request is.
"""
def prePathURL():
"""
@return: At any time during resource traversal, a L{str} giving an
absolute URL to the most nested resource which has yet been
reached.
"""
def rememberRootURL():
"""
Remember the currently-processed part of the URL for later
recalling.
"""
def getRootURL():
"""
Get a previously-remembered URL.
"""
# Methods for outgoing response
def finish():
"""
Indicate that the response to this request is complete.
"""
def write(data):
"""
Write some data to the body of the response to this request. Response
headers are written the first time this method is called, after which
new response headers may not be added.
"""
def addCookie(k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set an outgoing HTTP cookie.
In general, you should consider using sessions instead of cookies, see
L{twisted.web.server.Request.getSession} and the
L{twisted.web.server.Session} class for details.
"""
def setResponseCode(code, message=None):
"""
Set the HTTP response code.
"""
def setHeader(k, v):
"""
Set an HTTP response header. Overrides any previously set values for
this header.
@type name: C{str}
@param name: The name of the header for which to set the value.
@type value: C{str}
@param value: The value to set for the named header.
"""
def redirect(url):
"""
Utility function that does a redirect.
The request should have finish() called after this.
"""
def setLastModified(when):
"""
Set the C{Last-Modified} time for the response to this request.
If I am called more than once, I ignore attempts to set Last-Modified
earlier, only replacing the Last-Modified time if it is to a later
value.
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED} if appropriate for the time given.
@param when: The last time the resource being returned was modified, in
seconds since the epoch.
@type when: C{int}, C{long} or C{float}
@return: If I am a C{If-Modified-Since} conditional request and the
time given is not newer than the condition, I return
L{http.CACHED<CACHED>} to indicate that you should write no body.
Otherwise, I return a false value.
"""
def setETag(etag):
"""
Set an C{entity tag} for the outgoing response.
That's "entity tag" as in the HTTP/1.1 C{ETag} header, "used for
comparing two or more entities from the same requested resource."
If I am a conditional request, I may modify my response code to
L{NOT_MODIFIED} or L{PRECONDITION_FAILED}, if appropriate for the tag
given.
@param etag: The entity tag for the resource being returned.
@type etag: C{str}
@return: If I am a C{If-None-Match} conditional request and the tag
matches one in the request, I return L{http.CACHED<CACHED>} to
indicate that you should write no body. Otherwise, I return a
false value.
"""
def setHost(host, port, ssl=0):
"""
Change the host and port the request thinks it's using.
This method is useful for working with reverse HTTP proxies (e.g. both
Squid and Apache's mod_proxy can do this), when the address the HTTP
client is using is different than the one we're listening on.
For example, Apache may be listening on https://www.example.com, and
then forwarding requests to http://localhost:8080, but we don't want
HTML produced by Twisted to say 'http://localhost:8080', they should
say 'https://www.example.com', so we do::
request.setHost('www.example.com', 443, ssl=1)
"""
class ICredentialFactory(Interface):
"""
A credential factory defines a way to generate a particular kind of
authentication challenge and a way to interpret the responses to these
challenges. It creates L{ICredentials} providers from responses. These
objects will be used with L{twisted.cred} to authenticate an authorize
requests.
"""
scheme = Attribute(
"A C{str} giving the name of the authentication scheme with which "
"this factory is associated. For example, C{'basic'} or C{'digest'}.")
def getChallenge(request):
"""
Generate a new challenge to be sent to a client.
@type peer: L{twisted.web.http.Request}
@param peer: The request the response to which this challenge will be
included.
@rtype: C{dict}
@return: A mapping from C{str} challenge fields to associated C{str}
values.
"""
def decode(response, request):
"""
Create a credentials object from the given response.
@type response: C{str}
@param response: scheme specific response string
@type request: L{twisted.web.http.Request}
@param request: The request being processed (from which the response
was taken).
@raise twisted.cred.error.LoginFailed: If the response is invalid.
@rtype: L{twisted.cred.credentials.ICredentials} provider
@return: The credentials represented by the given response.
"""
class IBodyProducer(IPushProducer):
"""
Objects which provide L{IBodyProducer} write bytes to an object which
provides L{IConsumer} by calling its C{write} method repeatedly.
L{IBodyProducer} providers may start producing as soon as they have
an L{IConsumer} provider. That is, they should not wait for a
C{resumeProducing} call to begin writing data.
L{IConsumer.unregisterProducer} must not be called. Instead, the
L{Deferred} returned from C{startProducing} must be fired when all bytes
have been written.
L{IConsumer.write} may synchronously invoke any of C{pauseProducing},
C{resumeProducing}, or C{stopProducing}. These methods must be implemented
with this in mind.
@since: 9.0
"""
# Despite the restrictions above and the additional requirements of
# stopProducing documented below, this interface still needs to be an
# IPushProducer subclass. Providers of it will be passed to IConsumer
# providers which only know about IPushProducer and IPullProducer, not
# about this interface. This interface needs to remain close enough to one
# of those interfaces for consumers to work with it.
length = Attribute(
"""
C{length} is a C{int} indicating how many bytes in total this
L{IBodyProducer} will write to the consumer or L{UNKNOWN_LENGTH}
if this is not known in advance.
""")
def startProducing(consumer):
"""
Start producing to the given L{IConsumer} provider.
@return: A L{Deferred} which fires with C{None} when all bytes have
been produced or with a L{Failure} if there is any problem before
all bytes have been produced.
"""
def stopProducing():
"""
In addition to the standard behavior of L{IProducer.stopProducing}
(stop producing data), make sure the L{Deferred} returned by
C{startProducing} is never fired.
"""
class IRenderable(Interface):
"""
An L{IRenderable} is an object that may be rendered by the
L{twisted.web.template} templating system.
"""
def lookupRenderMethod(name):
"""
Look up and return the render method associated with the given name.
@type name: C{str}
@param name: The value of a render directive encountered in the
document returned by a call to L{IRenderable.render}.
@return: A two-argument callable which will be invoked with the request
being responded to and the tag object on which the render directive
was encountered.
"""
def render(request):
"""
Get the document for this L{IRenderable}.
@type request: L{IRequest} provider or L{NoneType}
@param request: The request in response to which this method is being
invoked.
@return: An object which can be flattened.
"""
class ITemplateLoader(Interface):
"""
A loader for templates; something usable as a value for
L{twisted.web.template.Element}'s C{loader} attribute.
"""
def load():
"""
Load a template suitable for rendering.
@return: a C{list} of C{list}s, C{unicode} objects, C{Element}s and
other L{IRenderable} providers.
"""
UNKNOWN_LENGTH = u"twisted.web.iweb.UNKNOWN_LENGTH"
__all__ = [
"IUsernameDigestHash", "ICredentialFactory", "IRequest",
"IBodyProducer", "IRenderable",
"UNKNOWN_LENGTH"]
| agpl-3.0 |
qrkourier/ansible | lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py | 75 | 7594 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_vmpools
short_description: Module to manage VM pools in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage VM pools in oVirt/RHV."
options:
name:
description:
- "Name of the VM pool to manage."
required: true
state:
description:
- "Should the VM pool be present/absent."
- "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
choices: ['present', 'absent']
default: present
template:
description:
- "Name of the template, which will be used to create VM pool."
description:
description:
- "Description of the VM pool."
cluster:
description:
- "Name of the cluster, where VM pool should be created."
type:
description:
- "Type of the VM pool. Either manual or automatic."
- "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
The virtual machine reverts to the original base image after the administrator returns it to the pool."
- "C(Automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
is returned to the virtual machine pool."
- "Default value is set by engine."
choices: ['manual', 'automatic']
vm_per_user:
description:
- "Maximum number of VMs a single user can attach to from this pool."
- "Default value is set by engine."
prestarted:
description:
- "Number of pre-started VMs defines the number of VMs in run state, that are waiting
to be attached to Users."
- "Default value is set by engine."
vm_count:
description:
- "Number of VMs in the pool."
- "Default value is set by engine."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create VM pool from template
- ovirt_vmpools:
cluster: mycluster
name: myvmpool
template: rhel7
vm_count: 2
prestarted: 2
vm_per_user: 1
# Remove vmpool, note that all VMs in pool will be stopped and removed:
- ovirt_vmpools:
state: absent
name: myvmpool
'''
RETURN = '''
id:
description: ID of the VM pool which is managed
returned: On success if VM pool is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
vm_pool:
description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/vm_pool."
returned: On success if VM pool is found.
type: dict
'''
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_params,
check_sdk,
create_connection,
equal,
get_link_name,
ovirt_full_argument_spec,
wait,
)
class VmPoolsModule(BaseModule):
def build_entity(self):
return otypes.VmPool(
name=self._module.params['name'],
description=self._module.params['description'],
comment=self._module.params['comment'],
cluster=otypes.Cluster(
name=self._module.params['cluster']
) if self._module.params['cluster'] else None,
template=otypes.Template(
name=self._module.params['template']
) if self._module.params['template'] else None,
max_user_vms=self._module.params['vm_per_user'],
prestarted_vms=self._module.params['prestarted'],
size=self._module.params['vm_count'],
type=otypes.VmPoolType(
self._module.params['type']
) if self._module.params['type'] else None,
)
def update_check(self, entity):
return (
equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('vm_per_user'), entity.max_user_vms) and
equal(self._module.params.get('prestarted'), entity.prestarted_vms) and
equal(self._module.params.get('vm_count'), entity.size)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, required=True),
template=dict(default=None),
cluster=dict(default=None),
description=dict(default=None),
comment=dict(default=None),
vm_per_user=dict(default=None, type='int'),
prestarted=dict(default=None, type='int'),
vm_count=dict(default=None, type='int'),
type=dict(default=None, choices=['automatic', 'manual']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
vm_pools_service = connection.system_service().vm_pools_service()
vm_pools_module = VmPoolsModule(
connection=connection,
module=module,
service=vm_pools_service,
)
state = module.params['state']
if state == 'present':
ret = vm_pools_module.create()
# Wait for all VM pool VMs to be created:
if module.params['wait']:
vms_service = connection.system_service().vms_service()
for vm in vms_service.list(search='pool=%s' % module.params['name']):
wait(
service=vms_service.service(vm.id),
condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
timeout=module.params['timeout'],
)
elif state == 'absent':
ret = vm_pools_module.remove()
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
| gpl-3.0 |
yarikoptic/pystatsmodels | statsmodels/graphics/tests/test_functional.py | 3 | 2815 | import numpy as np
from numpy.testing import dec, assert_equal, assert_almost_equal
from statsmodels.graphics.functional import \
banddepth, fboxplot, rainbowplot
# Matplotlib is an optional test dependency: tests that plot are skipped
# when it is missing or too old.  The original code used a bare `raise`
# (which raises RuntimeError, not ImportError) and a bare `except:` that
# also swallowed SystemExit/KeyboardInterrupt; both are fixed here while
# preserving the best-effort "set a flag" semantics.
try:
    import matplotlib.pyplot as plt
    import matplotlib
    if matplotlib.__version__ < '1':
        # Treat pre-1.0 releases the same as a missing install.
        raise ImportError("matplotlib >= 1.0 is required for these tests")
    have_matplotlib = True
except Exception:
    have_matplotlib = False
def test_banddepth_BD2():
    """Band depth (BD2) of four harmonic curves matches hand-computed values."""
    grid = np.arange(500) / 150.
    curves = np.asarray([
        1 + 0.5 * np.sin(grid),
        0.3 + np.sin(grid + np.pi / 6),
        -0.5 + np.sin(grid + np.pi / 6),
        -1 + 0.3 * np.cos(grid + np.pi / 6),
    ])
    depth = banddepth(curves, method='BD2')
    # The two outer curves lie inside half of the bands, the two inner
    # curves inside 5/6 of them (plot the four curves to see why).
    expected = [0.5, 5. / 6, 5. / 6, 0.5]
    assert_almost_equal(depth, expected)
def test_banddepth_MBD():
    """Modified band depth (MBD) of four simple curves vs. known values."""
    grid = np.arange(5001) / 5000.
    curves = np.asarray([
        np.zeros(grid.shape),          # flat line at 0
        2 * grid - 1,                  # line crossing the others
        np.ones(grid.shape) * 0.5,     # flat line at 0.5
        np.ones(grid.shape) * -0.25,   # flat line at -0.25
    ])
    depth = banddepth(curves, method='MBD')
    expected = [5. / 6, (2 * (0.75 - 3. / 8) + 3) / 6, 3.5 / 6,
                (2 * 3. / 8 + 3) / 6]
    # Loose tolerance: the crossing line's depth is only approximate on a grid.
    assert_almost_equal(depth, expected, decimal=4)
@dec.skipif(not have_matplotlib)
def test_fboxplot_rainbowplot():
    """Test fboxplot and rainbowplot together, is much faster."""
    # NOTE: the seeded RNG draws below are order-sensitive; the expected
    # orderings asserted at the bottom depend on this exact call sequence.
    def harmfunc(t):
        """Test function, combination of a few harmonic terms."""
        # Constant, 0 with p=0.9, 1 with p=1 - for creating outliers
        ci = int(np.random.random() > 0.9)
        a1i = np.random.random() * 0.05
        a2i = np.random.random() * 0.05
        b1i = (0.15 - 0.1) * np.random.random() + 0.1
        b2i = (0.15 - 0.1) * np.random.random() + 0.1
        # Inlier curves use the small a-coefficients, outliers the larger b's.
        func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \
            ci * (b1i * np.sin(t) + b2i * np.cos(t))
        return func
    np.random.seed(1234567)
    # Some basic test data, Model 6 from Sun and Genton.
    t = np.linspace(0, 2 * np.pi, 250)
    data = []
    for ii in range(20):
        data.append(harmfunc(t))
    # fboxplot test
    fig = plt.figure()
    ax = fig.add_subplot(111)
    _, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax)
    # Expected depth ordering (deepest first) and outlier set for this seed.
    ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2,
                            12, 17, 11, 14, 10, 0, 18])
    assert_equal(ix_depth, ix_expected)
    ix_expected2 = np.array([2, 11, 17, 18])
    assert_equal(ix_outliers, ix_expected2)
    plt.close(fig)
    # rainbowplot test (re-uses depth variable)
    xdata = np.arange(data[0].size)
    fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
    plt.close(fig)
| bsd-3-clause |
pombredanne/pushmanager | core/mail.py | 6 | 3118 | # -*- coding: utf-8 -*-
import email.mime.text
from Queue import Queue, Empty
import smtplib
from threading import Thread
from core.settings import Settings
class MailQueue(object):
    """Process-wide asynchronous mail sender.

    Messages are pushed onto a class-level queue and drained by a single
    daemon worker thread, so callers never block on SMTP I/O.
    """
    # FIFO of (recipient, message, subject, from_email) tuples.
    message_queue = Queue()
    # The single daemon thread draining the queue (created lazily).
    worker_thread = None
    # SMTP connection re-used across one batch of messages.
    smtp = None
    @classmethod
    def start_worker(cls):
        """Start the background worker thread; safe to call repeatedly."""
        if cls.worker_thread is not None:
            return
        cls.worker_thread = Thread(target=cls.process_queue, name='mail-queue')
        cls.worker_thread.daemon = True
        cls.worker_thread.start()
    @classmethod
    def process_queue(cls):
        """Worker loop: send queued mail in batches over one SMTP session."""
        # We double-nest 'while True' blocks here so that we can
        # try to re-use the same SMTP server connection for batches
        # of emails, but not keep it open for long periods without
        # any emails to send.
        while True:
            # Blocks indefinitely
            send_email_args = cls.message_queue.get(True)
            cls.smtp = smtplib.SMTP('127.0.0.1', 25)
            while True:
                cls._send_email(*send_email_args)
                try:
                    # Only blocks for 5 seconds max, raises Empty if still nothing
                    send_email_args = cls.message_queue.get(True, 5)
                except Empty:
                    # Done with this batch, use a blocking call to wait for the next
                    break
            cls.smtp.quit()
    @classmethod
    def _send_email(cls, recipient, message, subject, from_email):
        """Send one HTML message, plus '[all]' copies to the notifyall list."""
        msg = email.mime.text.MIMEText(message, 'html')
        msg['Subject'] = subject
        msg['From'] = from_email
        msg['To'] = recipient
        cls.smtp.sendmail(from_email, [recipient], msg.as_string())
        # Everyone configured in 'notifyall' (except the direct recipient)
        # gets a copy with the subject flagged '[all]'.
        other_recipients = set(Settings['mail']['notifyall']) - set([recipient])
        if other_recipients:
            msg = email.mime.text.MIMEText(message, 'html')
            msg['Subject'] = '[all] %s' % subject
            msg['From'] = Settings['mail']['from']
            msg['To'] = ', '.join(other_recipients)
            cls.smtp.sendmail(from_email, list(other_recipients), msg.as_string())
        cls.message_queue.task_done()
    @classmethod
    def enqueue_email(cls, recipients, message, subject='', from_email=Settings['mail']['from']):
        """Queue ``message`` for one address or an iterable of addresses.

        NOTE(review): the ``from_email`` default is evaluated once at import
        time, so later changes to Settings would not be picked up — confirm
        whether that is intended.
        """
        if isinstance(recipients, (list,set,tuple)):
            # Flatten non-string iterables
            for recipient in recipients:
                cls.enqueue_email(recipient, message, subject, from_email)
        elif isinstance(recipients, (str,unicode)):
            cls.message_queue.put( (recipients, message, subject, from_email) )
        else:
            raise ValueError('Recipient(s) must be a string or iterable of strings')
    @classmethod
    def enqueue_user_email(cls, recipients, *args, **kwargs):
        """Transforms a list of 'user' to 'user@default_domain.com', then invokes enqueue_email."""
        domain = Settings['mail']['default_domain']
        recipients = ['%s@%s' % (recipient, domain) if '@' not in recipient else recipient for recipient in recipients]
        return cls.enqueue_email(recipients, *args, **kwargs)
__all__ = ['MailQueue']
| apache-2.0 |
neocogent/electrum | setup.py | 1 | 3017 | #!/usr/bin/env python3
# python setup.py sdist --format=zip,gztar
import os
import sys
import platform
import importlib.util
import argparse
import subprocess
from setuptools import setup, find_packages
from setuptools.command.install import install
# Minimum interpreter version, enforced both here (fail fast with a clear
# message) and via python_requires in the setup() call below.
MIN_PYTHON_VERSION = "3.6.1"
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
if sys.version_info[:3] < _min_python_version_tuple:
    sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)
# Runtime dependencies live in requirements files shared with CI/packaging.
with open('contrib/requirements/requirements.txt') as f:
    requirements = f.read().splitlines()
with open('contrib/requirements/requirements-hw.txt') as f:
    requirements_hw = f.read().splitlines()
# load version.py; needlessly complicated alternative to "imp.load_source":
version_spec = importlib.util.spec_from_file_location('version', 'electrum/version.py')
version_module = version = importlib.util.module_from_spec(version_spec)
version_spec.loader.exec_module(version_module)
data_files = []
if platform.system() in ['Linux', 'FreeBSD', 'DragonFly']:
    # Parse --root ourselves (setup() has not run yet) to decide whether
    # the install target is writable system-wide or only per-user.
    parser = argparse.ArgumentParser()
    parser.add_argument('--root=', dest='root_path', metavar='dir', default='/')
    opts, _ = parser.parse_known_args(sys.argv[1:])
    usr_share = os.path.join(sys.prefix, "share")
    icons_dirname = 'pixmaps'
    if not os.access(opts.root_path + usr_share, os.W_OK) and \
       not os.access(opts.root_path, os.W_OK):
        # No write access to the system prefix: fall back to the per-user
        # XDG data directory and the 'icons' directory name.
        icons_dirname = 'icons'
        if 'XDG_DATA_HOME' in os.environ.keys():
            usr_share = os.environ['XDG_DATA_HOME']
        else:
            usr_share = os.path.expanduser('~/.local/share')
    data_files += [
        (os.path.join(usr_share, 'applications/'), ['electrum.desktop']),
        (os.path.join(usr_share, icons_dirname), ['electrum/gui/icons/electrum.png']),
    ]
# Optional feature sets installable as extras, e.g. pip install Electrum[gui].
extras_require = {
    'hardware': requirements_hw,
    'fast': ['pycryptodomex'],
    'gui': ['pyqt5'],
}
# 'full' is the union of every extra above.
extras_require['full'] = [pkg for sublist in list(extras_require.values()) for pkg in sublist]
setup(
    name="Electrum",
    version=version.ELECTRUM_VERSION,
    python_requires='>={}'.format(MIN_PYTHON_VERSION),
    install_requires=requirements,
    extras_require=extras_require,
    packages=[
        'electrum',
        'electrum.gui',
        'electrum.gui.qt',
        'electrum.plugins',
    ] + [('electrum.plugins.'+pkg) for pkg in find_packages('electrum/plugins')],
    package_dir={
        'electrum': 'electrum'
    },
    package_data={
        '': ['*.txt', '*.json', '*.ttf', '*.otf'],
        'electrum': [
            'wordlist/*.txt',
            'locale/*/LC_MESSAGES/electrum.mo',
        ],
        'electrum.gui': [
            'icons/*',
        ],
    },
    scripts=['electrum/electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",
    author_email="thomasv@electrum.org",
    license="MIT Licence",
    url="https://electrum.org",
    long_description="""Lightweight Bitcoin Wallet""",
)
| mit |
mmnelemane/nova | nova/virt/firewall.py | 56 | 21942 | # Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2011 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from nova.compute import utils as compute_utils
from nova import context
from nova.i18n import _LI
from nova.network import linux_net
from nova import objects
from nova import utils
from nova.virt import netutils
LOG = logging.getLogger(__name__)
# Configuration options: which firewall driver to load and whether traffic
# between instances on the same network is allowed by default.
firewall_opts = [
    cfg.StrOpt('firewall_driver',
               help='Firewall driver '
                    '(defaults to hypervisor specific iptables driver)'),
    cfg.BoolOpt('allow_same_net_traffic',
                default=True,
                help='Whether to allow network traffic from same network'),
]
CONF = cfg.CONF
CONF.register_opts(firewall_opts)
# use_ipv6 is declared in nova.netconf; import it so CONF.use_ipv6 resolves.
CONF.import_opt('use_ipv6', 'nova.netconf')
def load_driver(default, *args, **kwargs):
    """Instantiate the configured firewall driver, falling back to *default*.

    Extra positional/keyword arguments are forwarded to the driver class.
    """
    driver_name = CONF.firewall_driver or default
    driver_cls = importutils.import_class(driver_name)
    return driver_cls(*args, **kwargs)
class FirewallDriver(object):
    """Contract for security-group / provider-firewall backends.

    Concrete drivers implement these hooks; any hook left unimplemented
    raises NotImplementedError so misconfiguration fails loudly.
    """
    def __init__(self, virtapi):
        self._virtapi = virtapi
    def prepare_instance_filter(self, instance, network_info):
        """Set up filtering for an instance that is not running yet."""
        raise NotImplementedError()
    def filter_defer_apply_on(self):
        """Begin deferring application of IPTables rules (optional hook)."""
        pass
    def filter_defer_apply_off(self):
        """Stop deferring and apply pending IPTables rules (optional hook)."""
        pass
    def unfilter_instance(self, instance, network_info):
        """Tear down filtering for an instance."""
        raise NotImplementedError()
    def apply_instance_filter(self, instance, network_info):
        """Make the instance's filtering active.

        Should be (nearly) a no-op: the heavy lifting belongs in
        prepare_instance_filter, so that by the time this returns the
        instance is firewalled appropriately.
        """
        raise NotImplementedError()
    def refresh_security_group_rules(self, security_group_id):
        """Re-read group rules after a rule was added or removed."""
        raise NotImplementedError()
    def refresh_security_group_members(self, security_group_id):
        """Re-read group membership after an instance joined or left."""
        raise NotImplementedError()
    def refresh_instance_security_rules(self, instance):
        """Re-read every rule affecting one instance.

        Called when the instance's group membership changes, or when any
        of its groups gains or loses a rule.
        """
        raise NotImplementedError()
    def refresh_provider_fw_rules(self):
        """Re-read provider-wide rules common to all hosts/instances.

        Called when a rule is added or removed via the admin API.
        """
        raise NotImplementedError()
    def setup_basic_filtering(self, instance, network_info):
        """Install anti-spoofing and DHCP rules.

        Runs while spawning an instance, before
        :py:meth:`prepare_instance_filter`.
        """
        raise NotImplementedError()
    def instance_filter_exists(self, instance, network_info):
        """Check nova-instance-instance-xxx exists."""
        raise NotImplementedError()
class IptablesFirewallDriver(FirewallDriver):
    """Driver which enforces security groups through iptables rules."""
    def __init__(self, virtapi, **kwargs):
        super(IptablesFirewallDriver, self).__init__(virtapi)
        # Shared iptables manager; rules are staged on it and pushed to the
        # kernel in one batch by its apply() method.
        self.iptables = linux_net.iptables_manager
        # Maps instance id -> (instance, network_info); used by the
        # refresh_* entry points to rebuild rules later.
        self.instance_info = {}
        self.basically_filtered = False
        # Flags for DHCP request rule
        self.dhcp_create = False
        self.dhcp_created = False
        # Fallback chains: traffic not accepted by any security group rule
        # is dropped, for both address families.
        self.iptables.ipv4['filter'].add_chain('sg-fallback')
        self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
        self.iptables.ipv6['filter'].add_chain('sg-fallback')
        self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
    def setup_basic_filtering(self, instance, network_info):
        """Intentionally a no-op for this driver."""
        pass
    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass
    def filter_defer_apply_on(self):
        """Batch subsequent rule changes until filter_defer_apply_off()."""
        self.iptables.defer_apply_on()
    def filter_defer_apply_off(self):
        """Stop batching and flush any deferred rule changes."""
        self.iptables.defer_apply_off()
    def unfilter_instance(self, instance, network_info):
        """Remove the instance's chains and drop its cached info."""
        # pop() doubles as the "was it ever filtered?" check.
        if self.instance_info.pop(instance.id, None):
            self.remove_filters_for_instance(instance)
            self.iptables.apply()
        else:
            LOG.info(_LI('Attempted to unfilter instance which is not '
                         'filtered'), instance=instance)
    def prepare_instance_filter(self, instance, network_info):
        """Build the per-instance chain and rules, then push to iptables."""
        # Cache for later refreshes triggered by security-group changes.
        self.instance_info[instance.id] = (instance, network_info)
        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
        self.add_filters_for_instance(instance, network_info, ipv4_rules,
                                      ipv6_rules)
        LOG.debug('Filters added to instance: %s', instance.id,
                  instance=instance)
        self.refresh_provider_fw_rules()
        LOG.debug('Provider Firewall Rules refreshed: %s', instance.id,
                  instance=instance)
        # Ensure that DHCP request rule is updated if necessary
        if (self.dhcp_create and not self.dhcp_created):
            # Allow the initial broadcast DHCP DISCOVER/REQUEST from
            # instances that do not have an address yet (added once).
            self.iptables.ipv4['filter'].add_rule(
                'INPUT',
                '-s 0.0.0.0/32 -d 255.255.255.255/32 '
                '-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
            self.iptables.ipv4['filter'].add_rule(
                'FORWARD',
                '-s 0.0.0.0/32 -d 255.255.255.255/32 '
                '-p udp -m udp --sport 68 --dport 67 -j ACCEPT')
            self.dhcp_created = True
        self.iptables.apply()
    def _create_filter(self, ips, chain_name):
        # One jump rule per destination IP into the instance's chain.
        return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
    def _get_subnets(self, network_info, version):
        """Return every subnet of the given IP version found in network_info."""
        subnets = []
        for vif in network_info:
            if 'network' in vif and 'subnets' in vif['network']:
                for subnet in vif['network']['subnets']:
                    if subnet['version'] == version:
                        subnets.append(subnet)
        return subnets
    def _filters_for_instance(self, chain_name, network_info):
        """Creates a rule corresponding to each ip that defines a
        jump to the corresponding instance - chain for all the traffic
        destined to that ip.
        """
        v4_subnets = self._get_subnets(network_info, 4)
        v6_subnets = self._get_subnets(network_info, 6)
        ips_v4 = [ip['address'] for subnet in v4_subnets
                  for ip in subnet['ips']]
        ipv4_rules = self._create_filter(ips_v4, chain_name)
        ipv6_rules = ips_v6 = []
        if CONF.use_ipv6:
            if v6_subnets:
                ips_v6 = [ip['address'] for subnet in v6_subnets
                          for ip in subnet['ips']]
                ipv6_rules = self._create_filter(ips_v6, chain_name)
        return ipv4_rules, ipv6_rules
    def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
        """Stage the given rules on the named chain (v6 only if enabled)."""
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule(chain_name, rule)
        if CONF.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule(chain_name, rule)
    def add_filters_for_instance(self, instance, network_info, inst_ipv4_rules,
                                 inst_ipv6_rules):
        """Create the instance chain, jump rules into it, and its rules."""
        chain_name = self._instance_chain_name(instance)
        if CONF.use_ipv6:
            self.iptables.ipv6['filter'].add_chain(chain_name)
        self.iptables.ipv4['filter'].add_chain(chain_name)
        # 'local' gets the per-IP jumps into the instance chain.
        ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
                                                            network_info)
        self._add_filters('local', ipv4_rules, ipv6_rules)
        self._add_filters(chain_name, inst_ipv4_rules, inst_ipv6_rules)
    def remove_filters_for_instance(self, instance):
        """Drop the instance chain (and its rules) for both families."""
        chain_name = self._instance_chain_name(instance)
        self.iptables.ipv4['filter'].remove_chain(chain_name)
        if CONF.use_ipv6:
            self.iptables.ipv6['filter'].remove_chain(chain_name)
    def _instance_chain_name(self, instance):
        # Chain is keyed on the instance's database id.
        return 'inst-%s' % (instance.id,)
    def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info):
        """Append the baseline rules every instance gets (mutates args)."""
        # Always drop invalid packets
        ipv4_rules += ['-m state --state ' 'INVALID -j DROP']
        ipv6_rules += ['-m state --state ' 'INVALID -j DROP']
        # Allow established connections
        ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
        # Pass through provider-wide drops
        ipv4_rules += ['-j $provider']
        ipv6_rules += ['-j $provider']
    def _do_dhcp_rules(self, ipv4_rules, network_info):
        """Accept DHCP replies from each known DHCP server (mutates arg)."""
        v4_subnets = self._get_subnets(network_info, 4)
        dhcp_servers = [subnet.get_meta('dhcp_server')
            for subnet in v4_subnets if subnet.get_meta('dhcp_server')]
        for dhcp_server in dhcp_servers:
            if dhcp_server:
                ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
                                  '-j ACCEPT' % (dhcp_server,))
                # Signal prepare_instance_filter to add the one-time
                # broadcast DHCP request rules.
                self.dhcp_create = True
    def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info):
        """Accept all traffic from the instance's own subnets (mutates args)."""
        v4_subnets = self._get_subnets(network_info, 4)
        v6_subnets = self._get_subnets(network_info, 6)
        cidrs = [subnet['cidr'] for subnet in v4_subnets]
        for cidr in cidrs:
            ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
        if CONF.use_ipv6:
            cidrv6s = [subnet['cidr'] for subnet in v6_subnets]
            for cidrv6 in cidrv6s:
                ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
    def _do_ra_rules(self, ipv6_rules, network_info):
        """Accept ICMPv6 (router advertisements) from each v6 gateway."""
        v6_subnets = self._get_subnets(network_info, 6)
        gateways_v6 = [subnet['gateway']['address'] for subnet in v6_subnets]
        for gateway_v6 in gateways_v6:
            ipv6_rules.append(
                '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
    def _build_icmp_rule(self, rule, version):
        """Return iptables match args for an ICMP security-group rule.

        For ICMP rules, from_port carries the ICMP type and to_port the
        ICMP code; -1 means "any".
        NOTE(review): if from_port == -1 but to_port != -1 this would hit a
        TypeError (None += str) — presumably the API never produces that
        combination; confirm upstream validation.
        """
        icmp_type = rule['from_port']
        icmp_code = rule['to_port']
        if icmp_type == -1:
            icmp_type_arg = None
        else:
            icmp_type_arg = '%s' % icmp_type
            if not icmp_code == -1:
                icmp_type_arg += '/%s' % icmp_code
        if icmp_type_arg:
            if version == 4:
                return ['-m', 'icmp', '--icmp-type', icmp_type_arg]
            elif version == 6:
                return ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg]
        # return empty list if icmp_type == -1
        return []
    def _build_tcp_udp_rule(self, rule, version):
        """Return iptables destination-port match args for a TCP/UDP rule."""
        if rule['from_port'] == rule['to_port']:
            return ['--dport', '%s' % (rule['from_port'],)]
        else:
            return ['-m', 'multiport',
                    '--dports', '%s:%s' % (rule['from_port'],
                                           rule['to_port'])]
    def instance_rules(self, instance, network_info):
        """Compile the (ipv4, ipv6) rule lists for an instance from its
        security groups plus the baseline/DHCP/project-network rules.
        """
        ctxt = context.get_admin_context()
        if isinstance(instance, dict):
            # NOTE(danms): allow old-world instance objects from
            # unconverted callers; all we need is instance.uuid below
            instance = objects.Instance._from_db_object(
                ctxt, objects.Instance(), instance, [])
        ipv4_rules = []
        ipv6_rules = []
        # Initialize with basic rules
        self._do_basic_rules(ipv4_rules, ipv6_rules, network_info)
        # Set up rules to allow traffic to/from DHCP server
        self._do_dhcp_rules(ipv4_rules, network_info)
        # Allow project network traffic
        if CONF.allow_same_net_traffic:
            self._do_project_network_rules(ipv4_rules, ipv6_rules,
                                           network_info)
        # We wrap these in CONF.use_ipv6 because they might cause
        # a DB lookup. The other ones are just list operations, so
        # they're not worth the clutter.
        if CONF.use_ipv6:
            # Allow RA responses
            self._do_ra_rules(ipv6_rules, network_info)
        security_groups = objects.SecurityGroupList.get_by_instance(
            ctxt, instance)
        # then, security group chains and rules
        for security_group in security_groups:
            rules = objects.SecurityGroupRuleList.get_by_security_group(
                ctxt, security_group)
            for rule in rules:
                if not rule['cidr']:
                    # Group-sourced rule (no CIDR): version fixed up below.
                    version = 4
                else:
                    version = netutils.get_ip_version(rule['cidr'])
                if version == 4:
                    fw_rules = ipv4_rules
                else:
                    fw_rules = ipv6_rules
                protocol = rule['protocol']
                if protocol:
                    protocol = rule['protocol'].lower()
                if version == 6 and protocol == 'icmp':
                    protocol = 'icmpv6'
                args = ['-j ACCEPT']
                if protocol:
                    args += ['-p', protocol]
                if protocol in ['udp', 'tcp']:
                    args += self._build_tcp_udp_rule(rule, version)
                elif protocol == 'icmp':
                    args += self._build_icmp_rule(rule, version)
                if rule['cidr']:
                    args += ['-s', str(rule['cidr'])]
                    fw_rules += [' '.join(args)]
                else:
                    if rule['grantee_group']:
                        # Source is another security group: emit one rule
                        # per fixed IP of every member instance.
                        insts = (
                            objects.InstanceList.get_by_security_group(
                                ctxt, rule['grantee_group']))
                        # NOTE: the loop below rebinds 'instance' from the
                        # enclosing scope; only the final LOG.debug calls
                        # observe the rebinding.
                        for instance in insts:
                            if instance.info_cache['deleted']:
                                LOG.debug('ignoring deleted cache')
                                continue
                            nw_info = compute_utils.get_nw_info_for_instance(
                                instance)
                            ips = [ip['address']
                                for ip in nw_info.fixed_ips()
                                    if ip['version'] == version]
                            LOG.debug('ips: %r', ips, instance=instance)
                            for ip in ips:
                                subrule = args + ['-s %s' % ip]
                                fw_rules += [' '.join(subrule)]
        # Anything not matched above falls through to the DROP chain.
        ipv4_rules += ['-j $sg-fallback']
        ipv6_rules += ['-j $sg-fallback']
        LOG.debug('Security Groups %s translated to ipv4: %r, ipv6: %r',
                  security_groups, ipv4_rules, ipv6_rules, instance=instance)
        return ipv4_rules, ipv6_rules
    def instance_filter_exists(self, instance, network_info):
        """No-op: this driver does not track libvirt nwfilter state."""
        pass
    def refresh_security_group_members(self, security_group):
        """Rebuild rules after group membership changed, then apply."""
        self.do_refresh_security_group_rules(security_group)
        self.iptables.apply()
    def refresh_security_group_rules(self, security_group):
        """Rebuild rules after a group rule changed, then apply."""
        self.do_refresh_security_group_rules(security_group)
        self.iptables.apply()
    def refresh_instance_security_rules(self, instance):
        """Rebuild one instance's rules, then apply."""
        self.do_refresh_instance_rules(instance)
        self.iptables.apply()
    @utils.synchronized('iptables', external=True)
    def _inner_do_refresh_rules(self, instance, network_info, ipv4_rules,
                                ipv6_rules):
        """Atomically replace an instance's chain (cross-process lock)."""
        chain_name = self._instance_chain_name(instance)
        if not self.iptables.ipv4['filter'].has_chain(chain_name):
            # Instance was torn down concurrently; nothing to refresh.
            LOG.info(
                _LI('instance chain %s disappeared during refresh, '
                    'skipping') % chain_name,
                instance=instance)
            return
        self.remove_filters_for_instance(instance)
        self.add_filters_for_instance(instance, network_info, ipv4_rules,
                                      ipv6_rules)
    def do_refresh_security_group_rules(self, security_group):
        """Recompute and restage rules for every cached instance."""
        id_list = self.instance_info.keys()
        for instance_id in id_list:
            try:
                instance, network_info = self.instance_info[instance_id]
            except KeyError:
                # NOTE(danms): instance cache must have been modified,
                # ignore this deleted instance and move on
                continue
            ipv4_rules, ipv6_rules = self.instance_rules(instance,
                                                         network_info)
            self._inner_do_refresh_rules(instance, network_info, ipv4_rules,
                                         ipv6_rules)
    def do_refresh_instance_rules(self, instance):
        """Recompute and restage rules for a single cached instance."""
        _instance, network_info = self.instance_info[instance.id]
        ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
        self._inner_do_refresh_rules(instance, network_info, ipv4_rules,
                                     ipv6_rules)
    def refresh_provider_fw_rules(self):
        """See :class:`FirewallDriver` docs."""
        self._do_refresh_provider_fw_rules()
        self.iptables.apply()
    @utils.synchronized('iptables', external=True)
    def _do_refresh_provider_fw_rules(self):
        """Internal, synchronized version of refresh_provider_fw_rules."""
        self._purge_provider_fw_rules()
        self._build_provider_fw_rules()
    def _purge_provider_fw_rules(self):
        """Remove all rules from the provider chains."""
        self.iptables.ipv4['filter'].empty_chain('provider')
        if CONF.use_ipv6:
            self.iptables.ipv6['filter'].empty_chain('provider')
    def _build_provider_fw_rules(self):
        """Create all rules for the provider IP DROPs."""
        self.iptables.ipv4['filter'].add_chain('provider')
        if CONF.use_ipv6:
            self.iptables.ipv6['filter'].add_chain('provider')
        ipv4_rules, ipv6_rules = self._provider_rules()
        for rule in ipv4_rules:
            self.iptables.ipv4['filter'].add_rule('provider', rule)
        if CONF.use_ipv6:
            for rule in ipv6_rules:
                self.iptables.ipv6['filter'].add_rule('provider', rule)
    def _provider_rules(self):
        """Generate a list of rules from provider for IP4 & IP6."""
        ctxt = context.get_admin_context()
        ipv4_rules = []
        ipv6_rules = []
        rules = self._virtapi.provider_fw_rule_get_all(ctxt)
        for rule in rules:
            LOG.debug('Adding provider rule: %s', rule['cidr'])
            version = netutils.get_ip_version(rule['cidr'])
            if version == 4:
                fw_rules = ipv4_rules
            else:
                fw_rules = ipv6_rules
            protocol = rule['protocol']
            if version == 6 and protocol == 'icmp':
                protocol = 'icmpv6'
            args = ['-p', protocol, '-s', rule['cidr']]
            if protocol in ['udp', 'tcp']:
                if rule['from_port'] == rule['to_port']:
                    args += ['--dport', '%s' % (rule['from_port'],)]
                else:
                    args += ['-m', 'multiport',
                             '--dports', '%s:%s' % (rule['from_port'],
                                                    rule['to_port'])]
            elif protocol == 'icmp':
                # Same from_port/to_port = type/code convention as
                # _build_icmp_rule above.
                icmp_type = rule['from_port']
                icmp_code = rule['to_port']
                if icmp_type == -1:
                    icmp_type_arg = None
                else:
                    icmp_type_arg = '%s' % icmp_type
                    if not icmp_code == -1:
                        icmp_type_arg += '/%s' % icmp_code
                if icmp_type_arg:
                    if version == 4:
                        args += ['-m', 'icmp', '--icmp-type',
                                 icmp_type_arg]
                    elif version == 6:
                        args += ['-m', 'icmp6', '--icmpv6-type',
                                 icmp_type_arg]
            # Provider rules are drops, not accepts.
            args += ['-j DROP']
            fw_rules += [' '.join(args)]
        return ipv4_rules, ipv6_rules
class NoopFirewallDriver(object):
    """No-op firewall driver: every operation silently succeeds."""

    def __init__(self, *args, **kwargs):
        # Accept and discard whatever arguments real drivers expect.
        pass

    def _noop(self, *args, **kwargs):
        # Shared do-nothing stand-in for every driver operation.
        pass

    def __getattr__(self, key):
        # Any attribute not defined explicitly resolves to the no-op, so
        # this class satisfies the whole FirewallDriver interface.
        return self._noop

    def instance_filter_exists(self, instance, network_info):
        # Report filters as always present so callers never block on us.
        return True
| apache-2.0 |
amgowano/oppia | core/tests/performance_tests/profile_page_test.py | 16 | 1625 | # Copyright 2016 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performance tests for the profile page."""
from core.tests.performance_tests import base
from core.tests.performance_tests import test_config
class ProfilePagePerformanceTest(base.TestBase):
    """Performance tests for the profile page."""
    # Key into test_config.TEST_DATA describing the page under test.
    PAGE_KEY = test_config.PAGE_KEY_PROFILE
    def setUp(self):
        """Configure the profile page (URL includes a username) for testing."""
        super(ProfilePagePerformanceTest, self).setUp()
        page_config = test_config.TEST_DATA[self.PAGE_KEY]
        # Profile URLs are per-user, hence append_username=True.
        self._set_page_config(page_config, append_username=True)
        self._initialize_data_fetcher()
        # Presumably warms server-side caches so the '*_cached_session'
        # tests below measure a warm path (helper defined in base.TestBase).
        self._load_page_to_cache_server_resources()
    def test_page_size_under_specified_limit(self):
        self._test_total_page_size()
    def test_page_size_under_specified_limit_for_cached_session(self):
        self._test_total_page_size_for_cached_session()
    def test_page_loads_under_specified_limit(self):
        self._test_page_load_time()
    def test_page_loads_under_specified_limit_cached_session(self):
        self._test_page_load_time_for_cached_session()
| apache-2.0 |
qingqing01/models | fluid/adversarial/advbox/attacks/base.py | 4 | 2226 | """
The base model of the model.
"""
import logging
from abc import ABCMeta
from abc import abstractmethod
import numpy as np
class Attack(object):
    """
    Base class for adversarial attacks.

    An Attack searches for an adversarial example against a given model.
    Subclasses implement the actual search in _apply().

    Args:
        model(Model): an instance of advbox.base.Model.
    """
    __metaclass__ = ABCMeta

    def __init__(self, model):
        self.model = model

    def __call__(self, adversary, **kwargs):
        """
        Run the attack: normalize the adversary's labels, then search.

        Args:
            adversary(object): The adversary object.
            **kwargs: Other named arguments.
        """
        self._preprocess(adversary)
        return self._apply(adversary, **kwargs)

    @abstractmethod
    def _apply(self, adversary, **kwargs):
        """
        Search for an adversarial example (implemented by subclasses).

        Args:
            adversary(object): The adversary object.
            **kwargs: Other named arguments.
        """
        raise NotImplementedError

    def _preprocess(self, adversary):
        """
        Fill in any missing labels on the adversary before the search.

        :param adversary: adversary
        :return: None
        """
        assert self.model.channel_axis() == adversary.original.ndim
        # Derive the original label from the model when not supplied.
        if adversary.original_label is None:
            adversary.original_label = np.argmax(
                self.model.predict(adversary.original))
        # For targeted attacks, derive the target label from the target
        # sample when neither was supplied.
        if adversary.is_targeted_attack and adversary.target_label is None:
            if adversary.target is None:
                raise ValueError(
                    'When adversary.is_targeted_attack is true, '
                    'adversary.target_label or adversary.target must be set.')
            adversary.target_label = np.argmax(
                self.model.predict(adversary.target))
        logging.info('adversary:'
                     '\n original_label: {}'
                     '\n target_label: {}'
                     '\n is_targeted_attack: {}'
                     ''.format(adversary.original_label, adversary.target_label,
                               adversary.is_targeted_attack))
| apache-2.0 |
patrickcurl/ztruck | dj/lib/python2.7/site-packages/pip/req/req_set.py | 30 | 26547 | from __future__ import absolute_import
from collections import defaultdict
import functools
import itertools
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
    """A mapping of requirement name -> requirement that remembers
    first-insertion order for iteration and repr."""

    def __init__(self):
        # _order keeps first-insertion order; _data holds current values.
        self._order = []
        self._data = {}

    def keys(self):
        # Returns the live internal list, matching historical behavior.
        return self._order

    def values(self):
        return [self._data[name] for name in self._order]

    def __contains__(self, item):
        return item in self._order

    def __setitem__(self, key, value):
        # Re-assigning an existing key keeps its original position.
        if key not in self._order:
            self._order.append(key)
        self._data[key] = value

    def __getitem__(self, key):
        return self._data[key]

    def __repr__(self):
        pairs = ['{0!r}: {1!r}'.format(name, self._data[name])
                 for name in self._order]
        return 'Requirements({%s})' % ', '.join(pairs)
class DistAbstraction(object):
    """Abstracts out the wheel vs non-wheel prepare_files logic.

    The requirements for anything installable are as follows:
     - we must be able to determine the requirement name
       (or we can't correctly handle the non-upgrade case).
     - we must be able to generate a list of run-time dependencies
       without installing any additional packages (or we would
       have to either burn time by doing temporary isolated installs
       or alternatively violate pips 'don't start installing unless
       all requirements are available' rule - neither of which are
       desirable).
     - for packages with setup requirements, we must also be able
       to determine their requirements without installing additional
       packages (for the same reason as run-time dependencies)
     - we must be able to create a Distribution object exposing the
       above metadata.
    """

    def __init__(self, req_to_install):
        self.req_to_install = req_to_install

    def dist(self, finder):
        """Return a setuptools Dist object."""
        raise NotImplementedError(self.dist)

    def prep_for_dist(self):
        """Ensure that we can get a Dist for this requirement."""
        # Bug fix: this previously raised NotImplementedError(self.dist),
        # making the error point at the wrong method.
        raise NotImplementedError(self.prep_for_dist)
def make_abstract_dist(req_to_install):
    """Return the DistAbstraction matching *req_to_install*.

    Preconditions: Either an editable req with a source_dir, or satisfied_by
    or a wheel link, or a non-editable req with a source_dir.

    :return: A concrete DistAbstraction.
    """
    # Editable requirements are always treated as sdists; only a
    # non-editable requirement pointing at a wheel gets wheel handling.
    # (editable is checked first, preserving the original access order.)
    if not req_to_install.editable and req_to_install.link \
            and req_to_install.link.is_wheel:
        return IsWheel(req_to_install)
    return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
    """Dist abstraction for a requirement backed by a wheel archive."""

    def dist(self, finder):
        # The unpacked wheel directory yields exactly one distribution;
        # take the first one found.
        found = list(pkg_resources.find_distributions(
            self.req_to_install.source_dir))
        return found[0]

    def prep_for_dist(self):
        # FIXME:https://github.com/pypa/pip/issues/1112
        pass
class IsSDist(DistAbstraction):
    """Dist abstraction for a source distribution (or editable checkout)."""

    def dist(self, finder):
        dist = self.req_to_install.get_dist()
        # FIXME: shouldn't be globally added:
        if dist.has_metadata('dependency_links.txt'):
            links = dist.get_metadata_lines('dependency_links.txt')
            finder.add_dependency_links(links)
        return dist

    def prep_for_dist(self):
        # Running egg_info produces the metadata that dist() relies on.
        self.req_to_install.run_egg_info()
        self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
    """Dist abstraction for a requirement already satisfied on the system."""

    def dist(self, finder):
        # The environment scan already produced a Distribution object.
        return self.req_to_install.satisfied_by

    def prep_for_dist(self):
        # Nothing to build or unpack for an installed distribution.
        pass
class RequirementSet(object):
    def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
                 ignore_installed=False, as_egg=False, target_dir=None,
                 ignore_dependencies=False, force_reinstall=False,
                 use_user_site=False, session=None, pycompile=True,
                 isolated=False, wheel_download_dir=None,
                 wheel_cache=None):
        """Create a RequirementSet.

        :param build_dir: Where non-editable requirements get unpacked/built.
        :param src_dir: Where editable requirements are checked out.
        :param wheel_download_dir: Where still-packed .whl files should be
            written to. If None they are written to the download_dir parameter.
            Separate to download_dir to permit only keeping wheel archives for
            pip wheel.
        :param download_dir: Where still packed archives should be written to.
            If None they are not saved, and are deleted immediately after
            unpacking.
        :param wheel_cache: The pip wheel cache, for passing to
            InstallRequirement.
        """
        # ``session`` has a default only for signature stability; it is
        # effectively a required keyword argument.
        if session is None:
            raise TypeError(
                "RequirementSet() missing 1 required keyword argument: "
                "'session'"
            )
        self.build_dir = build_dir
        self.src_dir = src_dir
        # XXX: download_dir and wheel_download_dir overlap semantically and may
        # be combined if we're willing to have non-wheel archives present in
        # the wheelhouse output by 'pip wheel'.
        self.download_dir = download_dir
        self.upgrade = upgrade
        self.ignore_installed = ignore_installed
        self.force_reinstall = force_reinstall
        # Mapping of project name -> InstallRequirement.
        self.requirements = Requirements()
        # Mapping of alias: real_name
        self.requirement_aliases = {}
        # URL/path requirements that carried no project name (see
        # add_requirement below).
        self.unnamed_requirements = []
        self.ignore_dependencies = ignore_dependencies
        self.successfully_downloaded = []
        self.successfully_installed = []
        self.reqs_to_cleanup = []
        self.as_egg = as_egg
        self.use_user_site = use_user_site
        self.target_dir = target_dir  # set from --target option
        self.session = session
        self.pycompile = pycompile
        self.isolated = isolated
        if wheel_download_dir:
            wheel_download_dir = normalize_path(wheel_download_dir)
        self.wheel_download_dir = wheel_download_dir
        self._wheel_cache = wheel_cache
        # Maps from install_req -> dependencies_of_install_req
        self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
    def add_requirement(self, install_req, parent_req_name=None):
        """Add install_req as a requirement to install.

        :param parent_req_name: The name of the requirement that needed this
            added. The name is used because when multiple unnamed requirements
            resolve to the same name, we could otherwise end up with dependency
            links that point outside the Requirements set. parent_req must
            already be added. Note that None implies that this is a user
            supplied requirement, vs an inferred one.
        :return: Additional requirements to scan. That is either [] if
            the requirement is not applicable, or [install_req] if the
            requirement is applicable and has just been added.
        """
        name = install_req.name
        # Requirements whose environment markers don't match are dropped
        # entirely (with a warning) -- nothing more to scan.
        if not install_req.match_markers():
            logger.warning("Ignoring %s: markers %r don't match your "
                           "environment", install_req.name,
                           install_req.markers)
            return []
        # Propagate set-wide install options onto the individual req.
        install_req.as_egg = self.as_egg
        install_req.use_user_site = self.use_user_site
        install_req.target_dir = self.target_dir
        install_req.pycompile = self.pycompile
        if not name:
            # url or path requirement w/o an egg fragment
            self.unnamed_requirements.append(install_req)
            return [install_req]
        else:
            # A user-supplied duplicate of an existing name is an error;
            # an inferred (dependency) duplicate is merely canonicalised.
            if parent_req_name is None and self.has_requirement(name):
                raise InstallationError(
                    'Double requirement given: %s (already in %s, name=%r)'
                    % (install_req, self.get_requirement(name), name))
            if not self.has_requirement(name):
                # Add requirement
                self.requirements[name] = install_req
                # FIXME: what about other normalizations? E.g., _ vs. -?
                if name.lower() != name:
                    self.requirement_aliases[name.lower()] = name
                result = [install_req]
            else:
                # Canonicalise to the already-added object
                install_req = self.get_requirement(name)
                # No need to scan, this is a duplicate requirement.
                result = []
            if parent_req_name:
                parent_req = self.get_requirement(parent_req_name)
                self._dependencies[parent_req].append(install_req)
            return result
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
    @property
    def is_download(self):
        """True when archives are being downloaded (``--download``).

        NOTE(review): this property has a side effect -- it expands ``~`` in
        ``self.download_dir`` in place -- and raises if the directory is
        missing; callers rely on that validation happening here.
        """
        if self.download_dir:
            self.download_dir = os.path.expanduser(self.download_dir)
            if os.path.exists(self.download_dir):
                return True
            else:
                logger.critical('Could not find download directory')
                raise InstallationError(
                    "Could not find or access download directory '%s'"
                    % display_path(self.download_dir))
        return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
    def _walk_req_to_install(self, handler):
        """Call handler for all pending reqs.

        :param handler: Handle a single requirement. Should take a requirement
            to install. Can optionally return an iterable of additional
            InstallRequirements to cover.
        """
        # The list() here is to avoid potential mutate-while-iterating bugs.
        discovered_reqs = []
        # ``discovered_reqs`` is chained in *last* while still empty on
        # purpose: chain() only reads it after the first two lists are
        # exhausted, so anything the handler appends below is still visited.
        reqs = itertools.chain(
            list(self.unnamed_requirements), list(self.requirements.values()),
            discovered_reqs)
        for req_to_install in reqs:
            more_reqs = handler(req_to_install)
            if more_reqs:
                discovered_reqs.extend(more_reqs)
    def prepare_files(self, finder):
        """
        Prepare process. Create temp directories, download and/or unpack files.

        :param finder: the PackageFinder used to locate distributions.
        """
        # make the wheelhouse
        if self.wheel_download_dir:
            ensure_dir(self.wheel_download_dir)
        # Walk every pending requirement (including ones discovered while
        # walking) through _prepare_file.
        self._walk_req_to_install(
            functools.partial(self._prepare_file, finder))
    def _check_skip_installed(self, req_to_install, finder):
        """Check if req_to_install should be skipped.

        This will check if the req is installed, and whether we should upgrade
        or reinstall it, taking into account all the relevant user options.

        After calling this req_to_install will only have satisfied_by set to
        None if the req_to_install is to be upgraded/reinstalled etc. Any
        other value will be a dist recording the current thing installed that
        satisfies the requirement.

        Note that for vcs urls and the like we can't assess skipping in this
        routine - we simply identify that we need to pull the thing down,
        then later on it is pulled down and introspected to assess upgrade/
        reinstalls etc.

        :return: A text reason for why it was skipped, or None.
        """
        # Check whether to upgrade/reinstall this req or not.
        req_to_install.check_if_exists()
        if req_to_install.satisfied_by:
            skip_reason = 'satisfied (use --upgrade to upgrade)'
            if self.upgrade:
                best_installed = False
                # For link based requirements we have to pull the
                # tree down and inspect to assess the version #, so
                # its handled way down.
                if not (self.force_reinstall or req_to_install.link):
                    try:
                        finder.find_requirement(req_to_install, self.upgrade)
                    except BestVersionAlreadyInstalled:
                        skip_reason = 'up-to-date'
                        best_installed = True
                    except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised later when we
                        # re-try later to do the install.
                        # Why don't we just raise here?
                        pass
                if not best_installed:
                    # don't uninstall conflict if user install and
                    # conflict is not user install
                    if not (self.use_user_site and not
                            dist_in_usersite(req_to_install.satisfied_by)):
                        req_to_install.conflicts_with = \
                            req_to_install.satisfied_by
                    req_to_install.satisfied_by = None
            return skip_reason
        else:
            return None
    def _prepare_file(self, finder, req_to_install):
        """Prepare a single requirement: update/unpack its source, build an
        abstract dist for it, and register its dependencies.

        :return: A list of additional InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.editable:
            logger.info('Obtaining %s', req_to_install)
        else:
            # satisfied_by is only evaluated by calling _check_skip_installed,
            # so it must be None here.
            assert req_to_install.satisfied_by is None
            if not self.ignore_installed:
                skip_reason = self._check_skip_installed(
                    req_to_install, finder)
            if req_to_install.satisfied_by:
                assert skip_reason is not None, (
                    '_check_skip_installed returned None but '
                    'req_to_install.satisfied_by is set to %r'
                    % (req_to_install.satisfied_by,))
                logger.info(
                    'Requirement already %s: %s', skip_reason,
                    req_to_install)
            else:
                if (req_to_install.link and
                        req_to_install.link.scheme == 'file'):
                    path = url_to_path(req_to_install.link.url)
                    logger.info('Processing %s', display_path(path))
                else:
                    logger.info('Collecting %s', req_to_install)
        with indent_log():
            # ################################ #
            # # vcs update or unpack archive # #
            # ################################ #
            if req_to_install.editable:
                # Editable reqs are checked out/updated in src_dir.
                req_to_install.ensure_has_source_dir(self.src_dir)
                req_to_install.update_editable(not self.is_download)
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    req_to_install.archive(self.download_dir)
            elif req_to_install.satisfied_by:
                # Already installed and not being upgraded -- wrap as-is.
                abstract_dist = Installed(req_to_install)
            else:
                # @@ if filesystem packages are not marked
                # editable in a req, a non deterministic error
                # occurs when the script attempts to unpack the
                # build directory
                req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going. version
                # inconsistencies are logged later, but do not fail the
                # installation.
                # FIXME: this won't upgrade when there's an existing
                # package unpacked in `req_to_install.source_dir`
                if os.path.exists(
                        os.path.join(req_to_install.source_dir, 'setup.py')):
                    raise PreviousBuildDirError(
                        "pip can't proceed with requirements '%s' due to a"
                        " pre-existing build directory (%s). This is "
                        "likely due to a previous installation that failed"
                        ". pip is being responsible and not assuming it "
                        "can delete this. Please delete it and try again."
                        % (req_to_install, req_to_install.source_dir)
                    )
                req_to_install.populate_link(finder, self.upgrade)
                # We can't hit this spot and have populate_link return None.
                # req_to_install.satisfied_by is None here (because we're
                # guarded) and upgrade has no impact except when satisfied_by
                # is not None.
                # Then inside find_requirement existing_applicable -> False
                # If no new versions are found, DistributionNotFound is raised,
                # otherwise a result is guaranteed.
                assert req_to_install.link
                try:
                    download_dir = self.download_dir
                    # We always delete unpacked sdists after pip ran.
                    autodelete_unpacked = True
                    if req_to_install.link.is_wheel \
                            and self.wheel_download_dir:
                        # when doing 'pip wheel` we download wheels to a
                        # dedicated dir.
                        download_dir = self.wheel_download_dir
                    if req_to_install.link.is_wheel:
                        if download_dir:
                            # When downloading, we only unpack wheels to get
                            # metadata.
                            autodelete_unpacked = True
                        else:
                            # When installing a wheel, we use the unpacked
                            # wheel.
                            autodelete_unpacked = False
                    unpack_url(
                        req_to_install.link, req_to_install.source_dir,
                        download_dir, autodelete_unpacked,
                        session=self.session)
                except requests.HTTPError as exc:
                    logger.critical(
                        'Could not install requirement %s because '
                        'of error %s',
                        req_to_install,
                        exc,
                    )
                    raise InstallationError(
                        'Could not install requirement %s because '
                        'of HTTP error %s for URL %s' %
                        (req_to_install, exc, req_to_install.link)
                    )
                abstract_dist = make_abstract_dist(req_to_install)
                abstract_dist.prep_for_dist()
                if self.is_download:
                    # Make a .zip of the source_dir we already created.
                    if req_to_install.link.scheme in vcs.all_schemes:
                        req_to_install.archive(self.download_dir)
                # req_to_install.req is only avail after unpack for URL
                # pkgs repeat check_if_exists to uninstall-on-upgrade
                # (#14)
                if not self.ignore_installed:
                    req_to_install.check_if_exists()
                if req_to_install.satisfied_by:
                    if self.upgrade or self.ignore_installed:
                        # don't uninstall conflict if user install and
                        # conflict is not user install
                        if not (self.use_user_site and not
                                dist_in_usersite(
                                    req_to_install.satisfied_by)):
                            req_to_install.conflicts_with = \
                                req_to_install.satisfied_by
                        req_to_install.satisfied_by = None
                    else:
                        logger.info(
                            'Requirement already satisfied (use '
                            '--upgrade to upgrade): %s',
                            req_to_install,
                        )
            # ###################### #
            # # parse dependencies # #
            # ###################### #
            dist = abstract_dist.dist(finder)
            more_reqs = []

            def add_req(subreq):
                # Register a dependency of req_to_install; anything newly
                # added is collected into more_reqs for the caller to scan.
                sub_install_req = InstallRequirement(
                    str(subreq),
                    req_to_install,
                    isolated=self.isolated,
                    wheel_cache=self._wheel_cache,
                )
                more_reqs.extend(self.add_requirement(
                    sub_install_req, req_to_install.name))

            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not self.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                self.add_requirement(req_to_install, None)
            if not self.ignore_dependencies:
                if (req_to_install.extras):
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        '%s does not provide the extra \'%s\'',
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq)
            # cleanup tmp src
            self.reqs_to_cleanup.append(req_to_install)
            if not req_to_install.editable and not req_to_install.satisfied_by:
                # XXX: --no-install leads this to report 'Successfully
                # downloaded' for only non-editable reqs, even though we took
                # action on them.
                self.successfully_downloaded.append(req_to_install)
        return more_reqs
    def cleanup_files(self):
        """Clean up files, remove builds."""
        logger.debug('Cleaning up...')
        with indent_log():
            # Drop the temporary source trees of everything we unpacked.
            for req in self.reqs_to_cleanup:
                req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
    def install(self, install_options, global_options=(), *args, **kwargs):
        """
        Install everything in this set (after having downloaded and unpacked
        the packages)

        On failure of an individual install, any uninstall performed for a
        conflicting distribution is rolled back before re-raising.
        """
        to_install = self._to_install()
        if to_install:
            logger.info(
                'Installing collected packages: %s',
                ', '.join([req.name for req in to_install]),
            )
        with indent_log():
            for requirement in to_install:
                if requirement.conflicts_with:
                    logger.info(
                        'Found existing installation: %s',
                        requirement.conflicts_with,
                    )
                    with indent_log():
                        requirement.uninstall(auto_confirm=True)
                try:
                    requirement.install(
                        install_options,
                        global_options,
                        *args,
                        **kwargs
                    )
                except:
                    # Bare except is deliberate: roll back the uninstall on
                    # *any* interruption (including KeyboardInterrupt), then
                    # re-raise.
                    # if install did not succeed, rollback previous uninstall
                    if (requirement.conflicts_with and not
                            requirement.install_succeeded):
                        requirement.rollback_uninstall()
                    raise
                else:
                    if (requirement.conflicts_with and
                            requirement.install_succeeded):
                        requirement.commit_uninstall()
                requirement.remove_temporary_source()
        self.successfully_installed = to_install
| apache-2.0 |
krishnazure/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/jinja2/sandbox.py | 637 | 13445 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, function_type, method_type, \
traceback_type, code_type, frame_type, generator_type, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                  'func_defaults', 'func_globals'])

#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])

# On versions > python 2 the special attributes on functions are gone,
# but they remain on methods and generators for whatever reason.
if not PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set()

import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
    """Return a range, refusing to build one with a length of more than
    MAX_RANGE items.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Decorator marking a function or method as unsafe for the sandbox.

    ::

        @unsafe
        def delete(self):
            pass
    """
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute((lambda x:x).func_code, 'co_code')
    True
    >>> is_internal_attribute(str, "upper")
    False

    (The ``func_*`` examples above are Python 2 only; on Python 3 those
    attribute names no longer exist and ``UNSAFE_FUNCTION_ATTRIBUTES`` is
    empty.)
    """
    if isinstance(obj, function_type):
        if attr in UNSAFE_FUNCTION_ATTRIBUTES:
            return True
    elif isinstance(obj, method_type):
        # methods expose the wrapped function's unsafe attributes as well
        if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
           attr in UNSAFE_METHOD_ATTRIBUTES:
            return True
    elif isinstance(obj, type):
        # type.mro() would allow climbing to arbitrary base classes
        if attr == 'mro':
            return True
    elif isinstance(obj, (code_type, traceback_type, frame_type)):
        # interpreter internals are never safe to expose
        return True
    elif isinstance(obj, generator_type):
        if attr in UNSAFE_GENERATOR_ATTRIBUTES:
            return True
    # anything dunder-prefixed is considered internal as a fallback
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """Check whether calling *attr* on the builtin mutable object *obj*
    (list, dict, set or deque) would modify it.  The "user"-versions of
    those objects (`sets.Set`, `UserDict.*` etc.) and, from Python 2.6
    onwards, the abstract base classes `MutableSet`, `MutableMapping` and
    `MutableSequence` are supported as well.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # First matching type specification wins.
    matched_methods = next(
        (methods for types_, methods in _mutable_spec
         if isinstance(obj, types_)),
        None,
    )
    return matched_methods is not None and attr in matched_methods
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.
    """
    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation from the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation from the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # expose only the length-guarded range to template code
        self.globals['range'] = safe_range
        # per-instance copies so instances can customize without affecting
        # the class-level defaults
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscript an object from sandboxed code.  Falls back to safe
        attribute access for string subscripts.
        """
        try:
            return obj[argument]
        except (TypeError, LookupError):
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscript an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # fall back to item access when the attribute is missing
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # Defer to the base sandbox first, then additionally veto any
        # attribute that would mutate a known mutable builtin.
        if SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return not modifies_known_mutable(obj, attr)
        return False
| apache-2.0 |
moble/sympy | sympy/concrete/tests/test_products.py | 36 | 11612 | from sympy import (symbols, Symbol, product, factorial, rf, sqrt, cos,
Function, Product, Rational, Sum, oo, exp, log, S)
from sympy.utilities.pytest import raises
from sympy import simplify
# Shared integer symbols and an undefined function used throughout the
# tests below.
a, k, n, m, x = symbols('a,k,n,m,x', integer=True)
f = Function('f')
def test_karr_convention():
    """Products honour the Karr limit convention: reversed limits give the
    reciprocal, and the empty product is 1."""
    # Test the Karr product convention that we want to hold.
    # See his paper "Summation in Finite Terms" for a detailed
    # reasoning why we really want exactly this definition.
    # The convention is described for sums on page 309 and
    # essentially in section 1.4, definition 3. For products
    # we can find in analogy:
    #
    # \prod_{m <= i < n} f(i) 'has the obvious meaning' for m < n
    # \prod_{m <= i < n} f(i) = 1 for m = n  (empty product)
    # \prod_{m <= i < n} f(i) = 1 / \prod_{n <= i < m} f(i) for m > n
    #
    # It is important to note that he defines all products with
    # the upper limit being *exclusive*.
    # In contrast, sympy and the usual mathematical notation has:
    #
    # prod_{i = a}^b f(i) = f(a) * f(a+1) * ... * f(b-1) * f(b)
    #
    # with the upper limit *inclusive*. So translating between
    # the two we find that:
    #
    # \prod_{m <= i < n} f(i) = \prod_{i = m}^{n-1} f(i)
    #
    # where we intentionally used two different ways to typeset the
    # products and its limits.
    i = Symbol("i", integer=True)
    k = Symbol("k", integer=True)
    j = Symbol("j", integer=True)

    # A simple example with a concrete factors and symbolic limits.

    # The normal product: m = k and n = k + j and therefore m < n:
    m = k
    n = k + j
    a = m
    b = n - 1
    S1 = Product(i**2, (i, a, b)).doit()

    # The reversed product: m = k + j and n = k and therefore m > n:
    m = k + j
    n = k
    a = m
    b = n - 1
    S2 = Product(i**2, (i, a, b)).doit()

    assert simplify(S1 * S2) == 1

    # Test the empty product: m = k and n = k and therefore m = n:
    m = k
    n = k
    a = m
    b = n - 1
    Sz = Product(i**2, (i, a, b)).doit()

    assert Sz == 1

    # Another example this time with an unspecified factor and
    # numeric limits. (We can not do both tests in the same example.)
    f = Function("f")

    # The normal product with m < n:
    m = 2
    n = 11
    a = m
    b = n - 1
    S1 = Product(f(i), (i, a, b)).doit()

    # The reversed product with m > n:
    m = 11
    n = 2
    a = m
    b = n - 1
    S2 = Product(f(i), (i, a, b)).doit()

    assert simplify(S1 * S2) == 1

    # Test the empty product with m = n:
    m = 5
    n = 5
    a = m
    b = n - 1
    Sz = Product(f(i), (i, a, b)).doit()

    assert Sz == 1
def test_karr_proposition_2a():
    """Karr proposition 2a: if f = g(i+1)/g(i) then
    Product_{m <= i < n} f(i) = g(n)/g(m), for any ordering of the limits."""
    # Test Karr, page 309, proposition 2, part a
    i = Symbol("i", integer=True)
    u = Symbol("u", integer=True)
    v = Symbol("v", integer=True)

    def test_the_product(m, n):
        # g
        g = i**3 + 2*i**2 - 3*i
        # f = Delta g
        f = simplify(g.subs(i, i+1) / g)
        # The product
        a = m
        b = n - 1
        P = Product(f, (i, a, b)).doit()
        # Test if Product_{m <= i < n} f(i) = g(n) / g(m)
        assert simplify(P / (g.subs(i, n) / g.subs(i, m))) == 1

    # m < n
    test_the_product(u, u+v)
    # m = n
    test_the_product(u, u)
    # m > n
    test_the_product(u+v, u)
def test_karr_proposition_2b():
    """Karr proposition 2b: the product over [l, n) splits as the product
    over [l, m) times the product over [m, n), for every ordering of the
    three limits."""
    # Test Karr, page 309, proposition 2, part b
    i = Symbol("i", integer=True)
    u = Symbol("u", integer=True)
    v = Symbol("v", integer=True)
    w = Symbol("w", integer=True)

    def test_the_product(l, n, m):
        # Productmand
        s = i**3
        # First product
        a = l
        b = n - 1
        S1 = Product(s, (i, a, b)).doit()
        # Second product
        a = l
        b = m - 1
        S2 = Product(s, (i, a, b)).doit()
        # Third product
        a = m
        b = n - 1
        S3 = Product(s, (i, a, b)).doit()
        # Test if S1 = S2 * S3 as required
        assert simplify(S1 / (S2 * S3)) == 1

    # l < m < n
    test_the_product(u, u+v, u+v+w)
    # l < m = n
    test_the_product(u, u+v, u+v)
    # l < m > n
    test_the_product(u, u+v+w, v)
    # l = m < n
    test_the_product(u, u, u+v)
    # l = m = n
    test_the_product(u, u, u)
    # l = m > n
    test_the_product(u+v, u+v, u)
    # l > m < n
    test_the_product(u+v, u, u+w)
    # l > m = n
    test_the_product(u+v, u, u)
    # l > m > n
    test_the_product(u+v+w, u+v, u)
def test_simple_products():
    """Closed forms for elementary factors, argument validation, and
    products with infinite upper limit."""
    assert product(2, (k, a, n)) == 2**(n - a + 1)
    assert product(k, (k, 1, n)) == factorial(n)
    assert product(k**3, (k, 1, n)) == factorial(n)**3
    assert product(k + 1, (k, 0, n - 1)) == factorial(n)
    assert product(k + 1, (k, a, n - 1)) == rf(1 + a, n - a)
    assert product(cos(k), (k, 0, 5)) == cos(1)*cos(2)*cos(3)*cos(4)*cos(5)
    assert product(cos(k), (k, 3, 5)) == cos(3)*cos(4)*cos(5)
    assert product(cos(k), (k, 1, Rational(5, 2))) != cos(1)*cos(2)
    assert isinstance(product(k**k, (k, 1, n)), Product)
    assert Product(x**k, (k, 1, n)).variables == [k]

    # malformed limit specifications must be rejected
    raises(ValueError, lambda: Product(n))
    raises(ValueError, lambda: Product(n, k))
    raises(ValueError, lambda: Product(n, k, 1))
    raises(ValueError, lambda: Product(n, k, 1, 10))
    raises(ValueError, lambda: Product(n, (k, 1)))

    assert product(1, (n, 1, oo)) == 1  # issue 8301
    assert product(2, (n, 1, oo)) == oo
    # a divergent (oscillating) infinite product stays unevaluated
    assert product(-1, (n, 1, oo)).func is Product
def test_multiple_products():
    """Nested/multiple limit tuples evaluate like iterated products."""
    assert product(x, (n, 1, k), (k, 1, m)) == x**(m**2/2 + m/2)
    assert product(f(n), (
        n, 1, m), (m, 1, k)) == Product(f(n), (n, 1, m), (m, 1, k)).doit()
    # the four equivalent spellings of the same iterated product agree
    assert Product(f(n), (m, 1, k), (n, 1, k)).doit() == \
        Product(Product(f(n), (m, 1, k)), (n, 1, k)).doit() == \
        product(f(n), (m, 1, k), (n, 1, k)) == \
        product(product(f(n), (m, 1, k)), (n, 1, k)) == \
        Product(f(n)**k, (n, 1, k))
    assert Product(
        x, (x, 1, k), (k, 1, n)).doit() == Product(factorial(k), (k, 1, n))

    # variables lists only the genuinely bound symbols
    assert Product(x**k, (n, 1, k), (k, 1, m)).variables == [n, k]
def test_rational_products():
    """Telescoping rational productmand reduces to a rising factorial."""
    assert product(1 + 1/k, (k, 1, n)) == rf(2, n)/factorial(n)
def test_special_products():
    """Classical product identities evaluate to closed forms."""
    # Wallis product
    assert product((4*k)**2 / (4*k**2 - 1), (k, 1, n)) == \
        4**n*factorial(n)**2/rf(Rational(1, 2), n)/rf(Rational(3, 2), n)

    # Euler's product formula for sin
    assert product(1 + a/k**2, (k, 1, n)) == \
        rf(1 - sqrt(-a), n)*rf(1 + sqrt(-a), n)/factorial(n)**2
def test__eval_product():
    """Regression tests for Product._eval_product."""
    from sympy.abc import i, n
    # issue 4809: constant factors are pulled out of an unevaluated Product
    a = Function('a')
    assert product(2*a(i), (i, 1, n)) == 2**n * Product(a(i), (i, 1, n))
    # issue 4810: product of powers turns into a power of a sum
    assert product(2**i, (i, 1, n)) == 2**(n/2 + n**2/2)
def test_product_pow():
    """issue 4817: product of c**f(k) becomes c**Sum(f(k))."""
    assert product(2**f(k), (k, 1, n)) == 2**Sum(f(k), (k, 1, n))
    assert product(2**(2*f(k)), (k, 1, n)) == 2**Sum(2*f(k), (k, 1, n))
def test_infinite_product():
    """issue 5737: this infinite product must stay unevaluated."""
    assert isinstance(Product(2**(1/factorial(n)), (n, 0, oo)), Product)
def test_conjugate_transpose():
p = Product(x**k, (k, 1, 3))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
A, B = symbols("A B", commutative=False)
p = Product(A*B**k, (k, 1, 3))
assert p.adjoint().doit() == p.doit().adjoint()
assert p.conjugate().doit() == p.doit().conjugate()
assert p.transpose().doit() == p.doit().transpose()
def test_simplify():
y, t, b, c = symbols('y, t, b, c', integer = True)
assert simplify(Product(x*y, (x, n, m), (y, a, k)) * \
Product(y, (x, n, m), (y, a, k))) == \
Product(x*y**2, (x, n, m), (y, a, k))
assert simplify(3 * y* Product(x, (x, n, m)) * Product(x, (x, m + 1, a))) \
== 3 * y * Product(x, (x, n, a))
assert simplify(Product(x, (x, k + 1, a)) * Product(x, (x, n, k))) == \
Product(x, (x, n, a))
assert simplify(Product(x, (x, k + 1, a)) * Product(x + 1, (x, n, k))) == \
Product(x, (x, k + 1, a)) * Product(x + 1, (x, n, k))
assert simplify(Product(x, (t, a, b)) * Product(y, (t, a, b)) * \
Product(x, (t, b+1, c))) == Product(x*y, (t, a, b)) * \
Product(x, (t, b+1, c))
assert simplify(Product(x, (t, a, b)) * Product(x, (t, b+1, c)) * \
Product(y, (t, a, b))) == Product(x*y, (t, a, b)) * \
Product(x, (t, b+1, c))
def test_change_index():
b, y, c, d, z = symbols('b, y, c, d, z', integer = True)
assert Product(x, (x, a, b)).change_index(x, x + 1, y) == \
Product(y - 1, (y, a + 1, b + 1))
assert Product(x**2, (x, a, b)).change_index(x, x - 1) == \
Product((x + 1)**2, (x, a - 1, b - 1))
assert Product(x**2, (x, a, b)).change_index(x, -x, y) == \
Product((-y)**2, (y, -b, -a))
assert Product(x, (x, a, b)).change_index(x, -x - 1) == \
Product(-x - 1, (x, - b - 1, -a - 1))
assert Product(x*y, (x, a, b), (y, c, d)).change_index(x, x - 1, z) == \
Product((z + 1)*y, (z, a - 1, b - 1), (y, c, d))
def test_reorder():
b, y, c, d, z = symbols('b, y, c, d, z', integer = True)
assert Product(x*y, (x, a, b), (y, c, d)).reorder((0, 1)) == \
Product(x*y, (y, c, d), (x, a, b))
assert Product(x, (x, a, b), (x, c, d)).reorder((0, 1)) == \
Product(x, (x, c, d), (x, a, b))
assert Product(x*y + z, (x, a, b), (z, m, n), (y, c, d)).reorder(\
(2, 0), (0, 1)) == Product(x*y + z, (z, m, n), (y, c, d), (x, a, b))
assert Product(x*y*z, (x, a, b), (y, c, d), (z, m, n)).reorder(\
(0, 1), (1, 2), (0, 2)) == \
Product(x*y*z, (x, a, b), (z, m, n), (y, c, d))
assert Product(x*y*z, (x, a, b), (y, c, d), (z, m, n)).reorder(\
(x, y), (y, z), (x, z)) == \
Product(x*y*z, (x, a, b), (z, m, n), (y, c, d))
assert Product(x*y, (x, a, b), (y, c, d)).reorder((x, 1)) == \
Product(x*y, (y, c, d), (x, a, b))
assert Product(x*y, (x, a, b), (y, c, d)).reorder((y, x)) == \
Product(x*y, (y, c, d), (x, a, b))
def test_reverse_order():
x, y, a, b, c, d= symbols('x, y, a, b, c, d', integer = True)
assert Product(x, (x, 0, 3)).reverse_order(0) == Product(1/x, (x, 4, -1))
assert Product(x*y, (x, 1, 5), (y, 0, 6)).reverse_order(0, 1) == \
Product(x*y, (x, 6, 0), (y, 7, -1))
assert Product(x, (x, 1, 2)).reverse_order(0) == Product(1/x, (x, 3, 0))
assert Product(x, (x, 1, 3)).reverse_order(0) == Product(1/x, (x, 4, 0))
assert Product(x, (x, 1, a)).reverse_order(0) == Product(1/x, (x, a + 1, 0))
assert Product(x, (x, a, 5)).reverse_order(0) == Product(1/x, (x, 6, a - 1))
assert Product(x, (x, a + 1, a + 5)).reverse_order(0) == \
Product(1/x, (x, a + 6, a))
assert Product(x, (x, a + 1, a + 2)).reverse_order(0) == \
Product(1/x, (x, a + 3, a))
assert Product(x, (x, a + 1, a + 1)).reverse_order(0) == \
Product(1/x, (x, a + 2, a))
assert Product(x, (x, a, b)).reverse_order(0) == Product(1/x, (x, b + 1, a - 1))
assert Product(x, (x, a, b)).reverse_order(x) == Product(1/x, (x, b + 1, a - 1))
assert Product(x*y, (x, a, b), (y, 2, 5)).reverse_order(x, 1) == \
Product(x*y, (x, b + 1, a - 1), (y, 6, 1))
assert Product(x*y, (x, a, b), (y, 2, 5)).reverse_order(y, x) == \
Product(x*y, (x, b + 1, a - 1), (y, 6, 1))
def test_rewrite_Sum():
    """Product.rewrite(Sum) turns a product into exp of the log-sum."""
    prod = Product(1 - S.Half**2/k**2, (k, 1, oo))
    expected = exp(Sum(log(1 - 1/(4*k**2)), (k, 1, oo)))
    assert prod.rewrite(Sum) == expected
| bsd-3-clause |
esthermm/odoomrp-wip | product_variants_no_automatic_creation/tests/test_product_variants.py | 10 | 4833 | # -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp.tests.common import TransactionCase
class TestProductVariant(TransactionCase):
    """Tests for suppressing automatic variant creation on product
    templates, controlled either by the template's own
    ``no_create_variants`` flag or inherited from its category."""

    def setUp(self):
        super(TestProductVariant, self).setUp()
        # Template model with the context key that enables the
        # variant-creation check in the module under test.
        self.tmpl_model = self.env['product.template'].with_context(
            check_variant_creation=True)
        self.categ_model = self.env['product.category']
        # Category left with its default no_create_variants value
        # (asserted truthy in the tests below).
        self.categ1 = self.categ_model.create({
            'name': 'No create variants category',
        })
        # Category that explicitly allows variant creation.
        self.categ2 = self.categ_model.create({
            'name': 'Create variants category',
            'no_create_variants': False,
        })
        # One attribute with two values -> two possible variants.
        self.attribute = self.env['product.attribute'].create({
            'name': 'Test Attribute',
        })
        self.value1 = self.env['product.attribute.value'].create({
            'name': 'Value 1',
            'attribute_id': self.attribute.id,
        })
        self.value2 = self.env['product.attribute.value'].create({
            'name': 'Value 2',
            'attribute_id': self.attribute.id,
        })

    def test_no_create_variants(self):
        # Template explicitly set to 'yes' (= do NOT create variants),
        # even though it has attribute lines.
        tmpl = self.tmpl_model.create({
            'name': 'No create variants template',
            'no_create_variants': 'yes',
            'attribute_line_ids': [
                (0, 0, {'attribute_id': self.attribute.id,
                        'value_ids': [(6, 0, [self.value1.id,
                                              self.value2.id])]})],
        })
        self.assertEquals(len(tmpl.product_variant_ids), 0)
        # Same flag without attribute lines: still no variant.
        tmpl = self.tmpl_model.create({
            'name': 'No variants template',
            'no_create_variants': 'yes',
        })
        self.assertEquals(len(tmpl.product_variant_ids), 0)

    def test_no_create_variants_category(self):
        # Category flag (default truthy) propagates: template keeps the
        # 'empty' marker and no variants are created.
        self.assertTrue(self.categ1.no_create_variants)
        tmpl = self.tmpl_model.create({
            'name': 'Category option template',
            'categ_id': self.categ1.id,
            'attribute_line_ids': [
                (0, 0, {'attribute_id': self.attribute.id,
                        'value_ids': [(6, 0, [self.value1.id,
                                              self.value2.id])]})],
        })
        self.assertTrue(tmpl.no_create_variants == 'empty')
        self.assertEquals(len(tmpl.product_variant_ids), 0)
        tmpl = self.tmpl_model.create({
            'name': 'No variants template',
            'categ_id': self.categ1.id,
        })
        self.assertTrue(tmpl.no_create_variants == 'empty')
        self.assertEquals(len(tmpl.product_variant_ids), 0)

    def test_create_variants(self):
        # Template explicitly set to 'no' (= DO create variants):
        # two attribute values -> two variants.
        tmpl = self.tmpl_model.create({
            'name': 'Create variants template',
            'no_create_variants': 'no',
            'attribute_line_ids': [
                (0, 0, {'attribute_id': self.attribute.id,
                        'value_ids': [(6, 0, [self.value1.id,
                                              self.value2.id])]})],
        })
        self.assertEquals(len(tmpl.product_variant_ids), 2)
        # Without attribute lines a single default variant is created.
        tmpl = self.tmpl_model.create({
            'name': 'No variants template',
            'no_create_variants': 'no',
        })
        self.assertEquals(len(tmpl.product_variant_ids), 1)

    def test_create_variants_category(self):
        # Category allows creation: variants are generated although the
        # template keeps the 'empty' (inherit-from-category) marker.
        self.assertFalse(self.categ2.no_create_variants)
        tmpl = self.tmpl_model.create({
            'name': 'Category option template',
            'categ_id': self.categ2.id,
            'attribute_line_ids': [
                (0, 0, {'attribute_id': self.attribute.id,
                        'value_ids': [(6, 0, [self.value1.id,
                                              self.value2.id])]})],
        })
        self.assertTrue(tmpl.no_create_variants == 'empty')
        self.assertEquals(len(tmpl.product_variant_ids), 2)
        tmpl = self.tmpl_model.create({
            'name': 'No variants template',
            'categ_id': self.categ2.id,
        })
        self.assertTrue(tmpl.no_create_variants == 'empty')
        self.assertEquals(len(tmpl.product_variant_ids), 1)

    def test_category_change(self):
        # Flipping the category flag after the fact triggers (deferred)
        # variant creation on templates inheriting from it.
        self.assertTrue(self.categ1.no_create_variants)
        tmpl = self.tmpl_model.create({
            'name': 'Category option template',
            'categ_id': self.categ1.id,
            'attribute_line_ids': [
                (0, 0, {'attribute_id': self.attribute.id,
                        'value_ids': [(6, 0, [self.value1.id,
                                              self.value2.id])]})],
        })
        self.assertTrue(tmpl.no_create_variants == 'empty')
        self.assertEquals(len(tmpl.product_variant_ids), 0)
        self.categ1.no_create_variants = False
        self.assertEquals(len(tmpl.product_variant_ids), 2)
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/reshape/merge.py | 3 | 53839 | """
SQL-style merge routines
"""
import copy
import warnings
import string
import numpy as np
from pandas.compat import range, lzip, zip, map, filter
import pandas.compat as compat
from pandas import (Categorical, Series, DataFrame,
Index, MultiIndex, Timedelta)
from pandas.core.frame import _merge_doc
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_datetime64_dtype,
needs_i8_conversion,
is_int64_dtype,
is_categorical_dtype,
is_integer_dtype,
is_float_dtype,
is_numeric_dtype,
is_integer,
is_int_or_datetime_dtype,
is_dtype_equal,
is_bool,
is_list_like,
_ensure_int64,
_ensure_float64,
_ensure_object,
_get_dtype)
from pandas.core.dtypes.missing import na_value_for_dtype
from pandas.core.internals import (items_overlap_with_suffix,
concatenate_block_managers)
from pandas.util._decorators import Appender, Substitution
from pandas.core.sorting import is_int64_overflow_possible
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas._libs import hashtable as libhashtable, join as libjoin, lib
@Substitution('\nleft : DataFrame')
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
          left_index=False, right_index=False, sort=False,
          suffixes=('_x', '_y'), copy=True, indicator=False):
    # Thin public wrapper: all of the work happens in _MergeOperation.
    # The docstring is injected from _merge_doc by the decorators above.
    operation = _MergeOperation(
        left, right, how=how, on=on,
        left_on=left_on, right_on=right_on,
        left_index=left_index, right_index=right_index,
        sort=sort, suffixes=suffixes,
        copy=copy, indicator=indicator)
    return operation.get_result()
if __debug__:
    # Fill in the shared merge docstring template; skipped under
    # ``python -O``, where docstrings may be stripped anyway.
    merge.__doc__ = _merge_doc % '\nleft : DataFrame'
class MergeError(ValueError):
    """Error raised when merging cannot proceed (e.g. missing or
    ambiguous join keys)."""
    pass
def _groupby_and_merge(by, on, left, right, _merge_pieces,
check_duplicates=True):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: left frame
right: right frame
_merge_pieces: function for merging
check_duplicates: boolean, default True
should we check & clean duplicates
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
# if we can groupby the rhs
# then we can get vastly better perf
try:
# we will check & remove duplicates if indicated
if check_duplicates:
if on is None:
on = []
elif not isinstance(on, (list, tuple)):
on = [on]
if right.duplicated(by + on).any():
right = right.drop_duplicates(by + on, keep='last')
rby = right.groupby(by, sort=False)
except KeyError:
rby = None
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns
if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = _merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should _merge_pieces do this?
for k in by:
try:
if k in merged:
merged[k] = key
except:
pass
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
def ordered_merge(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y')):
    """Deprecated alias of merge_ordered; warns and forwards all
    arguments unchanged (docstring is overwritten below)."""
    warnings.warn("ordered_merge is deprecated and replaced by merge_ordered",
                  FutureWarning, stacklevel=2)
    return merge_ordered(left, right, on=on,
                         left_on=left_on, right_on=right_on,
                         left_by=left_by, right_by=right_by,
                         fill_method=fill_method, suffixes=suffixes)
def merge_ordered(left, right, on=None,
                  left_on=None, right_on=None,
                  left_by=None, right_by=None,
                  fill_method=None, suffixes=('_x', '_y'),
                  how='outer'):
    """Perform merge with optional filling/interpolation designed for ordered
    data like time series data. Optionally perform group-wise merge (see
    examples)

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label or list
        Field names to join on. Must be found in both DataFrames.
    left_on : label or list, or array-like
        Field names to join on in left DataFrame. Can be a vector or list of
        vectors of the length of the DataFrame to use a particular vector as
        the join key instead of columns
    right_on : label or list, or array-like
        Field names to join on in right DataFrame or vector/list of vectors per
        left_on docs
    left_by : column name or list of column names
        Group left DataFrame by group columns and merge piece by piece with
        right DataFrame
    right_by : column name or list of column names
        Group right DataFrame by group columns and merge piece by piece with
        left DataFrame
    fill_method : {'ffill', None}, default None
        Interpolation method for data
    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively
    how : {'left', 'right', 'outer', 'inner'}, default 'outer'
        * left: use only keys from left frame (SQL: left outer join)
        * right: use only keys from right frame (SQL: right outer join)
        * outer: use union of keys from both frames (SQL: full outer join)
        * inner: use intersection of keys from both frames (SQL: inner join)

        .. versionadded:: 0.19.0

    Examples
    --------
    >>> A                      >>> B
          key  lvalue group        key  rvalue
    0   a        1     a    0     b       1
    1   c        2     a    1     c       2
    2   e        3     a    2     d       3
    3   a        1     b
    4   c        2     b
    5   e        3     b

    >>> merge_ordered(A, B, fill_method='ffill', left_by='group')
       key  lvalue group  rvalue
    0    a       1     a     NaN
    1    b       1     a       1
    2    c       2     a       2
    3    d       2     a       3
    4    e       3     a       3
    5    f       3     a       4
    6    a       1     b     NaN
    7    b       1     b       1
    8    c       2     b       2
    9    d       2     b       3
    10   e       3     b       3
    11   f       3     b       4

    Returns
    -------
    merged : DataFrame
        The output type will the be same as 'left', if it is a subclass
        of DataFrame.

    See also
    --------
    merge
    merge_asof

    """
    def _merger(x, y):
        # perform the ordered merge operation on one (group) pair
        op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on,
                           suffixes=suffixes, fill_method=fill_method,
                           how=how)
        return op.get_result()

    # at most one side may be grouped
    if left_by is not None and right_by is not None:
        raise ValueError('Can only group either left or right frames')
    elif left_by is not None:
        result, _ = _groupby_and_merge(left_by, on, left, right,
                                       lambda x, y: _merger(x, y),
                                       check_duplicates=False)
    elif right_by is not None:
        # grouping on the right: swap the operand order inside the merger
        result, _ = _groupby_and_merge(right_by, on, right, left,
                                       lambda x, y: _merger(y, x),
                                       check_duplicates=False)
    else:
        result = _merger(left, right)
    return result
# Keep the deprecated alias's docstring in sync with the real function.
ordered_merge.__doc__ = merge_ordered.__doc__
def merge_asof(left, right, on=None,
               left_on=None, right_on=None,
               left_index=False, right_index=False,
               by=None, left_by=None, right_by=None,
               suffixes=('_x', '_y'),
               tolerance=None,
               allow_exact_matches=True,
               direction='backward'):
    """Perform an asof merge. This is similar to a left-join except that we
    match on nearest key rather than equal keys.

    Both DataFrames must be sorted by the key.

    For each row in the left DataFrame:

      - A "backward" search selects the last row in the right DataFrame whose
        'on' key is less than or equal to the left's key.

      - A "forward" search selects the first row in the right DataFrame whose
        'on' key is greater than or equal to the left's key.

      - A "nearest" search selects the row in the right DataFrame whose 'on'
        key is closest in absolute distance to the left's key.

    The default is "backward" and is compatible in versions below 0.20.0.
    The direction parameter was added in version 0.20.0 and introduces
    "forward" and "nearest".

    Optionally match on equivalent keys with 'by' before searching with 'on'.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    left : DataFrame
    right : DataFrame
    on : label
        Field name to join on. Must be found in both DataFrames.
        The data MUST be ordered. Furthermore this must be a numeric column,
        such as datetimelike, integer, or float. On or left_on/right_on
        must be given.
    left_on : label
        Field name to join on in left DataFrame.
    right_on : label
        Field name to join on in right DataFrame.
    left_index : boolean
        Use the index of the left DataFrame as the join key.

        .. versionadded:: 0.19.2

    right_index : boolean
        Use the index of the right DataFrame as the join key.

        .. versionadded:: 0.19.2

    by : column name or list of column names
        Match on these columns before performing merge operation.
    left_by : column name
        Field names to match on in the left DataFrame.

        .. versionadded:: 0.19.2

    right_by : column name
        Field names to match on in the right DataFrame.

        .. versionadded:: 0.19.2

    suffixes : 2-length sequence (tuple, list, ...)
        Suffix to apply to overlapping column names in the left and right
        side, respectively.
    tolerance : integer or Timedelta, optional, default None
        Select asof tolerance within this range; must be compatible
        with the merge index.
    allow_exact_matches : boolean, default True

        - If True, allow matching with the same 'on' value
          (i.e. less-than-or-equal-to / greater-than-or-equal-to)
        - If False, don't match the same 'on' value
          (i.e., strictly less-than / strictly greater-than)

    direction : 'backward' (default), 'forward', or 'nearest'
        Whether to search for prior, subsequent, or closest matches.

        .. versionadded:: 0.20.0

    Returns
    -------
    merged : DataFrame

    Examples
    --------
    >>> left
        a left_val
    0   1        a
    1   5        b
    2  10        c

    >>> right
       a  right_val
    0  1          1
    1  2          2
    2  3          3
    3  6          6
    4  7          7

    >>> pd.merge_asof(left, right, on='a')
        a left_val  right_val
    0   1        a          1
    1   5        b          3
    2  10        c          7

    >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False)
        a left_val  right_val
    0   1        a        NaN
    1   5        b        3.0
    2  10        c        7.0

    >>> pd.merge_asof(left, right, on='a', direction='forward')
        a left_val  right_val
    0   1        a        1.0
    1   5        b        6.0
    2  10        c        NaN

    >>> pd.merge_asof(left, right, on='a', direction='nearest')
        a left_val  right_val
    0   1        a          1
    1   5        b          6
    2  10        c          7

    We can use indexed DataFrames as well.

    >>> left
       left_val
    1         a
    5         b
    10        c

    >>> right
       right_val
    1          1
    2          2
    3          3
    6          6
    7          7

    >>> pd.merge_asof(left, right, left_index=True, right_index=True)
       left_val  right_val
    1         a          1
    5         b          3
    10        c          7

    Here is a real-world times-series example

    >>> quotes
                         time ticker     bid     ask
    0 2016-05-25 13:30:00.023   GOOG  720.50  720.93
    1 2016-05-25 13:30:00.023   MSFT   51.95   51.96
    2 2016-05-25 13:30:00.030   MSFT   51.97   51.98
    3 2016-05-25 13:30:00.041   MSFT   51.99   52.00
    4 2016-05-25 13:30:00.048   GOOG  720.50  720.93
    5 2016-05-25 13:30:00.049   AAPL   97.99   98.01
    6 2016-05-25 13:30:00.072   GOOG  720.50  720.88
    7 2016-05-25 13:30:00.075   MSFT   52.01   52.03

    >>> trades
                         time ticker   price  quantity
    0 2016-05-25 13:30:00.023   MSFT   51.95        75
    1 2016-05-25 13:30:00.038   MSFT   51.95       155
    2 2016-05-25 13:30:00.048   GOOG  720.77       100
    3 2016-05-25 13:30:00.048   GOOG  720.92       100
    4 2016-05-25 13:30:00.048   AAPL   98.00       100

    By default we are taking the asof of the quotes

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker')
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 2ms between the quote time and the trade time

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker',
    ...                       tolerance=pd.Timedelta('2ms'))
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75   51.95   51.96
    1 2016-05-25 13:30:00.038   MSFT   51.95       155     NaN     NaN
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    We only asof within 10ms between the quote time and the trade time
    and we exclude exact matches on time. However *prior* data will
    propagate forward

    >>> pd.merge_asof(trades, quotes,
    ...                       on='time',
    ...                       by='ticker',
    ...                       tolerance=pd.Timedelta('10ms'),
    ...                       allow_exact_matches=False)
                         time ticker   price  quantity     bid     ask
    0 2016-05-25 13:30:00.023   MSFT   51.95        75     NaN     NaN
    1 2016-05-25 13:30:00.038   MSFT   51.95       155   51.97   51.98
    2 2016-05-25 13:30:00.048   GOOG  720.77       100  720.50  720.93
    3 2016-05-25 13:30:00.048   GOOG  720.92       100  720.50  720.93
    4 2016-05-25 13:30:00.048   AAPL   98.00       100     NaN     NaN

    See also
    --------
    merge
    merge_ordered

    """
    # all of the work is delegated to the asof-specialized merge operation
    op = _AsOfMerge(left, right,
                    on=on, left_on=left_on, right_on=right_on,
                    left_index=left_index, right_index=right_index,
                    by=by, left_by=left_by, right_by=right_by,
                    suffixes=suffixes,
                    how='asof', tolerance=tolerance,
                    allow_exact_matches=allow_exact_matches,
                    direction=direction)
    return op.get_result()
# TODO: transformations??
# TODO: only copy DataFrames when modification necessary
class _MergeOperation(object):
"""
Perform a database (SQL) merge operation between two DataFrame objects
using either columns as keys or their row indexes
"""
_merge_type = 'merge'
    def __init__(self, left, right, how='inner', on=None,
                 left_on=None, right_on=None, axis=1,
                 left_index=False, right_index=False, sort=True,
                 suffixes=('_x', '_y'), copy=True, indicator=False):
        """Validate the merge arguments and pre-compute the join keys.

        Raises ValueError for non-DataFrame inputs, non-bool index flags
        or an invalid ``indicator``; warns when merging frames whose
        column indexes have a different number of levels.
        """
        self.left = self.orig_left = left
        self.right = self.orig_right = right
        self.how = how
        self.axis = axis

        # normalize key specifications to lists (or None)
        self.on = com._maybe_make_list(on)
        self.left_on = com._maybe_make_list(left_on)
        self.right_on = com._maybe_make_list(right_on)

        self.copy = copy
        self.suffixes = suffixes
        self.sort = sort

        self.left_index = left_index
        self.right_index = right_index

        self.indicator = indicator

        # indicator may be a bool (column named '_merge') or a column name
        if isinstance(self.indicator, compat.string_types):
            self.indicator_name = self.indicator
        elif isinstance(self.indicator, bool):
            self.indicator_name = '_merge' if self.indicator else None
        else:
            raise ValueError(
                'indicator option can only accept boolean or string arguments')

        if not isinstance(left, DataFrame):
            raise ValueError(
                'can not merge DataFrame with instance of '
                'type {0}'.format(type(left)))
        if not isinstance(right, DataFrame):
            raise ValueError(
                'can not merge DataFrame with instance of '
                'type {0}'.format(type(right)))

        if not is_bool(left_index):
            raise ValueError(
                'left_index parameter must be of type bool, not '
                '{0}'.format(type(left_index)))
        if not is_bool(right_index):
            raise ValueError(
                'right_index parameter must be of type bool, not '
                '{0}'.format(type(right_index)))

        # warn user when merging between different levels
        if left.columns.nlevels != right.columns.nlevels:
            msg = ('merging between different levels can give an unintended '
                   'result ({0} levels on the left, {1} on the right)')
            msg = msg.format(left.columns.nlevels, right.columns.nlevels)
            warnings.warn(msg, UserWarning)

        self._validate_specification()

        # note this function has side effects
        # (it may copy/drop key columns on self.left / self.right)
        (self.left_join_keys,
         self.right_join_keys,
         self.join_names) = self._get_merge_keys()

        # validate the merge keys dtypes. We may need to coerce
        # to avoid incompat dtypes
        self._maybe_coerce_merge_keys()
    def get_result(self):
        """Execute the merge and return the resulting DataFrame."""
        if self.indicator:
            # add the temporary _left_indicator/_right_indicator columns
            self.left, self.right = self._indicator_pre_merge(
                self.left, self.right)

        join_index, left_indexer, right_indexer = self._get_join_info()

        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes

        # disambiguate overlapping column names with the suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)

        lindexers = {1: left_indexer} if left_indexer is not None else {}
        rindexers = {1: right_indexer} if right_indexer is not None else {}

        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)

        # preserve subclasses of DataFrame
        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)

        if self.indicator:
            # replace the temporary columns with the categorical indicator
            result = self._indicator_post_merge(result)

        self._maybe_add_join_keys(result, left_indexer, right_indexer)

        return result
    def _indicator_pre_merge(self, left, right):
        """Return copies of left/right with int8 marker columns
        (_left_indicator=1, _right_indicator=2) used to derive the merge
        indicator afterwards; reject name collisions."""

        columns = left.columns.union(right.columns)

        for i in ['_left_indicator', '_right_indicator']:
            if i in columns:
                raise ValueError("Cannot use `indicator=True` option when "
                                 "data contains a column named {}".format(i))
        if self.indicator_name in columns:
            raise ValueError(
                "Cannot use name of an existing column for indicator column")

        # copy so the originals are left untouched
        left = left.copy()
        right = right.copy()

        left['_left_indicator'] = 1
        left['_left_indicator'] = left['_left_indicator'].astype('int8')

        right['_right_indicator'] = 2
        right['_right_indicator'] = right['_right_indicator'].astype('int8')

        return left, right
    def _indicator_post_merge(self, result):
        """Turn the temporary marker columns into the categorical merge
        indicator (left_only/right_only/both) and drop them."""

        # missing side -> 0, so the sum encodes: 1=left, 2=right, 3=both
        result['_left_indicator'] = result['_left_indicator'].fillna(0)
        result['_right_indicator'] = result['_right_indicator'].fillna(0)

        result[self.indicator_name] = Categorical((result['_left_indicator'] +
                                                   result['_right_indicator']),
                                                  categories=[1, 2, 3])
        result[self.indicator_name] = (
            result[self.indicator_name]
            .cat.rename_categories(['left_only', 'right_only', 'both']))

        result = result.drop(labels=['_left_indicator', '_right_indicator'],
                             axis=1)
        return result
    def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
        """Fill or insert join-key columns in ``result``.

        For outer-style joins a key column taken from one side has missing
        entries where that side did not match; those are patched from the
        other side's key values. Keys that came in as arrays (not column
        names) are inserted as new columns.
        """

        left_has_missing = None
        right_has_missing = None

        keys = zip(self.join_names, self.left_on, self.right_on)
        for i, (name, lname, rname) in enumerate(keys):
            if not _should_fill(lname, rname):
                continue

            take_left, take_right = None, None

            if name in result:

                if left_indexer is not None and right_indexer is not None:
                    if name in self.left:

                        # lazily computed and cached across iterations
                        if left_has_missing is None:
                            left_has_missing = (left_indexer == -1).any()

                        if left_has_missing:
                            take_right = self.right_join_keys[i]

                            # dtype changed (e.g. int -> float for NaN);
                            # re-take the left values as well
                            if not is_dtype_equal(result[name].dtype,
                                                  self.left[name].dtype):
                                take_left = self.left[name]._values

                    elif name in self.right:

                        if right_has_missing is None:
                            right_has_missing = (right_indexer == -1).any()

                        if right_has_missing:
                            take_left = self.left_join_keys[i]

                            if not is_dtype_equal(result[name].dtype,
                                                  self.right[name].dtype):
                                take_right = self.right[name]._values

            elif left_indexer is not None \
                    and isinstance(self.left_join_keys[i], np.ndarray):
                # key was passed as an array: insert it as a column below
                take_left = self.left_join_keys[i]
                take_right = self.right_join_keys[i]

            if take_left is not None or take_right is not None:

                if take_left is None:
                    lvals = result[name]._values
                else:
                    lfill = na_value_for_dtype(take_left.dtype)
                    lvals = algos.take_1d(take_left, left_indexer,
                                          fill_value=lfill)

                if take_right is None:
                    rvals = result[name]._values
                else:
                    rfill = na_value_for_dtype(take_right.dtype)
                    rvals = algos.take_1d(take_right, right_indexer,
                                          fill_value=rfill)

                # if we have an all missing left_indexer
                # make sure to just use the right values
                mask = left_indexer == -1
                if mask.all():
                    key_col = rvals
                else:
                    key_col = Index(lvals).where(~mask, rvals)

                if name in result:
                    result[name] = key_col
                else:
                    result.insert(i, name or 'key_%d' % i, key_col)
    def _get_join_indexers(self):
        """ return the join indexers (left_indexer, right_indexer)
        computed from the precomputed join keys """
        return _get_join_indexers(self.left_join_keys,
                                  self.right_join_keys,
                                  sort=self.sort,
                                  how=self.how)
    def _get_join_info(self):
        """Compute (join_index, left_indexer, right_indexer) for the merge,
        dispatching on which sides join on their index."""
        left_ax = self.left._data.axes[self.axis]
        right_ax = self.right._data.axes[self.axis]

        if self.left_index and self.right_index and self.how != 'asof':
            # pure index-on-index join
            join_index, left_indexer, right_indexer = \
                left_ax.join(right_ax, how=self.how, return_indexers=True,
                             sort=self.sort)
        elif self.right_index and self.how == 'left':
            join_index, left_indexer, right_indexer = \
                _left_join_on_index(left_ax, right_ax, self.left_join_keys,
                                    sort=self.sort)

        elif self.left_index and self.how == 'right':
            # mirror image of the case above
            join_index, right_indexer, left_indexer = \
                _left_join_on_index(right_ax, left_ax, self.right_join_keys,
                                    sort=self.sort)
        else:
            (left_indexer,
             right_indexer) = self._get_join_indexers()

            if self.right_index:
                if len(self.left) > 0:
                    join_index = self.left.index.take(left_indexer)
                else:
                    # empty left: result index comes from the right side
                    join_index = self.right.index.take(right_indexer)
                    left_indexer = np.array([-1] * len(join_index))
            elif self.left_index:
                if len(self.right) > 0:
                    join_index = self.right.index.take(right_indexer)
                else:
                    join_index = self.left.index.take(left_indexer)
                    right_indexer = np.array([-1] * len(join_index))
            else:
                # neither side joins on its index: fresh RangeIndex-like
                join_index = Index(np.arange(len(left_indexer)))

        if len(join_index) == 0:
            join_index = join_index.astype(object)
        return join_index, left_indexer, right_indexer
    def _get_merge_keys(self):
        """
        Note: has side effects (copy/delete key columns)

        Resolves left_on/right_on/index specifications into the actual key
        arrays used for joining, and records the output column name for
        each key (None for keys passed as bare arrays).

        Parameters
        ----------
        left
        right
        on

        Returns
        -------
        left_keys, right_keys
        """
        left_keys = []
        right_keys = []
        join_names = []
        right_drop = []
        left_drop = []
        left, right = self.left, self.right

        # a "key" (vs a column label) is an array/Series of matching length
        is_lkey = lambda x: isinstance(
            x, (np.ndarray, Series)) and len(x) == len(left)
        is_rkey = lambda x: isinstance(
            x, (np.ndarray, Series)) and len(x) == len(right)

        # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A
        # user could, for example, request 'left_index' and 'left_by'. In a
        # regular pd.merge(), users cannot specify both 'left_index' and
        # 'left_on'. (Instead, users have a MultiIndex). That means the
        # self.left_on in this function is always empty in a pd.merge(), but
        # a pd.merge_asof(left_index=True, left_by=...) will result in a
        # self.left_on array with a None in the middle of it. This requires
        # a work-around as designated in the code below.
        # See _validate_specification() for where this happens.

        # ugh, spaghetti re #733
        if _any(self.left_on) and _any(self.right_on):
            for lk, rk in zip(self.left_on, self.right_on):
                if is_lkey(lk):
                    left_keys.append(lk)
                    if is_rkey(rk):
                        right_keys.append(rk)
                        join_names.append(None)  # what to do?
                    else:
                        if rk is not None:
                            right_keys.append(right[rk]._values)
                            join_names.append(rk)
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index)
                            join_names.append(right.index.name)
                else:
                    if not is_rkey(rk):
                        if rk is not None:
                            right_keys.append(right[rk]._values)
                        else:
                            # work-around for merge_asof(right_index=True)
                            right_keys.append(right.index)
                        if lk is not None and lk == rk:
                            # avoid key upcast in corner case (length-0)
                            if len(left) > 0:
                                right_drop.append(rk)
                            else:
                                left_drop.append(lk)
                    else:
                        right_keys.append(rk)
                    if lk is not None:
                        left_keys.append(left[lk]._values)
                        join_names.append(lk)
                    else:
                        # work-around for merge_asof(left_index=True)
                        left_keys.append(left.index)
                        join_names.append(left.index.name)
        elif _any(self.left_on):
            # only the left side names keys; right side joins on its index
            for k in self.left_on:
                if is_lkey(k):
                    left_keys.append(k)
                    join_names.append(None)
                else:
                    left_keys.append(left[k]._values)
                    join_names.append(k)
            if isinstance(self.right.index, MultiIndex):
                right_keys = [lev._values.take(lab)
                              for lev, lab in zip(self.right.index.levels,
                                                  self.right.index.labels)]
            else:
                right_keys = [self.right.index.values]
        elif _any(self.right_on):
            # symmetric case: right side names keys, left joins on index
            for k in self.right_on:
                if is_rkey(k):
                    right_keys.append(k)
                    join_names.append(None)
                else:
                    right_keys.append(right[k]._values)
                    join_names.append(k)
            if isinstance(self.left.index, MultiIndex):
                left_keys = [lev._values.take(lab)
                             for lev, lab in zip(self.left.index.levels,
                                                 self.left.index.labels)]
            else:
                left_keys = [self.left.index.values]

        # drop redundant duplicate key columns (side effect)
        if left_drop:
            self.left = self.left.drop(left_drop, axis=1)

        if right_drop:
            self.right = self.right.drop(right_drop, axis=1)

        return left_keys, right_keys, join_names
    def _maybe_coerce_merge_keys(self):
        """
        Coerce incompatible left/right join-key dtypes to object.

        Walks the already-computed join keys pairwise and, whenever a pair
        cannot be merged as-is (e.g. object vs. int, mismatched
        categoricals), casts the offending column(s) in ``self.left`` /
        ``self.right`` to ``object``.  Rebinds ``self.left`` and
        ``self.right`` (side effect).
        """
        # we have valid mergee's but we may have to further
        # coerce these if they are originally incompatible types
        #
        # for example if these are categorical, but are not dtype_equal
        # or if we have object and integer dtypes
        for lk, rk, name in zip(self.left_join_keys,
                                self.right_join_keys,
                                self.join_names):
            # one side empty: nothing to reconcile for this pair
            if (len(lk) and not len(rk)) or (not len(lk) and len(rk)):
                continue
            # if either left or right is a categorical
            # then the must match exactly in categories & ordered
            if is_categorical_dtype(lk) and is_categorical_dtype(rk):
                if lk.is_dtype_equal(rk):
                    continue
            elif is_categorical_dtype(lk) or is_categorical_dtype(rk):
                # only one side categorical: fall through to object coercion
                pass
            elif is_dtype_equal(lk.dtype, rk.dtype):
                continue
            # if we are numeric, then allow differing
            # kinds to proceed, eg. int64 and int8
            # further if we are object, but we infer to
            # the same, then proceed
            if (is_numeric_dtype(lk) and is_numeric_dtype(rk)):
                if lk.dtype.kind == rk.dtype.kind:
                    continue
                # let's infer and see if we are ok
                if lib.infer_dtype(lk) == lib.infer_dtype(rk):
                    continue
            # Houston, we have a problem!
            # let's coerce to object
            if name in self.left.columns:
                self.left = self.left.assign(
                    **{name: self.left[name].astype(object)})
            if name in self.right.columns:
                self.right = self.right.assign(
                    **{name: self.right[name].astype(object)})
    def _validate_specification(self):
        """
        Validate and normalize the on/left_on/right_on/index arguments.

        Populates ``self.left_on`` / ``self.right_on`` so downstream code
        can rely on both being set and of equal length (``None`` entries
        act as placeholders for index-based keys).

        Raises
        ------
        MergeError
            For invalid argument combinations.
        ValueError
            When key-list lengths disagree with each other or with index
            levels.
        """
        # Hm, any way to make this logic less complicated??
        if self.on is None and self.left_on is None and self.right_on is None:
            if self.left_index and self.right_index:
                self.left_on, self.right_on = (), ()
            elif self.left_index:
                if self.right_on is None:
                    raise MergeError('Must pass right_on or right_index=True')
            elif self.right_index:
                if self.left_on is None:
                    raise MergeError('Must pass left_on or left_index=True')
            else:
                # use the common columns
                common_cols = self.left.columns.intersection(
                    self.right.columns)
                if len(common_cols) == 0:
                    raise MergeError('No common columns to perform merge on')
                if not common_cols.is_unique:
                    raise MergeError("Data columns not unique: %s"
                                     % repr(common_cols))
                self.left_on = self.right_on = common_cols
        elif self.on is not None:
            if self.left_on is not None or self.right_on is not None:
                raise MergeError('Can only pass argument "on" OR "left_on" '
                                 'and "right_on", not a combination of both.')
            self.left_on = self.right_on = self.on
        elif self.left_on is not None:
            n = len(self.left_on)
            if self.right_index:
                if len(self.left_on) != self.right.index.nlevels:
                    raise ValueError('len(left_on) must equal the number '
                                     'of levels in the index of "right"')
                # None placeholders: values come from the right index
                self.right_on = [None] * n
        elif self.right_on is not None:
            n = len(self.right_on)
            if self.left_index:
                if len(self.right_on) != self.left.index.nlevels:
                    raise ValueError('len(right_on) must equal the number '
                                     'of levels in the index of "left"')
                # None placeholders: values come from the left index
                self.left_on = [None] * n
        if len(self.right_on) != len(self.left_on):
            raise ValueError("len(right_on) must equal len(left_on)")
def _get_join_indexers(left_keys, right_keys, sort=False, how='inner',
                       **kwargs):
    """
    Compute row indexers realizing a join between two sets of key arrays.

    Parameters
    ----------
    left_keys: ndarray, Index, Series
    right_keys: ndarray, Index, Series
    sort: boolean, default False
    how: string {'inner', 'outer', 'left', 'right'}, default 'inner'

    Returns
    -------
    tuple of (left_indexer, right_indexer)
        indexers into the left_keys, right_keys
    """
    from functools import partial

    assert len(left_keys) == len(right_keys), \
        'left_key and right_keys must be the same length'

    # factorizer with the requested `sort` behaviour already bound in
    factorize = partial(_factorize_keys, sort=sort)

    # per-level integer labels for both sides, plus the number of
    # distinct values observed at each level
    left_labels, right_labels, shape = map(
        list, zip(* map(factorize, left_keys, right_keys)))

    # collapse the per-level labels into flat int64 keys
    left_flat, right_flat = _get_join_keys(left_labels, right_labels,
                                           shape, sort)

    # densify the flat keys; `count` is the number of unique keys,
    # i.e. set(left_flat) | set(right_flat) == range(count)
    left_flat, right_flat, count = factorize(left_flat, right_flat)

    kwargs = copy.copy(kwargs)
    if how == 'left':
        # a left join preserves the left frame's order unless sorting
        # was explicitly requested
        kwargs['sort'] = sort

    return _join_functions[how](left_flat, right_flat, count, **kwargs)
class _OrderedMerge(_MergeOperation):
    """Order-preserving merge operation with optional forward-filling."""
    _merge_type = 'ordered_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False, axis=1,
                 suffixes=('_x', '_y'), copy=True,
                 fill_method=None, how='outer'):
        # fill_method: None or 'ffill' -- whether to forward-fill the
        # gaps the join introduces
        self.fill_method = fill_method
        _MergeOperation.__init__(self, left, right, on=on, left_on=left_on,
                                 left_index=left_index,
                                 right_index=right_index,
                                 right_on=right_on, axis=axis,
                                 how=how, suffixes=suffixes,
                                 sort=True  # factorize sorts
                                 )
    def get_result(self):
        """Execute the merge and return the combined frame."""
        join_index, left_indexer, right_indexer = self._get_join_info()
        # this is a bit kludgy
        ldata, rdata = self.left._data, self.right._data
        lsuf, rsuf = self.suffixes
        # disambiguate overlapping column labels using the suffixes
        llabels, rlabels = items_overlap_with_suffix(ldata.items, lsuf,
                                                     rdata.items, rsuf)
        if self.fill_method == 'ffill':
            # forward-fill the indexers so missing rows repeat prior rows
            left_join_indexer = libjoin.ffill_indexer(left_indexer)
            right_join_indexer = libjoin.ffill_indexer(right_indexer)
        else:
            left_join_indexer = left_indexer
            right_join_indexer = right_indexer
        # axis 1 (rows) is the one being reindexed by the join
        lindexers = {
            1: left_join_indexer} if left_join_indexer is not None else {}
        rindexers = {
            1: right_join_indexer} if right_join_indexer is not None else {}
        result_data = concatenate_block_managers(
            [(ldata, lindexers), (rdata, rindexers)],
            axes=[llabels.append(rlabels), join_index],
            concat_axis=0, copy=self.copy)
        typ = self.left._constructor
        result = typ(result_data).__finalize__(self, method=self._merge_type)
        self._maybe_add_join_keys(result, left_indexer, right_indexer)
        return result
def _asof_function(direction, on_type):
    """Look up the cython asof-join routine for a direction/dtype pair.

    Returns None when no specialized routine exists in libjoin.
    """
    func_name = 'asof_join_%s_%s' % (direction, on_type)
    return getattr(libjoin, func_name, None)
def _asof_by_function(direction, on_type, by_type):
    """Look up the grouped ('by') cython asof-join routine, or None."""
    func_name = 'asof_join_%s_%s_by_%s' % (direction, on_type, by_type)
    return getattr(libjoin, func_name, None)
# upcast target name -> coercion function used to prepare 'by' values
# for the hashtable-backed asof join
_type_casters = {
    'int64_t': _ensure_int64,
    'double': _ensure_float64,
    'object': _ensure_object,
}
# numpy dtype name -> cython type name; 'error' marks dtypes that the
# cython join routines cannot handle (see _get_cython_type)
_cython_types = {
    'uint8': 'uint8_t',
    'uint32': 'uint32_t',
    'uint16': 'uint16_t',
    'uint64': 'uint64_t',
    'int8': 'int8_t',
    'int32': 'int32_t',
    'int16': 'int16_t',
    'int64': 'int64_t',
    'float16': 'error',
    'float32': 'float',
    'float64': 'double',
}
def _get_cython_type(dtype):
    """ Given a dtype, return a C name like 'int64_t' or 'double' """
    name = _get_dtype(dtype).name
    mapped = _cython_types.get(name, 'object')
    if mapped != 'error':
        return mapped
    raise MergeError('unsupported type: ' + name)
def _get_cython_type_upcast(dtype):
""" Upcast a dtype to 'int64_t', 'double', or 'object' """
if is_integer_dtype(dtype):
return 'int64_t'
elif is_float_dtype(dtype):
return 'double'
else:
return 'object'
class _AsOfMerge(_OrderedMerge):
    """Ordered merge matching each left key to the nearest right key."""
    _merge_type = 'asof_merge'
    def __init__(self, left, right, on=None, left_on=None, right_on=None,
                 left_index=False, right_index=False,
                 by=None, left_by=None, right_by=None,
                 axis=1, suffixes=('_x', '_y'), copy=True,
                 fill_method=None,
                 how='asof', tolerance=None,
                 allow_exact_matches=True,
                 direction='backward'):
        # optional exact-match grouping columns
        self.by = by
        self.left_by = left_by
        self.right_by = right_by
        # maximum allowed distance between matched keys
        self.tolerance = tolerance
        self.allow_exact_matches = allow_exact_matches
        # one of 'backward', 'forward', 'nearest' (validated later)
        self.direction = direction
        _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on,
                               right_on=right_on, left_index=left_index,
                               right_index=right_index, axis=axis,
                               how=how, suffixes=suffixes,
                               fill_method=fill_method)
    def _validate_specification(self):
        """Extend the base validation with asof-specific constraints."""
        super(_AsOfMerge, self)._validate_specification()
        # we only allow on to be a single item for on
        if len(self.left_on) != 1 and not self.left_index:
            raise MergeError("can only asof on a key for left")
        if len(self.right_on) != 1 and not self.right_index:
            raise MergeError("can only asof on a key for right")
        if self.left_index and isinstance(self.left.index, MultiIndex):
            raise MergeError("left can only have one index")
        if self.right_index and isinstance(self.right.index, MultiIndex):
            raise MergeError("right can only have one index")
        # set 'by' columns
        if self.by is not None:
            if self.left_by is not None or self.right_by is not None:
                raise MergeError('Can only pass by OR left_by '
                                 'and right_by')
            self.left_by = self.right_by = self.by
        if self.left_by is None and self.right_by is not None:
            raise MergeError('missing left_by')
        if self.left_by is not None and self.right_by is None:
            raise MergeError('missing right_by')
        # add 'by' to our key-list so we can have it in the
        # output as a key
        if self.left_by is not None:
            if not is_list_like(self.left_by):
                self.left_by = [self.left_by]
            if not is_list_like(self.right_by):
                self.right_by = [self.right_by]
            if len(self.left_by) != len(self.right_by):
                raise MergeError('left_by and right_by must be same length')
            # 'by' keys are prepended so the asof key stays last
            self.left_on = self.left_by + list(self.left_on)
            self.right_on = self.right_by + list(self.right_on)
        # check 'direction' is valid
        if self.direction not in ['backward', 'forward', 'nearest']:
            raise MergeError('direction invalid: ' + self.direction)
    @property
    def _asof_key(self):
        """ This is our asof key, the 'on' """
        return self.left_on[-1]
    def _get_merge_keys(self):
        """Compute merge keys and validate dtypes/tolerance for asof."""
        # note this function has side effects
        (left_join_keys,
         right_join_keys,
         join_names) = super(_AsOfMerge, self)._get_merge_keys()
        # validate index types are the same
        for lk, rk in zip(left_join_keys, right_join_keys):
            if not is_dtype_equal(lk.dtype, rk.dtype):
                raise MergeError("incompatible merge keys, "
                                 "must be the same type")
        # validate tolerance; must be a Timedelta if we have a DTI
        if self.tolerance is not None:
            if self.left_index:
                lt = self.left.index
            else:
                lt = left_join_keys[-1]
            msg = "incompatible tolerance, must be compat " \
                  "with type {0}".format(type(lt))
            if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt):
                if not isinstance(self.tolerance, Timedelta):
                    raise MergeError(msg)
                if self.tolerance < Timedelta(0):
                    raise MergeError("tolerance must be positive")
            elif is_int64_dtype(lt):
                if not is_integer(self.tolerance):
                    raise MergeError(msg)
                if self.tolerance < 0:
                    raise MergeError("tolerance must be positive")
            else:
                raise MergeError("key must be integer or timestamp")
        # validate allow_exact_matches
        if not is_bool(self.allow_exact_matches):
            raise MergeError("allow_exact_matches must be boolean, "
                             "passed {0}".format(self.allow_exact_matches))
        return left_join_keys, right_join_keys, join_names
    def _get_join_indexers(self):
        """ return the join indexers """
        def flip(xs):
            """ unlike np.transpose, this returns an array of tuples """
            labels = list(string.ascii_lowercase[:len(xs)])
            dtypes = [x.dtype for x in xs]
            labeled_dtypes = list(zip(labels, dtypes))
            return np.array(lzip(*xs), labeled_dtypes)
        # values to compare
        left_values = (self.left.index.values if self.left_index else
                       self.left_join_keys[-1])
        right_values = (self.right.index.values if self.right_index else
                        self.right_join_keys[-1])
        tolerance = self.tolerance
        # we required sortedness in the join keys
        msg = " keys must be sorted"
        if not Index(left_values).is_monotonic:
            raise ValueError('left' + msg)
        if not Index(right_values).is_monotonic:
            raise ValueError('right' + msg)
        # initial type conversion as needed
        if needs_i8_conversion(left_values):
            # view datetime-likes as int64 for the cython routines
            left_values = left_values.view('i8')
            right_values = right_values.view('i8')
            if tolerance is not None:
                # Timedelta tolerance -> integer nanoseconds
                tolerance = tolerance.value
        # a "by" parameter requires special handling
        if self.left_by is not None:
            # remove 'on' parameter from values if one existed
            if self.left_index and self.right_index:
                left_by_values = self.left_join_keys
                right_by_values = self.right_join_keys
            else:
                left_by_values = self.left_join_keys[0:-1]
                right_by_values = self.right_join_keys[0:-1]
            # get tuple representation of values if more than one
            if len(left_by_values) == 1:
                left_by_values = left_by_values[0]
                right_by_values = right_by_values[0]
            else:
                left_by_values = flip(left_by_values)
                right_by_values = flip(right_by_values)
            # upcast 'by' parameter because HashTable is limited
            by_type = _get_cython_type_upcast(left_by_values.dtype)
            by_type_caster = _type_casters[by_type]
            left_by_values = by_type_caster(left_by_values)
            right_by_values = by_type_caster(right_by_values)
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_by_function(self.direction, on_type, by_type)
            return func(left_values,
                        right_values,
                        left_by_values,
                        right_by_values,
                        self.allow_exact_matches,
                        tolerance)
        else:
            # choose appropriate function by type
            on_type = _get_cython_type(left_values.dtype)
            func = _asof_function(self.direction, on_type)
            return func(left_values,
                        right_values,
                        self.allow_exact_matches,
                        tolerance)
def _get_multiindex_indexer(join_keys, index, sort):
    """
    Left-outer join of multi-level `join_keys` against a MultiIndex
    `index`, returning (left_indexer, right_indexer).
    """
    from functools import partial
    # bind `sort` argument
    fkeys = partial(_factorize_keys, sort=sort)
    # left & right join labels and num. of levels at each location
    rlab, llab, shape = map(list, zip(* map(fkeys, index.levels, join_keys)))
    if sort:
        rlab = list(map(np.take, rlab, index.labels))
    else:
        # no need to factorize again: the index codes are already labels
        i8copy = lambda a: a.astype('i8', subok=False, copy=True)
        rlab = list(map(i8copy, index.labels))
    # fix right labels if there were any nulls
    for i in range(len(join_keys)):
        mask = index.labels[i] == -1
        if mask.any():
            # check if there already was any nulls at this location
            # if there was, it is factorized to `shape[i] - 1`
            a = join_keys[i][llab[i] == shape[i] - 1]
            # `not a[0] != a[0]` is the NaN test: grow the key space only
            # when the left side has no NaN group of its own
            if a.size == 0 or not a[0] != a[0]:
                shape[i] += 1
            rlab[i][mask] = shape[i] - 1
    # get flat i8 join keys
    lkey, rkey = _get_join_keys(llab, rlab, shape, sort)
    # factorize keys to a dense i8 space
    lkey, rkey, count = fkeys(lkey, rkey)
    return libjoin.left_outer_join(lkey, rkey, count, sort=sort)
def _get_single_indexer(join_key, index, sort=False):
    """Left-outer join of a single key array against `index`.

    Returns (left_indexer, right_indexer).
    """
    lkey, rkey, count = _factorize_keys(join_key, index, sort=sort)
    return libjoin.left_outer_join(
        _ensure_int64(lkey),
        _ensure_int64(rkey),
        count, sort=sort)
def _left_join_on_index(left_ax, right_ax, join_keys, sort=False):
    """
    Left join `left_ax` onto `right_ax` using `join_keys`.

    Returns (join_index, left_indexer, right_indexer); a left_indexer
    of None signals the left axis is reused unchanged.
    """
    if len(join_keys) > 1:
        valid_multi = (isinstance(right_ax, MultiIndex) and
                       len(join_keys) == right_ax.nlevels)
        if not valid_multi:
            raise AssertionError("If more than one join key is given then "
                                 "'right_ax' must be a MultiIndex and the "
                                 "number of join keys must be the number of "
                                 "levels in right_ax")
        left_indexer, right_indexer = \
            _get_multiindex_indexer(join_keys, right_ax, sort=sort)
    else:
        left_indexer, right_indexer = \
            _get_single_indexer(join_keys[0], right_ax, sort=sort)

    if sort or len(left_ax) != len(left_indexer):
        # if asked to sort or there are 1-to-many matches
        join_index = left_ax.take(left_indexer)
        return join_index, left_indexer, right_indexer

    # left frame preserves order & length of its index
    return left_ax, None, right_indexer
def _right_outer_join(x, y, max_groups):
    """Right-outer join expressed as a left-outer join with sides swapped."""
    ridx, lidx = libjoin.left_outer_join(y, x, max_groups)
    return lidx, ridx
# dispatch table: merge `how` keyword -> low-level join implementation
_join_functions = {
    'inner': libjoin.inner_join,
    'left': libjoin.left_outer_join,
    'right': _right_outer_join,
    'outer': libjoin.full_outer_join,
}
def _factorize_keys(lk, rk, sort=True):
    """
    Factorize a pair of key arrays into integer labels drawn from a
    shared vocabulary.

    Returns (llab, rlab, count) where `count` is the number of distinct
    groups; if missing values occur they are moved into a trailing group.
    """
    if is_datetime64tz_dtype(lk) and is_datetime64tz_dtype(rk):
        # operate on the underlying values
        lk = lk.values
        rk = rk.values
    # if we exactly match in categories, allow us to use codes
    if (is_categorical_dtype(lk) and
        is_categorical_dtype(rk) and
            lk.is_dtype_equal(rk)):
        return lk.codes, rk.codes, len(lk.categories)
    if is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk):
        klass = libhashtable.Int64Factorizer
        lk = _ensure_int64(com._values_from_object(lk))
        rk = _ensure_int64(com._values_from_object(rk))
    else:
        klass = libhashtable.Factorizer
        lk = _ensure_object(lk)
        rk = _ensure_object(rk)
    # one shared factorizer so labels are comparable across both arrays
    rizer = klass(max(len(lk), len(rk)))
    llab = rizer.factorize(lk)
    rlab = rizer.factorize(rk)
    count = rizer.get_count()
    if sort:
        uniques = rizer.uniques.to_array()
        llab, rlab = _sort_labels(uniques, llab, rlab)
    # NA group
    lmask = llab == -1
    lany = lmask.any()
    rmask = rlab == -1
    rany = rmask.any()
    if lany or rany:
        # relabel missing values (-1) as their own trailing group
        if lany:
            np.putmask(llab, lmask, count)
        if rany:
            np.putmask(rlab, rmask, count)
        count += 1
    return llab, rlab, count
def _sort_labels(uniques, left, right):
    """Remap left/right labels so they follow the sorted order of uniques."""
    if not isinstance(uniques, np.ndarray):
        # tuplesafe
        uniques = Index(uniques).values

    n_left = len(left)
    combined = np.concatenate([left, right])

    _, relabeled = algos.safe_sort(uniques, combined, na_sentinel=-1)
    relabeled = _ensure_int64(relabeled)

    # split the relabeled array back into its two halves
    return relabeled[:n_left], relabeled[n_left:]
def _get_join_keys(llab, rlab, shape, sort):
    """
    Collapse per-level label arrays into single flat int64 key arrays,
    recursing whenever the combined key space could overflow int64.
    """
    # how many levels can be done without overflow
    pred = lambda i: not is_int64_overflow_possible(shape[:i])
    nlev = next(filter(pred, range(len(shape), 0, -1)))
    # get keys for the first `nlev` levels
    stride = np.prod(shape[1:nlev], dtype='i8')
    lkey = stride * llab[0].astype('i8', subok=False, copy=False)
    rkey = stride * rlab[0].astype('i8', subok=False, copy=False)
    for i in range(1, nlev):
        stride //= shape[i]
        lkey += llab[i] * stride
        rkey += rlab[i] * stride
    if nlev == len(shape):  # all done!
        return lkey, rkey
    # densify current keys to avoid overflow
    lkey, rkey, count = _factorize_keys(lkey, rkey, sort=sort)
    llab = [lkey] + llab[nlev:]
    rlab = [rkey] + rlab[nlev:]
    shape = [count] + shape[nlev:]
    # recurse with the densified key as the new leading level
    return _get_join_keys(llab, rlab, shape, sort)
def _should_fill(lname, rname):
    """Suffix-fill is needed unless both names are equal strings."""
    both_strings = (isinstance(lname, compat.string_types) and
                    isinstance(rname, compat.string_types))
    if not both_strings:
        return True
    return lname == rname
def _any(x):
return x is not None and len(x) > 0 and any([y is not None for y in x])
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/django-1.8.2/django/template/base.py | 37 | 52380 | """
This is the Django template system.
How it works:
The Lexer.tokenize() function converts a template string (i.e., a string containing
markup with custom template tags) to tokens, which can be either plain text
(TOKEN_TEXT), variables (TOKEN_VAR) or block statements (TOKEN_BLOCK).
The Parser() class takes a list of tokens in its constructor, and its parse()
method returns a compiled template -- which is, under the hood, a list of
Node objects.
Each Node is responsible for creating some sort of output -- e.g. simple text
(TextNode), variable values in a given context (VariableNode), results of basic
logic (IfNode), results of looping (ForNode), or anything else. The core Node
types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can
define their own custom node types.
Each Node has a render() method, which takes a Context and returns a string of
the rendered node. For example, the render() method of a Variable Node returns
the variable's value as a string. The render() method of a ForNode returns the
rendered output of whatever was inside the loop, recursively.
The Template class is a convenient wrapper that takes care of template
compilation and rendering.
Usage:
The only thing you should ever use directly in this file is the Template class.
Create a compiled template object with a template_string, then call render()
with a context. In the compilation stage, the TemplateSyntaxError exception
will be raised if the template doesn't have proper syntax.
Sample code:
>>> from django import template
>>> s = u'<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>'
>>> t = template.Template(s)
(t is now a compiled template, and its render() method can be called multiple
times with multiple contexts)
>>> c = template.Context({'test':True, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html><h1>Hello</h1></html>'
>>> c = template.Context({'test':False, 'varvalue': 'Hello'})
>>> t.render(c)
u'<html></html>'
"""
from __future__ import unicode_literals
import re
import warnings
from functools import partial
from importlib import import_module
from inspect import getargspec, getcallargs
from django.apps import apps
from django.template.context import ( # NOQA: imported for backwards compatibility
BaseContext, Context, ContextPopException, RequestContext,
)
from django.utils import lru_cache, six
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import (
force_str, force_text, python_2_unicode_compatible,
)
from django.utils.formats import localize
from django.utils.html import conditional_escape
from django.utils.itercompat import is_iterable
from django.utils.module_loading import module_has_submodule
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
from django.utils.text import (
get_text_list, smart_split, unescape_string_literal,
)
from django.utils.timezone import template_localtime
from django.utils.translation import pgettext_lazy, ugettext_lazy
# Token types emitted by the Lexer (see Token.token_type)
TOKEN_TEXT = 0
TOKEN_VAR = 1
TOKEN_BLOCK = 2
TOKEN_COMMENT = 3
# human-readable names for the token types, used by Token.__str__
TOKEN_MAPPING = {
    TOKEN_TEXT: 'Text',
    TOKEN_VAR: 'Var',
    TOKEN_BLOCK: 'Block',
    TOKEN_COMMENT: 'Comment',
}
# template syntax constants
FILTER_SEPARATOR = '|'
FILTER_ARGUMENT_SEPARATOR = ':'
VARIABLE_ATTRIBUTE_SEPARATOR = '.'
BLOCK_TAG_START = '{%'
BLOCK_TAG_END = '%}'
VARIABLE_TAG_START = '{{'
VARIABLE_TAG_END = '}}'
COMMENT_TAG_START = '{#'
COMMENT_TAG_END = '#}'
TRANSLATOR_COMMENT_MARK = 'Translators'
SINGLE_BRACE_START = '{'
SINGLE_BRACE_END = '}'
ALLOWED_VARIABLE_CHARS = ('abcdefghijklmnopqrstuvwxyz'
                          'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.')
# what to report as the origin for templates that come from non-loader sources
# (e.g. strings)
UNKNOWN_SOURCE = '<unknown source>'
# match a variable or block tag and capture the entire tag, including start/end
# delimiters
tag_re = (re.compile('(%s.*?%s|%s.*?%s|%s.*?%s)' %
          (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END),
           re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END),
           re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END))))
# global dictionary of libraries that have been loaded using get_library
libraries = {}
# global list of libraries to load by default for a new parser
builtins = []
class TemplateSyntaxError(Exception):
    """Raised for invalid template syntax found during parsing."""
    pass
class TemplateDoesNotExist(Exception):
    """Raised when a requested template cannot be located."""
    pass
class TemplateEncodingError(Exception):
    """Raised when a template string is neither unicode nor UTF-8."""
    pass
@python_2_unicode_compatible
class VariableDoesNotExist(Exception):
    """Raised when a template variable lookup fails during rendering."""
    def __init__(self, msg, params=()):
        self.msg = msg
        self.params = params
    def __str__(self):
        # params may hold arbitrary objects; decode them defensively
        return self.msg % tuple(force_text(p, errors='replace') for p in self.params)
class InvalidTemplateLibrary(Exception):
    """Raised when a template tag library cannot be loaded."""
    pass
class Origin(object):
    """Records where a template came from (e.g. a loader path)."""

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return self.name

    def reload(self):
        # Subclasses must know how to re-read their own template source.
        raise NotImplementedError('subclasses of Origin must provide a reload() method')
class StringOrigin(Origin):
    """Origin for templates constructed directly from a string."""

    def __init__(self, source):
        # the raw template text itself is what reload() hands back
        self.source = source
        super(StringOrigin, self).__init__(UNKNOWN_SOURCE)

    def reload(self):
        return self.source
class Template(object):
    """A compiled template; render() may be called many times."""
    def __init__(self, template_string, origin=None, name=None, engine=None):
        try:
            template_string = force_text(template_string)
        except UnicodeDecodeError:
            raise TemplateEncodingError("Templates can only be constructed "
                                        "from unicode or UTF-8 strings.")
        # If Template is instantiated directly rather than from an Engine and
        # exactly one Django template engine is configured, use that engine.
        # This is required to preserve backwards-compatibility for direct use
        # e.g. Template('...').render(Context({...}))
        if engine is None:
            from .engine import Engine
            engine = Engine.get_default()
        if engine.debug and origin is None:
            # debug mode wants an origin for error reporting
            origin = StringOrigin(template_string)
        self.nodelist = engine.compile_string(template_string, origin)
        self.name = name
        self.origin = origin
        self.engine = engine
    def __iter__(self):
        # yield every node of the compiled tree, depth-first
        for node in self.nodelist:
            for subnode in node:
                yield subnode
    def _render(self, context):
        return self.nodelist.render(context)
    def render(self, context):
        "Display stage -- can be called many times"
        context.render_context.push()
        try:
            if context.template is None:
                # bind this template to the context for the render's duration
                with context.bind_template(self):
                    return self._render(context)
            else:
                return self._render(context)
        finally:
            context.render_context.pop()
class Token(object):
    """One lexed chunk of a template: text, variable, block or comment."""
    def __init__(self, token_type, contents):
        # token_type must be TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK or
        # TOKEN_COMMENT.
        self.token_type, self.contents = token_type, contents
        # filled in by the Lexer after construction
        self.lineno = None
    def __str__(self):
        token_name = TOKEN_MAPPING[self.token_type]
        return ('<%s token: "%s...">' %
                (token_name, self.contents[:20].replace('\n', '')))
    def split_contents(self):
        """Split contents on whitespace, keeping translation-marked
        chunks like _("some text") together as a single bit."""
        split = []
        bits = iter(smart_split(self.contents))
        for bit in bits:
            # Handle translation-marked template pieces
            if bit.startswith('_("') or bit.startswith("_('"):
                # consume following bits until the closing quote-paren
                sentinal = bit[2] + ')'
                trans_bit = [bit]
                while not bit.endswith(sentinal):
                    bit = next(bits)
                    trans_bit.append(bit)
                bit = ' '.join(trans_bit)
            split.append(bit)
        return split
class Lexer(object):
    """Breaks a raw template string into a stream of Tokens."""
    def __init__(self, template_string, origin):
        self.template_string = template_string
        self.origin = origin
        self.lineno = 1
        # when truthy, holds the end-tag name that leaves verbatim mode
        self.verbatim = False
    def tokenize(self):
        """
        Return a list of tokens from a given template_string.
        """
        in_tag = False
        result = []
        # tag_re.split alternates literal text and tag matches, so the
        # meaning of each bit flips on every iteration
        for bit in tag_re.split(self.template_string):
            if bit:
                result.append(self.create_token(bit, in_tag))
            in_tag = not in_tag
        return result
    def create_token(self, token_string, in_tag):
        """
        Convert the given token string into a new Token object and return it.
        If in_tag is True, we are processing something that matched a tag,
        otherwise it should be treated as a literal string.
        """
        if in_tag and token_string.startswith(BLOCK_TAG_START):
            # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END.
            # We could do len(BLOCK_TAG_START) to be more "correct", but we've
            # hard-coded the 2s here for performance. And it's not like
            # the TAG_START values are going to change anytime, anyway.
            block_content = token_string[2:-2].strip()
            if self.verbatim and block_content == self.verbatim:
                # matching endverbatim tag found: leave verbatim mode
                self.verbatim = False
        if in_tag and not self.verbatim:
            if token_string.startswith(VARIABLE_TAG_START):
                token = Token(TOKEN_VAR, token_string[2:-2].strip())
            elif token_string.startswith(BLOCK_TAG_START):
                if block_content[:9] in ('verbatim', 'verbatim '):
                    # enter verbatim mode until the matching end tag
                    self.verbatim = 'end%s' % block_content
                token = Token(TOKEN_BLOCK, block_content)
            elif token_string.startswith(COMMENT_TAG_START):
                content = ''
                # NOTE(review): str.find returns -1 (truthy) when the mark is
                # absent, so this keeps comment content in nearly all cases --
                # confirm whether TRANSLATOR_COMMENT_MARK filtering is intended
                if token_string.find(TRANSLATOR_COMMENT_MARK):
                    content = token_string[2:-2].strip()
                token = Token(TOKEN_COMMENT, content)
        else:
            token = Token(TOKEN_TEXT, token_string)
        token.lineno = self.lineno
        self.lineno += token_string.count('\n')
        return token
class Parser(object):
    """Turns a token stream into a list of renderable Nodes."""
    def __init__(self, tokens):
        self.tokens = tokens
        self.tags = {}
        self.filters = {}
        # preload the default tag/filter libraries
        for lib in builtins:
            self.add_library(lib)
    def parse(self, parse_until=None):
        """
        Consume tokens and build nodes until one of the block names in
        `parse_until` is hit (that token is pushed back so the caller can
        see why parsing stopped) or the token stream is exhausted.
        """
        if parse_until is None:
            parse_until = []
        nodelist = self.create_nodelist()
        while self.tokens:
            token = self.next_token()
            # Use the raw values here for TOKEN_* for a tiny performance boost.
            if token.token_type == 0:  # TOKEN_TEXT
                self.extend_nodelist(nodelist, TextNode(token.contents), token)
            elif token.token_type == 1:  # TOKEN_VAR
                if not token.contents:
                    self.empty_variable(token)
                try:
                    filter_expression = self.compile_filter(token.contents)
                except TemplateSyntaxError as e:
                    # hook may swallow the error (e.g. debug parser)
                    if not self.compile_filter_error(token, e):
                        raise
                var_node = self.create_variable_node(filter_expression)
                self.extend_nodelist(nodelist, var_node, token)
            elif token.token_type == 2:  # TOKEN_BLOCK
                try:
                    command = token.contents.split()[0]
                except IndexError:
                    self.empty_block_tag(token)
                if command in parse_until:
                    # put token back on token list so calling
                    # code knows why it terminated
                    self.prepend_token(token)
                    return nodelist
                # execute callback function for this tag and append
                # resulting node
                self.enter_command(command, token)
                try:
                    compile_func = self.tags[command]
                except KeyError:
                    self.invalid_block_tag(token, command, parse_until)
                try:
                    compiled_result = compile_func(self, token)
                except TemplateSyntaxError as e:
                    if not self.compile_function_error(token, e):
                        raise
                self.extend_nodelist(nodelist, compiled_result, token)
                self.exit_command()
        if parse_until:
            self.unclosed_block_tag(parse_until)
        return nodelist
    def skip_past(self, endtag):
        # discard tokens up to and including the given end tag
        while self.tokens:
            token = self.next_token()
            if token.token_type == TOKEN_BLOCK and token.contents == endtag:
                return
        self.unclosed_block_tag([endtag])
    def create_variable_node(self, filter_expression):
        return VariableNode(filter_expression)
    def create_nodelist(self):
        return NodeList()
    def extend_nodelist(self, nodelist, node, token):
        # enforce tags that must appear before any non-text content
        if node.must_be_first and nodelist:
            try:
                if nodelist.contains_nontext:
                    raise AttributeError
            except AttributeError:
                raise TemplateSyntaxError("%r must be the first tag "
                                          "in the template." % node)
        if isinstance(nodelist, NodeList) and not isinstance(node, TextNode):
            nodelist.contains_nontext = True
        nodelist.append(node)
    def enter_command(self, command, token):
        # hook for subclasses (e.g. the debug parser)
        pass
    def exit_command(self):
        # hook for subclasses (e.g. the debug parser)
        pass
    def error(self, token, msg):
        # returns (not raises) the exception; callers raise it themselves
        return TemplateSyntaxError(msg)
    def empty_variable(self, token):
        raise self.error(token, "Empty variable tag")
    def empty_block_tag(self, token):
        raise self.error(token, "Empty block tag")
    def invalid_block_tag(self, token, command, parse_until=None):
        if parse_until:
            raise self.error(token, "Invalid block tag: '%s', expected %s" %
                (command, get_text_list(["'%s'" % p for p in parse_until])))
        raise self.error(token, "Invalid block tag: '%s'" % command)
    def unclosed_block_tag(self, parse_until):
        raise self.error(None, "Unclosed tags: %s " % ', '.join(parse_until))
    def compile_filter_error(self, token, e):
        # hook: return truthy to suppress the error
        pass
    def compile_function_error(self, token, e):
        # hook: return truthy to suppress the error
        pass
    def next_token(self):
        return self.tokens.pop(0)
    def prepend_token(self, token):
        self.tokens.insert(0, token)
    def delete_first_token(self):
        del self.tokens[0]
    def add_library(self, lib):
        self.tags.update(lib.tags)
        self.filters.update(lib.filters)
    def compile_filter(self, token):
        """
        Convenient wrapper for FilterExpression
        """
        return FilterExpression(token, self)
    def find_filter(self, filter_name):
        if filter_name in self.filters:
            return self.filters[filter_name]
        else:
            raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name)
class TokenParser(object):
    """
    Subclass this and implement the top() method to parse a template line.
    When instantiating the parser, pass in the line from the Django template
    parser.
    The parser's "tagname" instance-variable stores the name of the tag that
    the filter was called with.
    """
    def __init__(self, subject):
        self.subject = subject
        self.pointer = 0  # current scan position within `subject`
        self.backout = []  # stack of saved pointers, popped by back()
        self.tagname = self.tag()
    def top(self):
        """
        Overload this method to do the actual parsing and return the result.
        """
        raise NotImplementedError('subclasses of Tokenparser must provide a top() method')
    def more(self):
        """
        Returns True if there is more stuff in the tag.
        """
        return self.pointer < len(self.subject)
    def back(self):
        """
        Undoes the last microparser. Use this for lookahead and backtracking.
        """
        if not len(self.backout):
            raise TemplateSyntaxError("back called without some previous "
                                      "parsing")
        self.pointer = self.backout.pop()
    def tag(self):
        """
        A microparser that just returns the next tag from the line.
        """
        subject = self.subject
        i = self.pointer
        if i >= len(subject):
            raise TemplateSyntaxError("expected another tag, found "
                                      "end of string: %s" % subject)
        p = i
        # consume the tag name itself (up to the next whitespace)
        while i < len(subject) and subject[i] not in (' ', '\t'):
            i += 1
        s = subject[p:i]
        # skip trailing whitespace
        while i < len(subject) and subject[i] in (' ', '\t'):
            i += 1
        self.backout.append(self.pointer)
        self.pointer = i
        return s
    def value(self):
        """
        A microparser that parses for a value: some string constant or
        variable name.
        """
        subject = self.subject
        i = self.pointer
        def next_space_index(subject, i):
            """
            Increment pointer until a real space (i.e. a space not within
            quotes) is encountered
            """
            while i < len(subject) and subject[i] not in (' ', '\t'):
                if subject[i] in ('"', "'"):
                    c = subject[i]
                    i += 1
                    while i < len(subject) and subject[i] != c:
                        i += 1
                    if i >= len(subject):
                        raise TemplateSyntaxError("Searching for value. "
                            "Unexpected end of string in column %d: %s" %
                            (i, subject))
                i += 1
            return i
        if i >= len(subject):
            raise TemplateSyntaxError("Searching for value. Expected another "
                                      "value but found end of string: %s" %
                                      subject)
        if subject[i] in ('"', "'"):
            # quoted constant: scan to the matching closing quote
            p = i
            i += 1
            while i < len(subject) and subject[i] != subject[p]:
                i += 1
            if i >= len(subject):
                raise TemplateSyntaxError("Searching for value. Unexpected "
                                          "end of string in column %d: %s" %
                                          (i, subject))
            i += 1
            # Continue parsing until next "real" space,
            # so that filters are also included
            i = next_space_index(subject, i)
            res = subject[p:i]
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return res
        else:
            # bare variable name (possibly followed by filters)
            p = i
            i = next_space_index(subject, i)
            s = subject[p:i]
            while i < len(subject) and subject[i] in (' ', '\t'):
                i += 1
            self.backout.append(self.pointer)
            self.pointer = i
            return s
# This only matches constant *strings* (things in quotes or marked for
# translation). Numbers are treated as variables for implementation reasons
# (so that they retain their type when passed to filters).
constant_string = r"""
(?:%(i18n_open)s%(strdq)s%(i18n_close)s|
%(i18n_open)s%(strsq)s%(i18n_close)s|
%(strdq)s|
%(strsq)s)
""" % {
    'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"',  # double-quoted string
    'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'",  # single-quoted string
    'i18n_open': re.escape("_("),
    'i18n_close': re.escape(")"),
}
constant_string = constant_string.replace("\n", "")

# Grammar for a full filter expression token: an optional leading constant
# or variable, followed by any number of |filter_name:argument clauses.
# Compiled with re.VERBOSE, so the literal whitespace here is ignored.
filter_raw_string = r"""
^(?P<constant>%(constant)s)|
^(?P<var>[%(var_chars)s]+|%(num)s)|
(?:\s*%(filter_sep)s\s*
    (?P<filter_name>\w+)
    (?:%(arg_sep)s
        (?:
            (?P<constant_arg>%(constant)s)|
            (?P<var_arg>[%(var_chars)s]+|%(num)s)
        )
    )?
)""" % {
    'constant': constant_string,
    'num': r'[-+\.]?\d[\d\.e]*',
    # BUG FIX: this must be a raw string -- "\w" and "\." are invalid escape
    # sequences in a plain string literal (DeprecationWarning on Python 3.6+,
    # a SyntaxWarning/error in later versions). The runtime value is unchanged.
    'var_chars': r"\w\.",
    'filter_sep': re.escape(FILTER_SEPARATOR),
    'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR),
}
filter_re = re.compile(filter_raw_string, re.UNICODE | re.VERBOSE)
class FilterExpression(object):
    """
    Parses a variable token and its optional filters (all as a single string),
    and return a list of tuples of the filter name and arguments.
    Sample::
        >>> token = 'variable|default:"Default value"|date:"Y-m-d"'
        >>> p = Parser('')
        >>> fe = FilterExpression(token, p)
        >>> len(fe.filters)
        2
        >>> fe.var
        <Variable: 'variable'>
    """
    def __init__(self, token, parser):
        self.token = token
        matches = filter_re.finditer(token)
        var_obj = None
        filters = []
        upto = 0
        for match in matches:
            start = match.start()
            # Any gap between consecutive matches means unparseable text.
            if upto != start:
                raise TemplateSyntaxError("Could not parse some characters: "
                                          "%s|%s|%s" %
                                          (token[:upto], token[upto:start],
                                           token[start:]))
            if var_obj is None:
                # First match: the leading variable or string/number constant.
                var, constant = match.group("var", "constant")
                if constant:
                    try:
                        var_obj = Variable(constant).resolve({})
                    except VariableDoesNotExist:
                        var_obj = None
                elif var is None:
                    raise TemplateSyntaxError("Could not find variable at "
                                              "start of %s." % token)
                else:
                    var_obj = Variable(var)
            else:
                # Subsequent matches: |filter_name[:argument] clauses.
                filter_name = match.group("filter_name")
                args = []
                constant_arg, var_arg = match.group("constant_arg", "var_arg")
                if constant_arg:
                    # (False, value): pre-resolved constant, no context lookup.
                    args.append((False, Variable(constant_arg).resolve({})))
                elif var_arg:
                    # (True, Variable): resolved against the context at render time.
                    args.append((True, Variable(var_arg)))
                filter_func = parser.find_filter(filter_name)
                self.args_check(filter_name, filter_func, args)
                filters.append((filter_func, args))
            upto = match.end()
        if upto != len(token):
            raise TemplateSyntaxError("Could not parse the remainder: '%s' "
                                      "from '%s'" % (token[upto:], token))
        self.filters = filters
        self.var = var_obj

    def resolve(self, context, ignore_failures=False):
        # Resolve the leading variable, then thread the value through each
        # registered filter in order.
        if isinstance(self.var, Variable):
            try:
                obj = self.var.resolve(context)
            except VariableDoesNotExist:
                if ignore_failures:
                    obj = None
                else:
                    string_if_invalid = context.template.engine.string_if_invalid
                    if string_if_invalid:
                        # '%s' in the invalid-string is replaced by the
                        # offending variable name.
                        if '%s' in string_if_invalid:
                            return string_if_invalid % self.var
                        else:
                            return string_if_invalid
                    else:
                        obj = string_if_invalid
        else:
            # Already a literal (resolved in __init__).
            obj = self.var
        for func, args in self.filters:
            arg_vals = []
            for lookup, arg in args:
                if not lookup:
                    arg_vals.append(mark_safe(arg))
                else:
                    arg_vals.append(arg.resolve(context))
            if getattr(func, 'expects_localtime', False):
                obj = template_localtime(obj, context.use_tz)
            if getattr(func, 'needs_autoescape', False):
                new_obj = func(obj, autoescape=context.autoescape, *arg_vals)
            else:
                new_obj = func(obj, *arg_vals)
            # Propagate safe/escape markers through the filter chain.
            if getattr(func, 'is_safe', False) and isinstance(obj, SafeData):
                obj = mark_safe(new_obj)
            elif isinstance(obj, EscapeData):
                obj = mark_for_escaping(new_obj)
            else:
                obj = new_obj
        return obj

    def args_check(name, func, provided):
        # Validate that *provided* matches *func*'s signature.  Defined
        # without ``self`` on purpose: it is converted to a staticmethod below.
        provided = list(provided)
        # First argument, filter input, is implied.
        plen = len(provided) + 1
        # Check to see if a decorator is providing the real function.
        func = getattr(func, '_decorated_function', func)
        args, varargs, varkw, defaults = getargspec(func)
        alen = len(args)
        dlen = len(defaults or [])
        # Not enough OR Too many
        if plen < (alen - dlen) or plen > alen:
            raise TemplateSyntaxError("%s requires %d arguments, %d provided" %
                                      (name, alen - dlen, plen))
        return True
    args_check = staticmethod(args_check)

    def __str__(self):
        return self.token
def resolve_variable(path, context):
    """
    Returns the resolved variable, which may contain attribute syntax, within
    the given context.
    Deprecated; use the Variable class instead.
    """
    # stacklevel=2 attributes the deprecation warning to the caller.
    warnings.warn("resolve_variable() is deprecated. Use django.template."
                  "Variable(path).resolve(context) instead",
                  RemovedInDjango20Warning, stacklevel=2)
    return Variable(path).resolve(context)
class Variable(object):
    """
    A template variable, resolvable against a given context. The variable may
    be a hard-coded string (if it begins and ends with single or double quote
    marks)::
        >>> c = {'article': {'section':u'News'}}
        >>> Variable('article.section').resolve(c)
        u'News'
        >>> Variable('article').resolve(c)
        {'section': u'News'}
        >>> class AClass: pass
        >>> c = AClass()
        >>> c.article = AClass()
        >>> c.article.section = u'News'
    (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.')
    """
    def __init__(self, var):
        self.var = var
        self.literal = None          # numeric or quoted-string literal value
        self.lookups = None          # tuple of dotted-path parts, if a real variable
        self.translate = False       # True when the token was wrapped in _( ... )
        self.message_context = None  # optional pgettext message context
        if not isinstance(var, six.string_types):
            raise TypeError(
                "Variable must be a string or number, got %s" % type(var))
        try:
            # First try to treat this variable as a number.
            #
            # Note that this could cause an OverflowError here that we're not
            # catching. Since this should only happen at compile time, that's
            # probably OK.
            self.literal = float(var)
            # So it's a float... is it an int? If the original value contained a
            # dot or an "e" then it was a float, not an int.
            if '.' not in var and 'e' not in var.lower():
                self.literal = int(self.literal)
            # "2." is invalid
            if var.endswith('.'):
                raise ValueError
        except ValueError:
            # A ValueError means that the variable isn't a number.
            if var.startswith('_(') and var.endswith(')'):
                # The result of the lookup should be translated at rendering
                # time.
                self.translate = True
                var = var[2:-1]
            # If it's wrapped with quotes (single or double), then
            # we're also dealing with a literal.
            try:
                self.literal = mark_safe(unescape_string_literal(var))
            except ValueError:
                # Otherwise we'll set self.lookups so that resolve() knows we're
                # dealing with a bonafide variable
                if var.find(VARIABLE_ATTRIBUTE_SEPARATOR + '_') > -1 or var[0] == '_':
                    raise TemplateSyntaxError("Variables and attributes may "
                                              "not begin with underscores: '%s'" %
                                              var)
                self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR))

    def resolve(self, context):
        """Resolve this variable against a given context."""
        if self.lookups is not None:
            # We're dealing with a variable that needs to be resolved
            value = self._resolve_lookup(context)
        else:
            # We're dealing with a literal, so it's already been "resolved"
            value = self.literal
        if self.translate:
            if self.message_context:
                return pgettext_lazy(self.message_context, value)
            else:
                return ugettext_lazy(value)
        return value

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self.var)

    def __str__(self):
        return self.var

    def _resolve_lookup(self, context):
        """
        Performs resolution of a real variable (i.e. not a literal) against the
        given context.
        As indicated by the method's name, this method is an implementation
        detail and shouldn't be called by external code. Use Variable.resolve()
        instead.
        """
        current = context
        try:  # catch-all for silent variable failures
            for bit in self.lookups:
                try:  # dictionary lookup
                    current = current[bit]
                # ValueError/IndexError are for numpy.array lookup on
                # numpy < 1.9 and 1.9+ respectively
                except (TypeError, AttributeError, KeyError, ValueError, IndexError):
                    try:  # attribute lookup
                        # Don't return class attributes if the class is the context:
                        if isinstance(current, BaseContext) and getattr(type(current), bit):
                            raise AttributeError
                        current = getattr(current, bit)
                    except (TypeError, AttributeError) as e:
                        # Reraise an AttributeError raised by a @property
                        if (isinstance(e, AttributeError) and
                                not isinstance(current, BaseContext) and bit in dir(current)):
                            raise
                        try:  # list-index lookup
                            current = current[int(bit)]
                        except (IndexError,  # list index out of range
                                ValueError,  # invalid literal for int()
                                KeyError,  # current is a dict without `int(bit)` key
                                TypeError):  # unsubscriptable object
                            raise VariableDoesNotExist("Failed lookup for key "
                                                       "[%s] in %r",
                                                       (bit, current))  # missing attribute
                if callable(current):
                    if getattr(current, 'do_not_call_in_templates', False):
                        pass
                    elif getattr(current, 'alters_data', False):
                        # Never call data-altering methods from a template.
                        current = context.template.engine.string_if_invalid
                    else:
                        try:  # method call (assuming no args required)
                            current = current()
                        except TypeError:
                            try:
                                getcallargs(current)
                            except TypeError:  # arguments *were* required
                                current = context.template.engine.string_if_invalid  # invalid method call
                            else:
                                raise
        except Exception as e:
            # Exceptions flagged silent_variable_failure degrade to the
            # engine's invalid-string; everything else propagates.
            if getattr(e, 'silent_variable_failure', False):
                current = context.template.engine.string_if_invalid
            else:
                raise
        return current
class Node(object):
    """Base class for all compiled template nodes."""
    # Set this to True for nodes that must be first in the template (although
    # they can be preceded by text nodes.
    must_be_first = False
    # Attribute names under which child NodeLists may be stored.
    child_nodelists = ('nodelist',)

    def render(self, context):
        """
        Return the node rendered as a string.
        """
        pass

    def __iter__(self):
        yield self

    def get_nodes_by_type(self, nodetype):
        """
        Return a list of all nodes (within this node and its nodelist)
        of the given type
        """
        matched = [self] if isinstance(self, nodetype) else []
        for attr_name in self.child_nodelists:
            child_list = getattr(self, attr_name, None)
            if child_list:
                matched.extend(child_list.get_nodes_by_type(nodetype))
        return matched
class NodeList(list):
    """A list of template nodes rendered and searched as a unit."""
    # Set to True the first time a non-TextNode is inserted by
    # extend_nodelist().
    contains_nontext = False

    def render(self, context):
        rendered_bits = []
        for child in self:
            if isinstance(child, Node):
                rendered_bits.append(force_text(self.render_node(child, context)))
            else:
                rendered_bits.append(force_text(child))
        return mark_safe(''.join(rendered_bits))

    def get_nodes_by_type(self, nodetype):
        "Return a list of all nodes of the given type"
        return [match for child in self
                for match in child.get_nodes_by_type(nodetype)]

    def render_node(self, node, context):
        return node.render(context)
class TextNode(Node):
    """A literal chunk of template text, rendered verbatim."""
    def __init__(self, s):
        self.s = s  # the raw text of this node

    def __repr__(self):
        # Truncate to 25 chars and force ASCII so repr never raises.
        return force_str("<Text Node: '%s'>" % self.s[:25], 'ascii',
                         errors='replace')

    def render(self, context):
        # The context is ignored; literal text needs no resolution.
        return self.s
def render_value_in_context(value, context):
    """
    Converts any value to a string to become part of a rendered template. This
    means escaping, if required, and conversion to a unicode object. If value
    is a string, it is expected to have already been translated.
    """
    localized = localize(template_localtime(value, use_tz=context.use_tz),
                         use_l10n=context.use_l10n)
    value = force_text(localized)
    should_escape = isinstance(value, EscapeData) or (
        context.autoescape and not isinstance(value, SafeData))
    if should_escape:
        return conditional_escape(value)
    return value
class VariableNode(Node):
    """Renders a FilterExpression ({{ var|filter }}) into template output."""
    def __init__(self, filter_expression):
        self.filter_expression = filter_expression

    def __repr__(self):
        return "<Variable Node: %s>" % self.filter_expression

    def render(self, context):
        try:
            output = self.filter_expression.resolve(context)
        except UnicodeDecodeError:
            # Unicode conversion can fail sometimes for reasons out of our
            # control (e.g. exception rendering). In that case, we fail
            # quietly.
            return ''
        return render_value_in_context(output, context)
# Regex for token keyword arguments
# Group 1 captures the optional "name" part of "name=value"; group 2 the
# value.  A bare value still matches, with group 1 empty.
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
def token_kwargs(bits, parser, support_legacy=False):
    """
    A utility method for parsing token keyword arguments.
    :param bits: A list containing remainder of the token (split by spaces)
        that is to be checked for arguments. Valid arguments will be removed
        from this list.
    :param support_legacy: If set to ``True``, the legacy format
        ``1 as foo`` will be accepted. Otherwise, only the standard ``foo=1``
        format is allowed.
    :returns: A dictionary of the arguments retrieved from the ``bits`` token
        list.
    There is no requirement for all remaining token ``bits`` to be keyword
    arguments, so the dictionary will be returned as soon as an invalid
    argument format is reached.
    """
    if not bits:
        return {}
    # The first bit decides which format the whole run uses.
    match = kwarg_re.match(bits[0])
    kwarg_format = match and match.group(1)
    if not kwarg_format:
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != 'as':
            return {}
    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_re.match(bits[0])
            if not match or not match.group(1):
                return kwargs
            key, value = match.groups()
            del bits[:1]
        else:
            # Legacy "value as name" form consumes three bits at a time.
            if len(bits) < 3 or bits[1] != 'as':
                return kwargs
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if bits and not kwarg_format:
            # Legacy pairs may be chained with "and".
            if bits[0] != 'and':
                return kwargs
            del bits[:1]
    return kwargs
def parse_bits(parser, bits, params, varargs, varkw, defaults,
               takes_context, name):
    """
    Parses bits for template tag helpers (simple_tag, include_tag and
    assignment_tag), in particular by detecting syntax errors and by
    extracting positional and keyword arguments.
    """
    if takes_context:
        if params[0] == 'context':
            # The context argument is supplied by the framework, not the
            # template author, so drop it from the expected parameters.
            params = params[1:]
        else:
            raise TemplateSyntaxError(
                "'%s' is decorated with takes_context=True so it must "
                "have a first argument of 'context'" % name)
    args = []
    kwargs = {}
    unhandled_params = list(params)
    for bit in bits:
        # First we try to extract a potential kwarg from the bit
        kwarg = token_kwargs([bit], parser)
        if kwarg:
            # The kwarg was successfully extracted
            param, value = list(six.iteritems(kwarg))[0]
            if param not in params and varkw is None:
                # An unexpected keyword argument was supplied
                raise TemplateSyntaxError(
                    "'%s' received unexpected keyword argument '%s'" %
                    (name, param))
            elif param in kwargs:
                # The keyword argument has already been supplied once
                raise TemplateSyntaxError(
                    "'%s' received multiple values for keyword argument '%s'" %
                    (name, param))
            else:
                # All good, record the keyword argument
                kwargs[str(param)] = value
                if param in unhandled_params:
                    # If using the keyword syntax for a positional arg, then
                    # consume it.
                    unhandled_params.remove(param)
        else:
            if kwargs:
                raise TemplateSyntaxError(
                    "'%s' received some positional argument(s) after some "
                    "keyword argument(s)" % name)
            else:
                # Record the positional argument
                args.append(parser.compile_filter(bit))
                try:
                    # Consume from the list of expected positional arguments
                    unhandled_params.pop(0)
                except IndexError:
                    if varargs is None:
                        raise TemplateSyntaxError(
                            "'%s' received too many positional arguments" %
                            name)
    if defaults is not None:
        # Consider the last n params handled, where n is the
        # number of defaults.
        unhandled_params = unhandled_params[:-len(defaults)]
    if unhandled_params:
        # Some positional arguments were not supplied
        raise TemplateSyntaxError(
            "'%s' did not receive value(s) for the argument(s): %s" %
            (name, ", ".join("'%s'" % p for p in unhandled_params)))
    return args, kwargs
def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,
                         name, takes_context, node_class):
    """
    Returns a template.Node subclass.
    """
    tag_bits = token.split_contents()[1:]
    positional, keyword = parse_bits(parser, tag_bits, params, varargs, varkw,
                                     defaults, takes_context, name)
    return node_class(takes_context, positional, keyword)
class TagHelperNode(Node):
    """
    Base class for tag helper nodes such as SimpleNode, InclusionNode and
    AssignmentNode. Manages the positional and keyword arguments to be passed
    to the decorated function.
    """
    def __init__(self, takes_context, args, kwargs):
        self.takes_context = takes_context
        self.args = args
        self.kwargs = kwargs

    def get_resolved_arguments(self, context):
        resolved_args = [expression.resolve(context) for expression in self.args]
        if self.takes_context:
            # The rendering context is always passed first.
            resolved_args.insert(0, context)
        resolved_kwargs = {key: expression.resolve(context)
                           for key, expression in self.kwargs.items()}
        return resolved_args, resolved_kwargs
class Library(object):
    """
    A registry of template tags and filters, made available to templates via
    ``{% load %}``.  Provides the decorators (``tag``, ``filter``,
    ``simple_tag``, ``assignment_tag``, ``inclusion_tag``) used by
    templatetags modules to register their callables.
    """
    def __init__(self):
        self.filters = {}
        self.tags = {}

    def tag(self, name=None, compile_function=None):
        """Register a tag compilation function; usable as a decorator.

        Supports ``@register.tag``, ``@register.tag()``,
        ``@register.tag('name')`` and ``register.tag('name', func)``.
        """
        if name is None and compile_function is None:
            # @register.tag()
            return self.tag_function
        elif name is not None and compile_function is None:
            if callable(name):
                # @register.tag
                return self.tag_function(name)
            else:
                # @register.tag('somename') or @register.tag(name='somename')
                def dec(func):
                    return self.tag(name, func)
                return dec
        elif name is not None and compile_function is not None:
            # register.tag('somename', somefunc)
            self.tags[name] = compile_function
            return compile_function
        else:
            # BUG FIX: the format values used to be passed as a second
            # positional argument to the exception instead of being
            # interpolated, so the message was never formatted.
            raise InvalidTemplateLibrary("Unsupported arguments to "
                                         "Library.tag: (%r, %r)" % (name, compile_function))

    def tag_function(self, func):
        # Register under the (possibly decorated) function's own name.
        self.tags[getattr(func, "_decorated_function", func).__name__] = func
        return func

    def filter(self, name=None, filter_func=None, **flags):
        """Register a template filter; usable as a decorator.

        ``flags`` may carry ``expects_localtime``, ``is_safe`` and
        ``needs_autoescape``, which are copied onto the filter function for
        FilterExpression.resolve to inspect.
        """
        if name is None and filter_func is None:
            # @register.filter()
            def dec(func):
                return self.filter_function(func, **flags)
            return dec
        elif name is not None and filter_func is None:
            if callable(name):
                # @register.filter
                return self.filter_function(name, **flags)
            else:
                # @register.filter('somename') or @register.filter(name='somename')
                def dec(func):
                    return self.filter(name, func, **flags)
                return dec
        elif name is not None and filter_func is not None:
            # register.filter('somename', somefunc)
            self.filters[name] = filter_func
            for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'):
                if attr in flags:
                    value = flags[attr]
                    # set the flag on the filter for FilterExpression.resolve
                    setattr(filter_func, attr, value)
                    # set the flag on the innermost decorated function
                    # for decorators that need it e.g. stringfilter
                    if hasattr(filter_func, "_decorated_function"):
                        setattr(filter_func._decorated_function, attr, value)
            filter_func._filter_name = name
            return filter_func
        else:
            # BUG FIX: interpolate the message eagerly (see Library.tag in
            # this class for the same fix).
            raise InvalidTemplateLibrary("Unsupported arguments to "
                                         "Library.filter: (%r, %r)" % (name, filter_func))

    def filter_function(self, func, **flags):
        name = getattr(func, "_decorated_function", func).__name__
        return self.filter(name, func, **flags)

    def simple_tag(self, func=None, takes_context=None, name=None):
        """Register a tag whose rendered output is the function's return value."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)

            class SimpleNode(TagHelperNode):
                def render(self, context):
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    return func(*resolved_args, **resolved_kwargs)

            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)
            compile_func = partial(generic_tag_compiler,
                                   params=params, varargs=varargs, varkw=varkw,
                                   defaults=defaults, name=function_name,
                                   takes_context=takes_context, node_class=SimpleNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.simple_tag(...)
            return dec
        elif callable(func):
            # @register.simple_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to simple_tag")

    def assignment_tag(self, func=None, takes_context=None, name=None):
        """Register a tag that stores the function's result in a context variable
        via the trailing ``... as varname`` syntax."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)

            class AssignmentNode(TagHelperNode):
                def __init__(self, takes_context, args, kwargs, target_var):
                    super(AssignmentNode, self).__init__(takes_context, args, kwargs)
                    self.target_var = target_var

                def render(self, context):
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    context[self.target_var] = func(*resolved_args, **resolved_kwargs)
                    return ''

            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)

            def compile_func(parser, token):
                bits = token.split_contents()[1:]
                if len(bits) < 2 or bits[-2] != 'as':
                    raise TemplateSyntaxError(
                        "'%s' tag takes at least 2 arguments and the "
                        "second last argument must be 'as'" % function_name)
                target_var = bits[-1]
                bits = bits[:-2]
                args, kwargs = parse_bits(parser, bits, params,
                                          varargs, varkw, defaults, takes_context, function_name)
                return AssignmentNode(takes_context, args, kwargs, target_var)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        if func is None:
            # @register.assignment_tag(...)
            return dec
        elif callable(func):
            # @register.assignment_tag
            return dec(func)
        else:
            raise TemplateSyntaxError("Invalid arguments provided to assignment_tag")

    def inclusion_tag(self, file_name, takes_context=False, name=None):
        """Register a tag rendering *file_name* with the function's returned dict."""
        def dec(func):
            params, varargs, varkw, defaults = getargspec(func)

            class InclusionNode(TagHelperNode):
                def render(self, context):
                    """
                    Renders the specified template and context. Caches the
                    template object in render_context to avoid reparsing and
                    loading when used in a for loop.
                    """
                    resolved_args, resolved_kwargs = self.get_resolved_arguments(context)
                    _dict = func(*resolved_args, **resolved_kwargs)
                    t = context.render_context.get(self)
                    if t is None:
                        if isinstance(file_name, Template):
                            t = file_name
                        elif isinstance(getattr(file_name, 'template', None), Template):
                            t = file_name.template
                        elif not isinstance(file_name, six.string_types) and is_iterable(file_name):
                            t = context.template.engine.select_template(file_name)
                        else:
                            t = context.template.engine.get_template(file_name)
                        context.render_context[self] = t
                    new_context = context.new(_dict)
                    # Copy across the CSRF token, if present, because
                    # inclusion tags are often used for forms, and we need
                    # instructions for using CSRF protection to be as simple
                    # as possible.
                    csrf_token = context.get('csrf_token', None)
                    if csrf_token is not None:
                        new_context['csrf_token'] = csrf_token
                    return t.render(new_context)

            function_name = (name or
                             getattr(func, '_decorated_function', func).__name__)
            compile_func = partial(generic_tag_compiler,
                                   params=params, varargs=varargs, varkw=varkw,
                                   defaults=defaults, name=function_name,
                                   takes_context=takes_context, node_class=InclusionNode)
            compile_func.__doc__ = func.__doc__
            self.tag(function_name, compile_func)
            return func
        return dec
def is_library_missing(name):
    """Check if library that failed to load cannot be found under any
    templatetags directory or does exist but fails to import.
    Non-existing condition is checked recursively for each subpackage in cases
    like <appdir>/templatetags/subpackage/package/module.py.
    """
    # Don't bother to check if '.' is in name since any name will be prefixed
    # with some template root.
    path, module = name.rsplit('.', 1)
    try:
        package = import_module(path)
        return not module_has_submodule(package, module)
    except ImportError:
        # The parent package itself failed to import; recurse upward until
        # a package that does import (or the root) is reached.
        return is_library_missing(path)
def import_library(taglib_module):
    """
    Load a template tag library module.
    Verifies that the library contains a 'register' attribute, and
    returns that attribute as the representation of the library

    Returns None when the taglib submodule simply does not exist; raises
    InvalidTemplateLibrary for genuine import failures or a missing
    'register' attribute.
    """
    try:
        mod = import_module(taglib_module)
    except ImportError as e:
        # If the ImportError is because the taglib submodule does not exist,
        # that's not an error that should be raised. If the submodule exists
        # and raised an ImportError on the attempt to load it, that we want
        # to raise.
        if is_library_missing(taglib_module):
            return None
        else:
            raise InvalidTemplateLibrary("ImportError raised loading %s: %s" %
                                         (taglib_module, e))
    try:
        return mod.register
    except AttributeError:
        raise InvalidTemplateLibrary("Template library %s does not have "
                                     "a variable named 'register'" %
                                     taglib_module)
@lru_cache.lru_cache()
def get_templatetags_modules():
    """
    Return the list of all available template tag modules.
    Caches the result for faster access.
    """
    candidates = ['django.templatetags']
    candidates.extend('%s.templatetags' % app_config.name
                      for app_config in apps.get_app_configs())
    importable = []
    for candidate in candidates:
        try:
            import_module(candidate)
        except ImportError:
            # Apps without a templatetags package are simply skipped.
            continue
        importable.append(candidate)
    return importable
def get_library(library_name):
    """
    Load the template library module with the given name.
    If library is not already loaded loop over all templatetags modules
    to locate it.
    {% load somelib %} and {% load someotherlib %} loops twice.
    Subsequent loads eg. {% load somelib %} in the same process will grab
    the cached module from libraries.
    """
    lib = libraries.get(library_name, None)
    if not lib:
        templatetags_modules = get_templatetags_modules()
        tried_modules = []
        for module in templatetags_modules:
            taglib_module = '%s.%s' % (module, library_name)
            tried_modules.append(taglib_module)
            lib = import_library(taglib_module)
            if lib:
                # Cache for subsequent {% load %}s in this process.
                libraries[library_name] = lib
                break
        if not lib:
            raise InvalidTemplateLibrary("Template library %s not found, "
                                         "tried %s" %
                                         (library_name,
                                          ','.join(tried_modules)))
    return lib
def add_to_builtins(module):
    # Load the given templatetags module and make its tags/filters available
    # in every template without an explicit {% load %}.
    builtins.append(import_library(module))

# Default tag/filter libraries every template gets implicitly.
add_to_builtins('django.template.defaulttags')
add_to_builtins('django.template.defaultfilters')
add_to_builtins('django.template.loader_tags')
| mit |
PawarPawan/h2o-v3 | h2o-py/tests/testdir_algos/gbm/pyunit_weightsGBM.py | 2 | 6140 | import sys
sys.path.insert(1, "../../../")
import h2o
import random
import copy
def weights_check(ip,port):
def check_same(data1, data2, min_rows_scale):
gbm1_regression = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y="economy",
training_frame=data1,
min_rows=5,
ntrees=5,
max_depth=5)
gbm2_regression = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy"],
min_rows=5*min_rows_scale,
weights_column=data2["weights"],
ntrees=5,
max_depth=5)
gbm1_binomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["economy_20mpg"],
min_rows=5,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm2_binomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["economy_20mpg"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="bernoulli",
ntrees=5,
max_depth=5)
gbm1_multinomial = h2o.gbm(x=data1[["displacement", "power", "weight", "acceleration", "year"]],
y=data1["cylinders"],
min_rows=5,
distribution="multinomial",
ntrees=5,
max_depth=5)
gbm2_multinomial = h2o.gbm(x=data2[["displacement", "power", "weight", "acceleration", "year", "weights"]],
y=data2["cylinders"],
weights_column="weights",
training_frame=data2,
min_rows=5*min_rows_scale,
distribution="multinomial",
ntrees=5,
max_depth=5)
reg1_mse = gbm1_regression.mse()
reg2_mse = gbm2_regression.mse()
bin1_auc = gbm1_binomial.auc()
bin2_auc = gbm2_binomial.auc()
mul1_mse = gbm1_multinomial.mse()
mul2_mse = gbm2_multinomial.mse()
print "MSE (regresson) no weights vs. weights: {0}, {1}".format(reg1_mse, reg2_mse)
print "AUC (binomial) no weights vs. weights: {0}, {1}".format(bin1_auc, bin2_auc)
print "MSE (multinomial) no weights vs. weights: {0}, {1}".format(mul1_mse, mul2_mse)
assert abs(reg1_mse - reg2_mse) < 1e-6 * reg1_mse, "Expected mse's to be the same, but got {0}, and {1}".format(reg1_mse, reg2_mse)
assert abs(bin1_auc - bin2_auc) < 3e-4 * bin1_auc, "Expected auc's to be the same, but got {0}, and {1}".format(bin1_auc, bin2_auc)
assert abs(mul1_mse - mul1_mse) < 1e-6 * mul1_mse, "Expected auc's to be the same, but got {0}, and {1}".format(mul1_mse, mul2_mse)
h2o_cars_data = h2o.import_file(h2o.locate("smalldata/junit/cars_20mpg.csv"))
h2o_cars_data["economy_20mpg"] = h2o_cars_data["economy_20mpg"].asfactor()
h2o_cars_data["cylinders"] = h2o_cars_data["cylinders"].asfactor()
# uniform weights same as no weights
random.seed(2222)
weight = random.randint(1,10)
uniform_weights = [[weight] for r in range(406)]
h2o_uniform_weights = h2o.H2OFrame(python_obj=uniform_weights)
h2o_uniform_weights.setNames(["weights"])
h2o_data_uniform_weights = h2o_cars_data.cbind(h2o_uniform_weights)
print "Checking that using uniform weights is equivalent to no weights:"
print
check_same(h2o_cars_data, h2o_data_uniform_weights, weight)
# zero weights same as removed observations
zero_weights = [[0] if random.randint(0,1) else [1] for r in range(406)]
h2o_zero_weights = h2o.H2OFrame(python_obj=zero_weights)
h2o_zero_weights.setNames(["weights"])
h2o_data_zero_weights = h2o_cars_data.cbind(h2o_zero_weights)
h2o_data_zeros_removed = h2o_cars_data[h2o_zero_weights["weights"] == 1]
print "Checking that using some zero weights is equivalent to removing those observations:"
print
check_same(h2o_data_zeros_removed, h2o_data_zero_weights, 1)
# doubled weights same as doubled observations
doubled_weights = [[1] if random.randint(0,1) else [2] for r in range(406)]
h2o_doubled_weights = h2o.H2OFrame(python_obj=doubled_weights)
h2o_doubled_weights.setNames(["weights"])
h2o_data_doubled_weights = h2o_cars_data.cbind(h2o_doubled_weights)
doubled_data = h2o.as_list(h2o_cars_data, use_pandas=False)
colnames = doubled_data.pop(0)
for idx, w in enumerate(doubled_weights):
if w[0] == 2: doubled_data.append(doubled_data[idx])
h2o_data_doubled = h2o.H2OFrame(python_obj=doubled_data)
h2o_data_doubled.setNames(colnames)
h2o_data_doubled["economy_20mpg"] = h2o_data_doubled["economy_20mpg"].asfactor()
h2o_data_doubled["cylinders"] = h2o_data_doubled["cylinders"].asfactor()
h2o_data_doubled_weights["economy_20mpg"] = h2o_data_doubled_weights["economy_20mpg"].asfactor()
h2o_data_doubled_weights["cylinders"] = h2o_data_doubled_weights["cylinders"].asfactor()
print "Checking that doubling some weights is equivalent to doubling those observations:"
print
check_same(h2o_data_doubled, h2o_data_doubled_weights, 1)
# TODO: random weights
# TODO: all zero weights???
# TODO: negative weights???
if __name__ == "__main__":
    # h2o's pyunit runner: connects to the cluster given on the command line
    # and invokes weights_check.
    h2o.run_test(sys.argv, weights_check)
| apache-2.0 |
legacysurvey/legacypipe | py/scripts/dr4/format_headers.py | 2 | 18171 | from __future__ import print_function
import sys
import os
import numpy as np
from glob import glob
import astropy;print(astropy)
from astropy.io import fits
from astrometry.util.fits import fits_table, merge_tables
# sha1sums
# nohup bash -c 'for num in `cat coadd_match_ep.txt|awk '"'"'{print $1}'"'"'`; do echo $num;find /global/cscratch1/sd/desiproc/dr4/data_release/dr4_fixes/coadd/$num -type f -print0|xargs -0 sha1sum > coadd_${num}_scr.sha1;done' > sha1.out &
def bash(cmd):
    """Echo *cmd*, run it through the shell, and raise on a non-zero exit."""
    print(cmd)
    status = os.system(cmd)
    if status:
        raise RuntimeError('Command failed: %s: return value: %i' %
                           (cmd, status))
def ccds_check_for_differences(mos_fn,bok_fn):
    '''either survey-ccds or ccds-annotated'''
    # mos_fn, bok_fn: filenames of the mosaic / 90prime FITS tables to compare.
    m=fits_table(mos_fn)
    b=fits_table(bok_fn)
    # Dtype
    # Require identical column sets in both tables.
    assert(len(b.get_columns()) == len(m.get_columns()))
    emp= set(m.get_columns()).difference(set(b.get_columns()))
    assert(len(emp) == 0)
    for col in m.get_columns():
        if m.get(col).dtype != b.get(col).dtype:
            # NOTE(review): dtype mismatches are only *printed* ("columsn" is
            # a typo in the message), not raised -- presumably intentional
            # for this interactive check script; confirm.
            print('FAIL: columsn have diff types: ',col,m.get(col).dtype,b.get(col).dtype)
    print('Done')
def fix_differences_annotated_ccds(mos_fn,bok_fn,
                                   return_early=False):
    '''Harmonize column schemas of the mosaic and 90prime annotated-CCDs tables.

    Casts columns so both tables share one dtype per column, drops/creates
    columns so the column sets match, then (unless return_early) verifies the
    schemas agree and writes both tables back out (backing up the originals
    and re-gzipping).

    return_early -- if True, return the modified (mosaic, 90prime) tables
                    without checking or writing; used by
                    fix_differences_survey_ccds.
    '''
    m=fits_table(mos_fn)
    b=fits_table(bok_fn)
    # mosaic should use bass dtype
    for col,basstyp in zip (['camera','expid','expnum','propid','ut'],
                            ['S7','S15','>i8','S12','S12']):
        m.set(col,m.get(col).astype(basstyp))
    # vice-versa
    for col,mostyp in zip (['ha','image_filename'],
                           ['S12','S55']):
        b.set(col,b.get(col).astype(mostyp))
    # remove redundant cols
    for col in ['extname']:
        b.delete_column(col)
    # add extra cols
    # Mosaic lacks telfocus; fill with -1 placeholders, matching 90prime's dtype.
    for col in ['telfocus']:
        m.set(col,np.zeros((len(m),3)).astype(np.float32) - 1)
        m.get(col).dtype = b.get(col).dtype
    # deal with blank cols in 90prime
    # 90prime arawgain is blank; fill with -1 placeholders, matching mosaic's dtype.
    for col in ['arawgain']:
        b.set(col,np.zeros(len(b)).astype(np.float32) - 1)
        b.get(col).dtype = m.get(col).dtype
    # Option to not save quite yet
    if return_early:
        return m,b
    # Check: column sets and dtypes must now agree.
    assert(len(b.get_columns()) == len(m.get_columns()))
    emp= set(m.get_columns()).difference(set(b.get_columns()))
    assert(len(emp) == 0)
    for col in m.get_columns():
        if m.get(col).dtype != b.get(col).dtype:
            print('FAIL: columsn have diff types: ',col,m.get(col).dtype,b.get(col).dtype)
    # Save: write uncompressed copies, back up the original .gz, then gzip.
    m.writeto(mos_fn.replace('.fits.gz','.fits'))
    b.writeto(bok_fn.replace('.fits.gz','.fits'))
    for fn in [mos_fn,bok_fn]:
        bash('cp %s %s' % (fn,fn.replace('.fits.gz','_backup.fits.gz')))
        bash('gzip %s' % (fn.replace('.fits.gz','.fits'),))
    print('finished: fix_differences_annotated_ccds')
def fix_differences_survey_ccds(mos_fn,bok_fn):
    '''Harmonize the survey-ccds tables; like the annotated fix plus 'object'.

    Reuses fix_differences_annotated_ccds for the shared casts, additionally
    casts 90prime's 'object' column, then checks schemas and writes both
    tables back out (with backups and re-gzipping).
    '''
    # Identical to annotated
    m,b= fix_differences_annotated_ccds(mos_fn,bok_fn,return_early=True)
    # Except: survey-ccds also needs 'object' cast to mosaic's string width.
    for col,mostyp in zip (['object'],
                           ['S24']):
        b.set(col,b.get(col).astype(mostyp))
    # Check: column sets and dtypes must agree.
    assert(len(b.get_columns()) == len(m.get_columns()))
    emp= set(m.get_columns()).difference(set(b.get_columns()))
    assert(len(emp) == 0)
    for col in m.get_columns():
        if m.get(col).dtype != b.get(col).dtype:
            print('FAIL: columsn have diff types: ',col,m.get(col).dtype,b.get(col).dtype)
    # Save: write uncompressed copies, back up the original .gz, then gzip.
    m.writeto(mos_fn.replace('.fits.gz','.fits'))
    b.writeto(bok_fn.replace('.fits.gz','.fits'))
    for fn in [mos_fn,bok_fn]:
        bash('cp %s %s' % (fn,fn.replace('.fits.gz','_backup.fits.gz')))
        bash('gzip %s' % (fn.replace('.fits.gz','.fits'),))
    print('finished: fix_differences_survey_ccds')
def fix_units_survey_ccds(mos_fn,bok_fn):
    '''Convert the mosaic survey-ccds fwhm from arcsec to pixels.

    Divides mosaic fwhm by the MzLS pixel scale; the 90prime table is read
    only for the schema cross-check and is NOT rewritten (its writeto is
    commented out). Only the mosaic file is saved, backed up, and re-gzipped.
    '''
    m=fits_table(mos_fn)
    b=fits_table(bok_fn)
    mzls_pix= 0.262 #mosstat
    bass_pix= 0.470 # 0.455 is correct, but mosstat.pro has 0.470
    # mzls fwhm --> pixels
    for col,typ in zip(['fwhm'],['>f4']):
        m.set(col,m.get(col) / mzls_pix)
        m.set(col,m.get(col).astype(typ))
    # Check: column sets and dtypes still agree after the unit change.
    assert(len(b.get_columns()) == len(m.get_columns()))
    emp= set(m.get_columns()).difference(set(b.get_columns()))
    assert(len(emp) == 0)
    for col in m.get_columns():
        if m.get(col).dtype != b.get(col).dtype:
            print('FAIL: columsn have diff types: ',col,m.get(col).dtype,b.get(col).dtype)
    # Save (mosaic only)
    m.writeto(mos_fn.replace('.fits.gz','.fits'))
    #b.writeto(bok_fn.replace('.fits.gz','.fits'))
    for fn in [mos_fn]: #,bok_fn]:
        bash('cp %s %s' % (fn,fn.replace('.fits.gz','_backup.fits.gz')))
        bash('gzip %s' % (fn.replace('.fits.gz','.fits'),))
    print('finished: fix_units_survey_ccds')
def fix_order_survey_ccds(mos_fn,bok_fn):
    '''Reorder the 90prime survey-ccds columns to match the mosaic table.

    Builds a fresh table whose columns follow the mosaic column order, copies
    the 90prime data into it column by column, verifies the schemas agree,
    then writes the reordered 90prime table (backing up the original .gz and
    re-gzipping). The mosaic file is read-only here.
    '''
    m=fits_table(mos_fn)
    b=fits_table(bok_fn)
    b2=fits_table()
    # Copy 90prime columns into a new table, following mosaic's column order.
    for col in m.get_columns():
        b2.set(col,b.get(col))
    # Check: same column set and dtypes after the reorder.
    assert(len(b2.get_columns()) == len(m.get_columns()))
    emp= set(b2.get_columns()).difference(set(m.get_columns()))
    assert(len(emp) == 0)
    for col in m.get_columns():
        if m.get(col).dtype != b2.get(col).dtype:
            print('FAIL: columsn have diff types: ',col,m.get(col).dtype,b.get(col).dtype)
    # Save: write uncompressed, back up the original .gz, then gzip.
    b2.writeto(bok_fn.replace('.fits.gz','.fits'))
    for fn in [bok_fn]:
        bash('cp %s %s' % (fn,fn.replace('.fits.gz','_backup.fits.gz')))
        bash('gzip %s' % (fn.replace('.fits.gz','.fits'),))
    # Bug fix: completion message previously said 'fix_units_survey_ccds'
    # (copy-paste from the function above), hiding which step actually ran.
    print('finished: fix_order_survey_ccds')
def fix_bitmask_dr4(sccd_wbool, sccd_dr4,accd_dr4,which='mzls'):
    '''WARNING copy files from dr4 first so keep prestine
    3 files for each mzls,bass

    Rebuilds the per-CCD 'bitmask' column from the original boolean cut
    arrays and writes it into both released DR4 CCD tables.

    sccd_wbool -- survey ccds having original boolean arrays for each cut
    sccd_dr4 -- survey ccds released with dr4 that has incorrect bitmask
    accd_dr4 -- annotated ccds released with dr4 that has incorrect bitmask

    Bit values: 1=bad_expid, 2=ccd_hdu_mismatch, 4=zpts_bad_astrom.
    '''
    assert(which in ['mzls','bass'])
    # Get correct bitmask using sccd_wbool
    wbool=fits_table(sccd_wbool)
    bitmask= np.zeros(len(wbool)).astype(np.uint8)
    # mzls third_pix doesn't flag anything
    for flag,bitval in zip(['bad_expid','ccd_hdu_mismatch','zpts_bad_astrom'],
                           [1,2,4]):
        # a.get(flag) is True where ccd doesn't have this problem
        i= wbool.get(flag) == False
        bitmask[i]= bitmask[i]+ bitval
    # Replace incorrect bitmask in both released tables.
    for fn in [sccd_dr4,accd_dr4]:
        savefn= fn.replace('.gz','')
        ccd= fits_table(fn)
        ccd.bitmask= bitmask
        # Remove an outdated mask too
        for col in ['ccd_cuts']:
            if col in ccd.get_columns():
                ccd.delete_column(col)
        ccd.writeto(savefn)
        print('Wrote %s' % savefn)
        bash('gzip %s' % savefn)
        print('gzipped %s' % savefn)
    print('Done')
def modify_fits(fn, modify_func, **kwargs):
    '''Backs up and (if gzipped) decompresses a fits file, then modifies it.

    modify_func -- called as modify_func(fn, **kwargs) on the uncompressed
                   filename; it is responsible for rewriting the file in place
                   (or writing its own output).

    Non-gzipped inputs are modified directly, without a backup copy.
    '''
    # Gunzip (keeping a *_backup.fits.gz copy of the original)
    is_gzip= 'fits.gz' in fn
    if is_gzip:
        fn_backup= fn.replace('.fits.gz','_backup.fits.gz')
        bash('cp %s %s' % (fn,fn_backup))
        bash('gunzip %s' % fn)
        fn= fn.replace('.gz','')
    # Modify
    print('modifying %s' % fn)
    modify_func(fn, **kwargs)
    # Gzip the modified file back up
    if is_gzip:
        bash('gzip %s' % fn)
def modify_survey_ccds(fn, which):
    '''Add the DR4 'bitmask' column to a CCDs table and document it in the header.

    Computes the bitmask from the per-cut boolean columns, drops those boolean
    columns, writes the table (after backing up the input), then stamps the
    binary-table header with PHOTOME/BITMASK comment cards and re-gzips.

    which -- 'mzls', 'bass', or 'annot'; mzls tables also carry a third_pix
             column to drop.
    '''
    assert(which in ['mzls','bass','annot'])
    savefn= fn.replace('.gz','')
    # Modify Binary Table
    # Add bitmask: 1=bad_expid, 2=ccd_hdu_mismatch, 4=zpts_bad_astrom
    a=fits_table(fn)
    bm= np.zeros(len(a)).astype(np.uint8)
    # mzls third_pix doesn't flag anything
    for flag,bitval in zip(['bad_expid','ccd_hdu_mismatch','zpts_bad_astrom'],
                           [1,2,4]):
        # a.get(flag) is True where ccd doesn't have this problem
        i= a.get(flag) == False
        bm[i]=bm[i]+ bitval
    a.set('bitmask', bm)
    # Examples for using bitmasks
    # where bad_expid: np.where(np.bitwise_and(bm,[1]) > 0)
    # where ccds are all good: bm == 0
    # The boolean cut columns are now redundant; drop them.
    keys= ['bad_expid','ccd_hdu_mismatch','zpts_bad_astrom']
    if which == 'mzls':
        keys += ['third_pix']
    for key in keys:
        a.delete_column(key)
    bash('cp %s %s' % (fn,fn.replace('survey-ccds','backup_survey-ccds')))
    a.writeto(savefn)
    # Modify header of the binary-table extension.
    hdulist = fits.open(savefn, mode='readonly')
    # Bitmask for
    # bad_expid,ccd_hdu_mismatch,zpts_bad_astrom,third_pix
    hdulist[1].header.set('PHOTOME','photometric','True if CCD considered photometric')
    hdulist[1].header.set('BITMASK','bitmask','Additional cuts, See DR4 Docs')
    # Remove those cols
    #rng= range(63,67)
    #if which == 'bass':
    #    rng= range(65,68)
    #for i in rng:
    #    del hdulist[1].header['TTYPE' + str(i)]
    #    del hdulist[1].header['TFORM' + str(i)]
    # Write the header changes over the just-written file, then gzip.
    clob= True
    hdulist.writeto(savefn, clobber=clob)
    print('Wrote %s' % savefn)
    bash('gzip %s' % savefn)
    print('gzipped %s' % savefn)
def modify_survey_bricks(fn):
    '''Keep only bricks passing the DR4 cut and drop the per-cut bookkeeping columns.

    Rows are filtered to those with cut_dr4 set; the cut_* columns are then
    removed and the file is rewritten in place (the original is deleted first).
    '''
    b= fits_table(fn)
    b.cut(b.cut_dr4)
    for key in ['cut_dr4','cut_oom','cut_old_chkpt','cut_no_signif_srcs','cut_no_srcs']:
        b.delete_column(key)
    # writeto will not overwrite, so remove the old file first.
    bash('rm %s' % fn)
    b.writeto(fn)
    print('Write %s' % fn)
def modify_all_ccd_files(file_dr='./',survey_ccds=False,survey_bricks=False,
                         annot_ccds=False):
    '''Driver: apply the DR4 bitmask/cut fixes to the release tables in *file_dr*.

    Each boolean flag selects which family of gzipped tables to rewrite via
    modify_fits (survey-ccds, survey-bricks, and/or annotated-ccds).
    '''
    # Survey CCDs
    kwargs= dict(which='mzls')
    if survey_ccds:
        modify_fits(os.path.join(file_dr,'survey-ccds-mzls.fits.gz'),
                    modify_func=modify_survey_ccds, **kwargs)
        # NOTE(review): indentation reconstructed — the bass update is assumed
        # to be guarded by survey_ccds as well; confirm against upstream.
        kwargs.update( dict(which='bass'))
        modify_fits(os.path.join(file_dr,'survey-ccds-bass.fits.gz'),
                    modify_func=modify_survey_ccds, **kwargs)
    # Survey Bricks (modify_survey_bricks takes no 'which' argument)
    _=kwargs.pop('which')
    if survey_bricks:
        modify_fits(os.path.join(file_dr,'survey-bricks-dr4.fits.gz'),
                    modify_func=modify_survey_bricks, **kwargs)
    # Annotated CCDs
    kwargs.update(which='annot')
    if annot_ccds:
        modify_fits(os.path.join(file_dr,'ccds-annotated-dr4-mzls.fits.gz'),
                    modify_func=modify_survey_ccds, **kwargs)
        modify_fits(os.path.join(file_dr,'ccds-annotated-dr4-90prime.fits.gz'),
                    modify_func=modify_survey_ccds, **kwargs)
def makedir_for_fn(fn):
    '''Ensure the parent directory of *fn* exists.

    Idempotent: an already-existing directory is reported and ignored.
    Bug fix: the original swallowed EVERY OSError (including permission
    errors and read-only filesystems); now only EEXIST is tolerated and any
    other failure propagates to the caller.
    '''
    import errno
    try:
        os.makedirs(os.path.dirname(fn))
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
        print('no worries, dir already exists %s' % os.path.dirname(fn))
def get_fns(brick, outdir):
    """Per-brick dr4b output files whose headers need rewriting.

    Returns the tractor-i catalogue, the two metrics files, and every
    coadd product found on disk for *brick* under *outdir*.
    """
    prefix = brick[:3]
    fns = [
        os.path.join(outdir, 'tractor-i', prefix, 'tractor-%s.fits' % brick),
        os.path.join(outdir, 'metrics', prefix, 'all-models-%s.fits' % brick),
        os.path.join(outdir, 'metrics', prefix, 'blobs-%s.fits.gz' % brick),
    ]
    fns.extend(glob(os.path.join(outdir, 'coadd', prefix, brick,
                                 'legacysurvey-%s-*.fits*' % brick)))
    return fns
def get_new_fns(brick, outdir):
    """Per-brick dr4c output files (both tractor-i and tractor catalogues).

    Like get_fns but also includes the new-data-model tractor catalogue;
    coadd products are whatever exists on disk for *brick* under *outdir*.
    """
    prefix = brick[:3]
    fns = [
        os.path.join(outdir, 'tractor-i', prefix, 'tractor-%s.fits' % brick),
        os.path.join(outdir, 'tractor', prefix, 'tractor-%s.fits' % brick),
        os.path.join(outdir, 'metrics', prefix, 'all-models-%s.fits' % brick),
        os.path.join(outdir, 'metrics', prefix, 'blobs-%s.fits.gz' % brick),
    ]
    fns.extend(glob(os.path.join(outdir, 'coadd', prefix, brick,
                                 'legacysurvey-%s-*.fits*' % brick)))
    return fns
def get_touch_fn(brick, outdir):
    """Path of the done-marker file for *brick*; creates its parent directory."""
    fn = os.path.join(outdir, 'touch_files', brick[:3], 'done-%s.txt' % brick)
    makedir_for_fn(fn)
    return fn
def get_sha_fn(brick, outdir):
    """Path of the sha1sum manifest written alongside the brick's tractor file."""
    return os.path.join(outdir, 'tractor', brick[:3], 'brick-%s.sha1sum' % brick)
def new_header(new_fn):
    """Rewrite the primary FITS header of *new_fn* in place for DR4.

    Gunzips first when the name contains 'fits.gz' (re-gzipping at the end).
    A header already carrying a RELEASE card is left untouched; one with the
    old DECALSDR card gets the DR4 cards added and the DECaLS-era cards
    removed.
    """
    # Gunzip if needed; work on the uncompressed name from here on.
    is_gzip= 'fits.gz' in new_fn
    if is_gzip:
        bash('gunzip %s' % new_fn)
        new_fn= new_fn.replace('.gz','')
    # Header
    print('Editing %s' % new_fn)
    #a=fitsio.FITS(new_fn,'rw')
    #hdr=a[0].read_header()
    hdulist = fits.open(new_fn, mode='update')
    # Skip if already fixed
    if 'RELEASE' in hdulist[0].header:
        pass
    elif 'DECALSDR' in hdulist[0].header:
        # Add the DR4 identification cards (run time copied from DECALSDT).
        for key,val,comm in zip(['RELEASE','SURVEYDT','SURVEYID','DRVERSIO','WISEVR'],
                                ['4000',hdulist[0].header['DECALSDT'],'BASS MzLS','4000','neo2-cori-scratch'],
                                ['DR number','runbrick.py run time','Survey name','Survey data release number','unwise_coadds_timeresolved']):
            hdulist[0].header.set(key,val,comm)
        # Git describes: for bounding dates/commits when dr4b ran
        hdulist[0].header.set('ASTROMVR','0.67-188-gfcdd3c0, 0.67-152-gfa03658',
                              'astrometry_net (3/6-4/15/2017)')
        hdulist[0].header.set('TRACTOVR','dr4.1-9-gc73f1ab, dr4.1-9-ga5cfaa3',
                              'tractor (2/22-3/31/2017)')
        hdulist[0].header.set('LEGDIRVR','dr3-17-g645c3ab',
                              'legacypipe-dir (3/28/2017)')
        hdulist[0].header.set('LEGPIPVR','dr3e-834-g419c0ff, dr3e-887-g068df7a',
                              'legacypipe (3/15-4/19/2017)')
        # Remove the superseded DECaLS-era cards.
        rem_keys= ['DECALSDR','DECALSDT',
                   'SURVEYV','SURVEY']
        for key in rem_keys:
            del hdulist[0].header[key]
    # Write: flush the in-place 'update' mode changes.
    #clob= False
    #if '/tractor/' in new_fn:
    #    clob=True
    #clob= True
    #hdulist.writeto(new_fn, clobber=clob, output_verify='fix')
    hdulist.flush()
    hdulist.close()
    print('Modified inplace %s' % new_fn)
    # Gzip back up if we uncompressed above.
    if is_gzip:
        bash('gzip %s' % new_fn)
def tractor_i_fn(dir, brick):
    """Pre-data-model (tractor-i) catalogue path for *brick* under *dir*."""
    return os.path.join(dir, 'tractor-i', brick[:3], 'tractor-%s.fits' % brick)
def tractor_fn(dir, brick):
    """Final (new data model) tractor catalogue path for *brick* under *dir*."""
    return os.path.join(dir, 'tractor', brick[:3], 'tractor-%s.fits' % brick)
def do_checks(brick,dr4c_dir):
    """Stub for post-release sanity checks on a brick's dr4c outputs.

    Currently only gathers the expected filename list; the actual checks
    are still TODOs (see comments below).
    """
    # Sanity checks on outputs
    dr4c_fns= get_new_fns(brick,dr4c_dir)
    #size each file > 0
    #number files dr4c = dr4b
    #size dr4c ~ dr4b
    #number bricks that have nans, which cols nans appear in
def main(args=None):
    '''new data model catalouge and fix all headers
    touch a junk_brickname.txt file when done so can see which bricks finished
    OR
    sanity checks on outputs

    Command line:
      --bricklist  text file of bricknames to process (one per line)
      --brick      a single brickname (alternative to --bricklist)
      --dr4c_dir   output root for the rewritten catalogues/headers
      --sanitycheck  only run do_checks on each brick, write nothing
    '''
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--bricklist', action='store',default=None,
                        help='text file listin bricknames to rewrite headers for',
                        required=False)
    parser.add_argument('--brick', action='store',default=None,
                        help='text file listin bricknames to rewrite headers for',
                        required=False)
    parser.add_argument('--dr4c_dir', action='store',default='/global/projecta/projectdirs/cosmo/work/dr4c',
                        help='Where to write out the new data model catalouge and files with new headers',
                        required=False)
    parser.add_argument('--sanitycheck', action='store_true',default=False,
                        help='set to test integrity of dr4c files',
                        required=False)
    opt = parser.parse_args(args=args)
    if opt.bricklist:
        bricks= np.loadtxt(opt.bricklist,dtype=str)
        # Handle just 1 brick in bricklist (loadtxt returns a 0-d array then).
        assert(bricks.size > 0)
        if bricks.size == 1:
            bricks= np.array([bricks])
    elif opt.brick:
        bricks= [opt.brick]
    else:
        raise ValueError('must set either --bricklist or --brick')
    # Input (dr4b) root is hardwired; output (dr4c) root comes from the CLI.
    #dr4b_dir= '/global/projecta/projectdirs/cosmo/work/dr4b'
    dr4b_dir= '/global/cscratch1/sd/desiproc/dr4/data_release/dr4_fixes'
    #dr4c_dir= '/global/projecta/projectdirs/cosmo/work/dr4c'
    #dr4c_dir= '/global/cscratch1/sd/kaylanb/test/legacypipe/py/junk'
    dr4c_dir= opt.dr4c_dir
    for brick in bricks:
        if opt.sanitycheck:
            do_checks(brick,dr4c_dir=dr4c_dir)
            continue
        # At the end will touch a file so know all this finished
        touch_fn= get_touch_fn(brick=brick, outdir=dr4c_dir)
        if os.path.exists(touch_fn):
            print('skipping brick=%s, touch_fn=%s exists' % (brick,touch_fn))
            continue
        # New Data Model: convert tractor-i (dr4b) -> tractor (dr4c).
        in_fn= tractor_i_fn(dr4b_dir,brick)
        out_fn= tractor_fn(dr4c_dir,brick)
        try:
            os.makedirs(os.path.dirname(out_fn))
        except OSError:
            print('no worries, dir already exists %s' % os.path.dirname(out_fn))
        # format_catalogue being run outside of this code, so in the batch job
        bash('python legacypipe/format_catalog.py --in %s --out %s --dr4' % (in_fn,out_fn))
        print('du after catalogue')
        bash('du -shc %s' % out_fn)
        # Modify header in place
        new_header(new_fn=out_fn)
        print('du after header')
        bash('du -shc %s' % out_fn)
        # Headers for all other files: copy from dr4b, then edit the copies.
        fns= get_fns(brick,outdir=dr4b_dir)
        for fn in fns:
            # Copy to dr4c_dir
            new_fn= fn.replace(dr4b_dir,dr4c_dir)
            makedir_for_fn(new_fn)
            bash('cp %s %s' % (fn, new_fn))
            # Modify header in place
            new_header(new_fn= new_fn)
            print('du after header')
            bash('du -shc %s' % new_fn)
        # Sha1sum manifest over every dr4c file for this brick.
        fns= get_new_fns(brick=brick, outdir=dr4c_dir)
        sha_fn= get_sha_fn(brick=brick, outdir=dr4c_dir)
        lis= ' '.join(fns)
        bash('echo %s |xargs sha1sum > %s' % (lis,sha_fn))
        #bash("find /global/cscratch1/sd/desiproc/dr4/data_release/dr4_fixes/coadd/$num -type f -print0|xargs -0 sha1sum > coadd_${num}_scr.sha1")
        print('Wrote sha_fn=%s' % sha_fn)
        # Touch a file so know that finished
        bash('touch %s' % touch_fn)
        print('Wrote %s' % touch_fn)
if __name__ == '__main__':
    # Default entry point: new data model and new fits headers for all files
    # for the given brick(s).
    main()
    # OR: one-off table fixes (run manually instead of main):
    #modify_all_ccd_files(file_dr='./')
| bsd-3-clause |
androidarmv6/android_external_chromium_org | chrome/browser/resources/test_presubmit.py | 61 | 19703 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for Web Development Style Guide checker."""
import os
import re
import sys
import unittest
test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.extend([
os.path.normpath(os.path.join(test_dir, '..', '..', '..', 'tools')),
os.path.join(test_dir),
])
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
from web_dev_style import css_checker, js_checker # pylint: disable=F0401
class JsStyleGuideTest(SuperMoxTestBase):
  """Unit tests for the JavaScript checks in web_dev_style.js_checker.

  Each ShouldFail*/ShouldPass* helper runs one checker on a single line and
  asserts whether a style error is reported (and, on failure, that the
  expected substring is highlighted).
  """

  def setUp(self):
    SuperMoxTestBase.setUp(self)

    # The checker only needs a regex module from input_api; everything else
    # is a bare mock.
    input_api = self.mox.CreateMockAnything()
    input_api.re = re
    output_api = self.mox.CreateMockAnything()
    self.checker = js_checker.JSChecker(input_api, output_api)

  def GetHighlight(self, line, error):
    """Returns the substring of |line| that is highlighted in |error|."""
    error_lines = error.split('\n')
    highlight = error_lines[error_lines.index(line) + 1]
    return ''.join(ch1 for (ch1, ch2) in zip(line, highlight) if ch2 == '^')

  def ShouldFailConstCheck(self, line):
    """Checks that the 'const' checker flags |line| as a style error."""
    error = self.checker.ConstCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'const')

  def ShouldPassConstCheck(self, line):
    """Checks that the 'const' checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.ConstCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testConstFails(self):
    lines = [
        "const foo = 'bar';",
        "  const bar = 'foo';",

        # Trying to use |const| as a variable name
        "var const = 0;",

        "var x = 5; const y = 6;",
        "for (var i=0, const e=10; i<e; i++) {",
        "for (const x=0; x<foo; i++) {",
        "while (const x = 7) {",
    ]
    for line in lines:
      self.ShouldFailConstCheck(line)

  def testConstPasses(self):
    lines = [
        # sanity check
        "var foo = 'bar'",

        # @const JsDoc tag
        "/** @const */ var SEVEN = 7;",

        # @const tag in multi-line comment
        " * @const",
        "   * @const",

        # @constructor tag in multi-line comment
        " * @constructor",
        "   * @constructor",

        # words containing 'const'
        "if (foo.constructor) {",
        "var deconstruction = 'something';",
        "var madeUpWordconst = 10;",

        # Strings containing the word |const|
        "var str = 'const at the beginning';",
        "var str = 'At the end: const';",

        # doing this one with regex is probably not practical
        #"var str = 'a const in the middle';",
    ]
    for line in lines:
      self.ShouldPassConstCheck(line)

  def ShouldFailChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker flags |line| as a style error."""
    error = self.checker.ChromeSendCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), ', []')

  def ShouldPassChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.ChromeSendCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testChromeSendFails(self):
    # Passing an empty array as the second argument is the flagged pattern.
    lines = [
        "chrome.send('message', []);",
        "  chrome.send('message', []);",
    ]
    for line in lines:
      self.ShouldFailChromeSendCheck(line)

  def testChromeSendPasses(self):
    lines = [
        "chrome.send('message', constructArgs('foo', []));",
        "  chrome.send('message', constructArgs('foo', []));",
        "chrome.send('message', constructArgs([]));",
        "  chrome.send('message', constructArgs([]));",
    ]
    for line in lines:
      self.ShouldPassChromeSendCheck(line)

  def ShouldFailGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker flags |line| as a style
       error.
    """
    error = self.checker.GetElementByIdCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), 'document.getElementById')

  def ShouldPassGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.GetElementByIdCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testGetElementByIdFails(self):
    lines = [
        "document.getElementById('foo');",
        "  document.getElementById('foo');",
        "var x = document.getElementById('foo');",
        "if (document.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldFailGetElementByIdCheck(line)

  def testGetElementByIdPasses(self):
    # getElementById on anything other than |document| itself is fine.
    lines = [
        "elem.ownerDocument.getElementById('foo');",
        "  elem.ownerDocument.getElementById('foo');",
        "var x = elem.ownerDocument.getElementById('foo');",
        "if (elem.ownerDocument.getElementById('foo').hidden) {",
        "doc.getElementById('foo');",
        "  doc.getElementById('foo');",
        "cr.doc.getElementById('foo');",
        "  cr.doc.getElementById('foo');",
        "var x = doc.getElementById('foo');",
        "if (doc.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldPassGetElementByIdCheck(line)

  def ShouldFailInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker flags |line| as a style error."""
    error = self.checker.InheritDocCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    self.assertEqual(self.GetHighlight(line, error), '@inheritDoc')

  def ShouldPassInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.InheritDocCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testInheritDocFails(self):
    lines = [
        " /** @inheritDoc */",
        "   * @inheritDoc",
    ]
    for line in lines:
      self.ShouldFailInheritDocCheck(line)

  def testInheritDocPasses(self):
    lines = [
        "And then I said, but I won't @inheritDoc! Hahaha!",
        "  If your dad's a doctor, do you inheritDoc?",
        "  What's up, inherit doc?",
        "  this.inheritDoc(someDoc)",
    ]
    for line in lines:
      self.ShouldPassInheritDocCheck(line)

  def ShouldFailWrapperTypeCheck(self, line):
    """Checks that the use of wrapper types (i.e. new Number(), @type {Number})
       is a style error.
    """
    error = self.checker.WrapperTypeCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = self.GetHighlight(line, error)
    self.assertTrue(highlight in ('Boolean', 'Number', 'String'))

  def ShouldPassWrapperTypeCheck(self, line):
    """Checks that the wrapper type checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.WrapperTypeCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testWrapperTypePasses(self):
    lines = [
        "/** @param {!ComplexType} */",
        "  * @type {Object}",
        "  * @param {Function=} opt_callback",
        "  * @param {} num Number of things to add to {blah}.",
        "  * @return {!print_preview.PageNumberSet}",
        " /* @returns {Number} */", # Should be /** @return {Number} */
        "* @param {!LocalStrings}"
        " Your type of Boolean is false!",
        " Then I parameterized her Number from her friend!",
        " A String of Pearls",
        "  types.params.aBoolean.typeString(someNumber)",
    ]
    for line in lines:
      self.ShouldPassWrapperTypeCheck(line)

  def testWrapperTypeFails(self):
    lines = [
        "  /**@type {String}*/(string)",
        "  * @param{Number=} opt_blah A number",
        "/** @private @return {!Boolean} */",
        " * @param {number|String}",
    ]
    for line in lines:
      self.ShouldFailWrapperTypeCheck(line)

  def ShouldFailVarNameCheck(self, line):
    """Checks that var unix_hacker, $dollar are style errors."""
    error = self.checker.VarNameCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = self.GetHighlight(line, error)
    self.assertFalse('var ' in highlight);

  def ShouldPassVarNameCheck(self, line):
    """Checks that variableNamesLikeThis aren't style errors."""
    self.assertEqual('', self.checker.VarNameCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testVarNameFails(self):
    lines = [
        "var private_;",
        "  var _super_private",
        "  var unix_hacker = someFunc();",
    ]
    for line in lines:
      self.ShouldFailVarNameCheck(line)

  def testVarNamePasses(self):
    lines = [
        "  var namesLikeThis = [];",
        "  for (var i = 0; i < 10; ++i) { ",
        "for (var i in obj) {",
        "  var one, two, three;",
        "  var magnumPI = {};",
        "  var g_browser = 'da browzer';",
        "/** @const */ var Bla = options.Bla;", # goog.scope() replacement.
        "  var $ = function() {", # For legacy reasons.
        "  var StudlyCaps = cr.define('bla')", # Classes.
        "  var SCARE_SMALL_CHILDREN = [", # TODO(dbeam): add @const in
                                          # front of all these vars like
        "/** @const */ CONST_VAR = 1;", # this line has (<--).
    ]
    for line in lines:
      self.ShouldPassVarNameCheck(line)
class CssStyleGuideTest(SuperMoxTestBase):
  """Unit tests for web_dev_style.css_checker.

  Each test feeds a CSS snippet through the checker (via a mocked presubmit
  input/output API) and asserts the exact warning text produced.
  NOTE(review): leading whitespace inside the snippet strings was lost when
  this file was extracted; the string contents below reproduce the extracted
  bytes verbatim.
  """

  def setUp(self):
    SuperMoxTestBase.setUp(self)

    self.fake_file_name = 'fake.css'

    self.fake_file = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.fake_file, 'LocalPath')
    self.fake_file.LocalPath().AndReturn(self.fake_file_name)
    # Actual calls to NewContents() are defined in each test.
    self.mox.StubOutWithMock(self.fake_file, 'NewContents')

    self.input_api = self.mox.CreateMockAnything()
    self.input_api.re = re
    self.mox.StubOutWithMock(self.input_api, 'AffectedSourceFiles')
    self.input_api.AffectedFiles(
        include_deletes=False, file_filter=None).AndReturn([self.fake_file])

    # Actual creations of PresubmitPromptWarning are defined in each test.
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitPromptWarning',
                             use_mock_anything=True)

    author_msg = ('Was the CSS checker useful? '
                  'Send feedback or hate mail to dbeam@chromium.org.')
    self.output_api = self.mox.CreateMockAnything()
    self.mox.StubOutWithMock(self.output_api, 'PresubmitNotifyResult',
                             use_mock_anything=True)
    self.output_api.PresubmitNotifyResult(author_msg).AndReturn(None)

  def VerifyContentsProducesOutput(self, contents, output):
    """Runs the checker on |contents| and expects exactly |output| warnings."""
    self.fake_file.NewContents().AndReturn(contents.splitlines())
    self.output_api.PresubmitPromptWarning(
        self.fake_file_name + ':\n' + output.strip()).AndReturn(None)
    self.mox.ReplayAll()
    css_checker.CSSChecker(self.input_api, self.output_api).RunChecks()

  def testCssAlphaWithAtBlock(self):
    self.VerifyContentsProducesOutput("""
<include src="../shared/css/cr/ui/overlay.css">
<include src="chrome://resources/totally-cool.css" />
/* A hopefully safely ignored comment and @media statement. /**/
@media print {
div {
display: block;
color: red;
}
}
.rule {
z-index: 5;
<if expr="not is macosx">
background-image: url(chrome://resources/BLAH); /* TODO(dbeam): Fix this. */
background-color: rgb(235, 239, 249);
</if>
<if expr="is_macosx">
background-color: white;
background-image: url(chrome://resources/BLAH2);
</if>
color: black;
}
<if expr="is_macosx">
.language-options-right {
visibility: hidden;
opacity: 1; /* TODO(dbeam): Fix this. */
}
</if>""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
display: block;
color: red;
z-index: 5;
color: black;""")

  def testCssAlphaWithNonStandard(self):
    self.VerifyContentsProducesOutput("""
div {
/* A hopefully safely ignored comment and @media statement. /**/
color: red;
-webkit-margin-start: 5px;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
color: red;
-webkit-margin-start: 5px;""")

  def testCssAlphaWithLongerDashedProps(self):
    self.VerifyContentsProducesOutput("""
div {
border-left: 5px; /* A hopefully removed comment. */
border: 5px solid red;
}""", """
- Alphabetize properties and list vendor specific (i.e. -webkit) above standard.
border-left: 5px;
border: 5px solid red;""")

  def testCssBracesHaveSpaceBeforeAndNothingAfter(self):
    self.VerifyContentsProducesOutput("""
/* Hello! */div/* Comment here*/{
display: block;
}
blah /* hey! */
{
rule: value;
}
.this.is { /* allowed */
rule: value;
}""", """
- Start braces ({) end a selector, have a space before them and no rules after.
div{
{""")

  def testCssClassesUseDashes(self):
    self.VerifyContentsProducesOutput("""
.className,
.ClassName,
.class-name /* We should not catch this. */,
.class_name {
display: block;
}""", """
- Classes use .dash-form.
.className,
.ClassName,
.class_name {""")

  def testCssCloseBraceOnNewLine(self):
    self.VerifyContentsProducesOutput("""
@media { /* TODO(dbeam) Fix this case. */
.rule {
display: block;
}}
@-webkit-keyframe blah {
100% { height: -500px 0; }
}
#rule {
rule: value; }""", """
- Always put a rule closing brace (}) on a new line.
rule: value; }""")

  def testCssColonsHaveSpaceAfter(self):
    self.VerifyContentsProducesOutput("""
div:not(.class):not([attr=5]), /* We should not catch this. */
div:not(.class):not([attr]) /* Nor this. */ {
background: url(data:image/jpeg,asdfasdfsadf); /* Ignore this. */
background: -webkit-linear-gradient(left, red,
80% blah blee blar);
color: red;
display:block;
}""", """
- Colons (:) should have a space after them.
display:block;
- Don't use data URIs in source files. Use grit instead.
background: url(data:image/jpeg,asdfasdfsadf);""")

  def testCssFavorSingleQuotes(self):
    self.VerifyContentsProducesOutput("""
html[dir="rtl"] body,
html[dir=ltr] body /* TODO(dbeam): Require '' around rtl in future? */ {
background: url("chrome://resources/BLAH");
font-family: "Open Sans";
<if expr="is_macosx">
blah: blee;
</if>
}""", """
- Use single quotes (') instead of double quotes (") in strings.
html[dir="rtl"] body,
background: url("chrome://resources/BLAH");
font-family: "Open Sans";""")

  def testCssHexCouldBeShorter(self):
    self.VerifyContentsProducesOutput("""
#abc,
#abc-,
#abc-ghij,
#abcdef-,
#abcdef-ghij,
#aaaaaa,
#bbaacc {
background-color: #336699; /* Ignore short hex rule if not gray. */
color: #999999;
color: #666;
}""", """
- Use abbreviated hex (#rgb) when in form #rrggbb.
color: #999999; (replace with #999)
- Use rgb() over #hex when not a shade of gray (like #333).
background-color: #336699; (replace with rgb(51, 102, 153))""")

  def testCssUseMillisecondsForSmallTimes(self):
    self.VerifyContentsProducesOutput("""
.transition-0s /* This is gross but may happen. */ {
transform: one 0.2s;
transform: two .1s;
transform: tree 1s;
transform: four 300ms;
}""", """
- Use milliseconds for time measurements under 1 second.
transform: one 0.2s; (replace with 200ms)
transform: two .1s; (replace with 100ms)""")

  def testCssNoDataUrisInSourceFiles(self):
    self.VerifyContentsProducesOutput("""
img {
background: url( data:image/jpeg,4\/\/350|\/|3|2 );
background: url('data:image/jpeg,4\/\/350|\/|3|2');
}""", """
- Don't use data URIs in source files. Use grit instead.
background: url( data:image/jpeg,4\/\/350|\/|3|2 );
background: url('data:image/jpeg,4\/\/350|\/|3|2');""")

  def testCssOneRulePerLine(self):
    self.VerifyContentsProducesOutput("""
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type,
a:not([hidden]):not(.custom-appearance):not([version=1]):first-of-type ~
input[type='checkbox']:not([hidden]),
div {
background: url(chrome://resources/BLAH);
rule: value; /* rule: value; */
rule: value; rule: value;
}""", """
- One rule per line (what not to do: color: red; margin: 0;).
rule: value; rule: value;""")

  def testCssOneSelectorPerLine(self):
    self.VerifyContentsProducesOutput("""
a,
div,a,
div,/* Hello! */ span,
#id.class([dir=rtl):not(.class):any(a, b, d) {
rule: value;
}
a,
div,a {
some-other: rule here;
}""", """
- One selector per line (what not to do: a, b {}).
div,a,
div, span,
div,a {""")

  def testCssPseudoElementDoubleColon(self):
    self.VerifyContentsProducesOutput("""
a:href,
br::after,
::-webkit-scrollbar-thumb,
a:not([empty]):hover:focus:active, /* shouldn't catch here and above */
abbr:after,
.tree-label:empty:after,
b:before,
:-WebKit-ScrollBar {
rule: value;
}""", """
- Pseudo-elements should use double colon (i.e. ::after).
:after (should be ::after)
:after (should be ::after)
:before (should be ::before)
:-WebKit-ScrollBar (should be ::-WebKit-ScrollBar)
""")

  def testCssRgbIfNotGray(self):
    self.VerifyContentsProducesOutput("""
#abc,
#aaa,
#aabbcc {
background: -webkit-linear-gradient(left, from(#abc), to(#def));
color: #bad;
color: #bada55;
}""", """
- Use rgb() over #hex when not a shade of gray (like #333).
background: -webkit-linear-gradient(left, from(#abc), to(#def)); """
"""(replace with rgb(170, 187, 204), rgb(221, 238, 255))
color: #bad; (replace with rgb(187, 170, 221))
color: #bada55; (replace with rgb(186, 218, 85))""")

  def testCssZeroLengthTerms(self):
    self.VerifyContentsProducesOutput("""
@-webkit-keyframe anim {
0% { /* Ignore key frames */
width: 0px;
}
10% {
width: 10px;
}
100% {
width: 100px;
}
}
.media-button.play > .state0.active,
.media-button[state='0'] > .state0.normal /* blah */, /* blee */
.media-button[state='0']:not(.disabled):hover > .state0.hover {
-webkit-animation: anim 0s;
-webkit-animation-duration: anim 0ms;
-webkit-transform: scale(0%),
translateX(0deg),
translateY(0rad),
translateZ(0grad);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
color: hsl(0, 0%, 85%); /* Shouldn't trigger error. */
opacity: .0;
opacity: 0.0;
opacity: 0.;
}
@page {
border-width: 0mm;
height: 0cm;
width: 0in;
}""", """
- Make all zero length terms (i.e. 0px) 0 unless inside of hsl() or part of"""
""" @keyframe.
width: 0px;
-webkit-animation: anim 0s;
-webkit-animation-duration: anim 0ms;
-webkit-transform: scale(0%),
translateX(0deg),
translateY(0rad),
translateZ(0grad);
background-position-x: 0em;
background-position-y: 0ex;
border-width: 0em;
opacity: .0;
opacity: 0.0;
opacity: 0.;
border-width: 0mm;
height: 0cm;
width: 0in;
""")
if __name__ == '__main__':
  # Run all style-guide checker tests when invoked directly.
  unittest.main()
| bsd-3-clause |
mancoast/CPythonPyc_test | fail/330_test_abc.py | 5 | 14482 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Unit tests for abc.py."""
import unittest
from test import support
import abc
from inspect import isabstract
class TestLegacyAPI(unittest.TestCase):
    """Tests for the deprecated one-decorator helpers (abc.abstractproperty,
    abc.abstractclassmethod, abc.abstractstaticmethod).

    The modern spelling stacks @property/@classmethod/@staticmethod over
    @abc.abstractmethod instead; those combinations are covered by TestABC.
    """

    def test_abstractproperty_basics(self):
        # The decorator must set __isabstractmethod__ on the descriptor...
        @abc.abstractproperty
        def foo(self): pass
        self.assertTrue(foo.__isabstractmethod__)
        # ...while an undecorated function carries no such attribute.
        def bar(self): pass
        self.assertFalse(hasattr(bar, "__isabstractmethod__"))

        # A class with an abstract property cannot be instantiated;
        # overriding with a concrete property makes the subclass concrete.
        class C(metaclass=abc.ABCMeta):
            @abc.abstractproperty
            def foo(self): return 3
        self.assertRaises(TypeError, C)
        class D(C):
            @property
            def foo(self): return super().foo
        self.assertEqual(D().foo, 3)
        self.assertFalse(getattr(D.foo, "__isabstractmethod__", False))

    def test_abstractclassmethod_basics(self):
        @abc.abstractclassmethod
        def foo(cls): pass
        self.assertTrue(foo.__isabstractmethod__)
        @classmethod
        def bar(cls): pass
        self.assertFalse(getattr(bar, "__isabstractmethod__", False))

        class C(metaclass=abc.ABCMeta):
            @abc.abstractclassmethod
            def foo(cls): return cls.__name__
        self.assertRaises(TypeError, C)
        class D(C):
            @classmethod
            def foo(cls): return super().foo()
        # The classmethod binds to the subclass both on the class and on
        # instances.
        self.assertEqual(D.foo(), 'D')
        self.assertEqual(D().foo(), 'D')

    def test_abstractstaticmethod_basics(self):
        @abc.abstractstaticmethod
        def foo(): pass
        self.assertTrue(foo.__isabstractmethod__)
        @staticmethod
        def bar(): pass
        self.assertFalse(getattr(bar, "__isabstractmethod__", False))

        class C(metaclass=abc.ABCMeta):
            @abc.abstractstaticmethod
            def foo(): return 3
        self.assertRaises(TypeError, C)
        class D(C):
            @staticmethod
            def foo(): return 4
        self.assertEqual(D.foo(), 4)
        self.assertEqual(D().foo(), 4)

    def test_abstractmethod_integration(self):
        # Same abstract/concrete lifecycle for every legacy decorator:
        # C abstract -> D still abstract -> E concrete -> F re-abstracted.
        for abstractthing in [abc.abstractmethod, abc.abstractproperty,
                              abc.abstractclassmethod,
                              abc.abstractstaticmethod]:
            class C(metaclass=abc.ABCMeta):
                @abstractthing
                def foo(self): pass  # abstract
                def bar(self): pass  # concrete
            self.assertEqual(C.__abstractmethods__, {"foo"})
            self.assertRaises(TypeError, C)  # because foo is abstract
            self.assertTrue(isabstract(C))
            class D(C):
                def bar(self): pass  # concrete override of concrete
            self.assertEqual(D.__abstractmethods__, {"foo"})
            self.assertRaises(TypeError, D)  # because foo is still abstract
            self.assertTrue(isabstract(D))
            class E(D):
                def foo(self): pass
            self.assertEqual(E.__abstractmethods__, set())
            E()  # now foo is concrete, too
            self.assertFalse(isabstract(E))
            class F(E):
                @abstractthing
                def bar(self): pass  # abstract override of concrete
            self.assertEqual(F.__abstractmethods__, {"bar"})
            self.assertRaises(TypeError, F)  # because bar is abstract now
            self.assertTrue(isabstract(F))
class TestABC(unittest.TestCase):
    """Tests for abc.ABCMeta / abc.abstractmethod: the modern stacked
    decorator spellings, descriptor interaction, and virtual-subclass
    registration (register, isinstance/issubclass, transitivity)."""

    def test_abstractmethod_basics(self):
        # abstractmethod sets __isabstractmethod__; plain functions lack it.
        @abc.abstractmethod
        def foo(self): pass
        self.assertTrue(foo.__isabstractmethod__)
        def bar(self): pass
        self.assertFalse(hasattr(bar, "__isabstractmethod__"))

    def test_abstractproperty_basics(self):
        # Modern spelling: @property stacked over @abc.abstractmethod.
        @property
        @abc.abstractmethod
        def foo(self): pass
        self.assertTrue(foo.__isabstractmethod__)
        def bar(self): pass
        self.assertFalse(getattr(bar, "__isabstractmethod__", False))

        class C(metaclass=abc.ABCMeta):
            @property
            @abc.abstractmethod
            def foo(self): return 3
        self.assertRaises(TypeError, C)
        class D(C):
            @C.foo.getter
            def foo(self): return super().foo
        self.assertEqual(D().foo, 3)

    def test_abstractclassmethod_basics(self):
        @classmethod
        @abc.abstractmethod
        def foo(cls): pass
        self.assertTrue(foo.__isabstractmethod__)
        @classmethod
        def bar(cls): pass
        self.assertFalse(getattr(bar, "__isabstractmethod__", False))

        class C(metaclass=abc.ABCMeta):
            @classmethod
            @abc.abstractmethod
            def foo(cls): return cls.__name__
        self.assertRaises(TypeError, C)
        class D(C):
            @classmethod
            def foo(cls): return super().foo()
        self.assertEqual(D.foo(), 'D')
        self.assertEqual(D().foo(), 'D')

    def test_abstractstaticmethod_basics(self):
        @staticmethod
        @abc.abstractmethod
        def foo(): pass
        self.assertTrue(foo.__isabstractmethod__)
        @staticmethod
        def bar(): pass
        self.assertFalse(getattr(bar, "__isabstractmethod__", False))

        class C(metaclass=abc.ABCMeta):
            @staticmethod
            @abc.abstractmethod
            def foo(): return 3
        self.assertRaises(TypeError, C)
        class D(C):
            @staticmethod
            def foo(): return 4
        self.assertEqual(D.foo(), 4)
        self.assertEqual(D().foo(), 4)

    def test_abstractmethod_integration(self):
        # Same abstract/concrete lifecycle for every decorator variant.
        for abstractthing in [abc.abstractmethod, abc.abstractproperty,
                              abc.abstractclassmethod,
                              abc.abstractstaticmethod]:
            class C(metaclass=abc.ABCMeta):
                @abstractthing
                def foo(self): pass  # abstract
                def bar(self): pass  # concrete
            self.assertEqual(C.__abstractmethods__, {"foo"})
            self.assertRaises(TypeError, C)  # because foo is abstract
            self.assertTrue(isabstract(C))
            class D(C):
                def bar(self): pass  # concrete override of concrete
            self.assertEqual(D.__abstractmethods__, {"foo"})
            self.assertRaises(TypeError, D)  # because foo is still abstract
            self.assertTrue(isabstract(D))
            class E(D):
                def foo(self): pass
            self.assertEqual(E.__abstractmethods__, set())
            E()  # now foo is concrete, too
            self.assertFalse(isabstract(E))
            class F(E):
                @abstractthing
                def bar(self): pass  # abstract override of concrete
            self.assertEqual(F.__abstractmethods__, {"bar"})
            self.assertRaises(TypeError, F)  # because bar is abstract now
            self.assertTrue(isabstract(F))

    def test_descriptors_with_abstractmethod(self):
        # A property is abstract while *either* accessor is abstract.
        class C(metaclass=abc.ABCMeta):
            @property
            @abc.abstractmethod
            def foo(self): return 3
            @foo.setter
            @abc.abstractmethod
            def foo(self, val): pass
        self.assertRaises(TypeError, C)
        class D(C):
            @C.foo.getter
            def foo(self): return super().foo
        self.assertRaises(TypeError, D)
        class E(D):
            @D.foo.setter
            def foo(self, val): pass
        self.assertEqual(E().foo, 3)
        # check that the property's __isabstractmethod__ descriptor does the
        # right thing when presented with a value that fails truth testing:
        class NotBool(object):
            def __nonzero__(self):
                raise ValueError()
            __len__ = __nonzero__
        with self.assertRaises(ValueError):
            class F(C):
                def bar(self):
                    pass
                bar.__isabstractmethod__ = NotBool()
                foo = property(bar)

    def test_customdescriptors_with_abstractmethod(self):
        # User-defined descriptors can participate by exposing a computed
        # __isabstractmethod__ of their own.
        class Descriptor:
            def __init__(self, fget, fset=None):
                self._fget = fget
                self._fset = fset
            def getter(self, callable):
                return Descriptor(callable, self._fget)
            def setter(self, callable):
                return Descriptor(self._fget, callable)
            @property
            def __isabstractmethod__(self):
                return (getattr(self._fget, '__isabstractmethod__', False)
                        or getattr(self._fset, '__isabstractmethod__', False))
        class C(metaclass=abc.ABCMeta):
            @Descriptor
            @abc.abstractmethod
            def foo(self): return 3
            @foo.setter
            @abc.abstractmethod
            def foo(self, val): pass
        self.assertRaises(TypeError, C)
        class D(C):
            @C.foo.getter
            def foo(self): return super().foo
        self.assertRaises(TypeError, D)
        class E(D):
            @D.foo.setter
            def foo(self, val): pass
        self.assertFalse(E.foo.__isabstractmethod__)

    def test_metaclass_abc(self):
        # Metaclasses can be ABCs, too.
        class A(metaclass=abc.ABCMeta):
            @abc.abstractmethod
            def x(self):
                pass
        self.assertEqual(A.__abstractmethods__, {"x"})
        class meta(type, A):
            def x(self):
                return 1
        class C(metaclass=meta):
            pass

    def test_registration_basics(self):
        # register() turns an unrelated class into a virtual subclass and
        # returns the registered class (enabling decorator usage).
        class A(metaclass=abc.ABCMeta):
            pass
        class B(object):
            pass
        b = B()
        self.assertFalse(issubclass(B, A))
        self.assertFalse(issubclass(B, (A,)))
        self.assertNotIsInstance(b, A)
        self.assertNotIsInstance(b, (A,))
        B1 = A.register(B)
        self.assertTrue(issubclass(B, A))
        self.assertTrue(issubclass(B, (A,)))
        self.assertIsInstance(b, A)
        self.assertIsInstance(b, (A,))
        self.assertIs(B1, B)
        class C(B):
            pass
        c = C()
        self.assertTrue(issubclass(C, A))
        self.assertTrue(issubclass(C, (A,)))
        self.assertIsInstance(c, A)
        self.assertIsInstance(c, (A,))

    def test_register_as_class_deco(self):
        class A(metaclass=abc.ABCMeta):
            pass
        @A.register
        class B(object):
            pass
        b = B()
        self.assertTrue(issubclass(B, A))
        self.assertTrue(issubclass(B, (A,)))
        self.assertIsInstance(b, A)
        self.assertIsInstance(b, (A,))
        @A.register
        class C(B):
            pass
        c = C()
        self.assertTrue(issubclass(C, A))
        self.assertTrue(issubclass(C, (A,)))
        self.assertIsInstance(c, A)
        self.assertIsInstance(c, (A,))
        self.assertIs(C, A.register(C))

    def test_isinstance_invalidation(self):
        # Registering after a negative isinstance() check must invalidate
        # the ABC's cached result.
        class A(metaclass=abc.ABCMeta):
            pass
        class B:
            pass
        b = B()
        self.assertFalse(isinstance(b, A))
        self.assertFalse(isinstance(b, (A,)))
        A.register(B)
        self.assertTrue(isinstance(b, A))
        self.assertTrue(isinstance(b, (A,)))

    def test_registration_builtins(self):
        class A(metaclass=abc.ABCMeta):
            pass
        A.register(int)
        self.assertIsInstance(42, A)
        self.assertIsInstance(42, (A,))
        self.assertTrue(issubclass(int, A))
        self.assertTrue(issubclass(int, (A,)))
        class B(A):
            pass
        B.register(str)
        class C(str): pass
        self.assertIsInstance("", A)
        self.assertIsInstance("", (A,))
        self.assertTrue(issubclass(str, A))
        self.assertTrue(issubclass(str, (A,)))
        self.assertTrue(issubclass(C, A))
        self.assertTrue(issubclass(C, (A,)))

    def test_registration_edge_cases(self):
        class A(metaclass=abc.ABCMeta):
            pass
        A.register(A)  # should pass silently
        class A1(A):
            pass
        self.assertRaises(RuntimeError, A1.register, A)  # cycles not allowed
        class B(object):
            pass
        A1.register(B)  # ok
        A1.register(B)  # should pass silently
        class C(A):
            pass
        A.register(C)  # should pass silently
        self.assertRaises(RuntimeError, C.register, A)  # cycles not allowed
        C.register(B)  # ok

    def test_register_non_class(self):
        class A(metaclass=abc.ABCMeta):
            pass
        self.assertRaisesRegex(TypeError, "Can only register classes",
                               A.register, 4)

    def test_registration_transitiveness(self):
        # Virtual subclassing is transitive through registration and real
        # inheritance, but never flows "sideways" to unregistered classes.
        class A(metaclass=abc.ABCMeta):
            pass
        self.assertTrue(issubclass(A, A))
        self.assertTrue(issubclass(A, (A,)))
        class B(metaclass=abc.ABCMeta):
            pass
        self.assertFalse(issubclass(A, B))
        self.assertFalse(issubclass(A, (B,)))
        self.assertFalse(issubclass(B, A))
        self.assertFalse(issubclass(B, (A,)))
        class C(metaclass=abc.ABCMeta):
            pass
        A.register(B)
        class B1(B):
            pass
        self.assertTrue(issubclass(B1, A))
        self.assertTrue(issubclass(B1, (A,)))
        class C1(C):
            pass
        B1.register(C1)
        self.assertFalse(issubclass(C, B))
        self.assertFalse(issubclass(C, (B,)))
        self.assertFalse(issubclass(C, B1))
        self.assertFalse(issubclass(C, (B1,)))
        self.assertTrue(issubclass(C1, A))
        self.assertTrue(issubclass(C1, (A,)))
        self.assertTrue(issubclass(C1, B))
        self.assertTrue(issubclass(C1, (B,)))
        self.assertTrue(issubclass(C1, B1))
        self.assertTrue(issubclass(C1, (B1,)))
        C1.register(int)
        class MyInt(int):
            pass
        self.assertTrue(issubclass(MyInt, A))
        self.assertTrue(issubclass(MyInt, (A,)))
        self.assertIsInstance(42, A)
        self.assertIsInstance(42, (A,))

    def test_all_new_methods_are_called(self):
        # Cooperative __new__ along the MRO must not be skipped by ABCMeta.
        class A(metaclass=abc.ABCMeta):
            pass
        class B(object):
            counter = 0
            def __new__(cls):
                B.counter += 1
                return super().__new__(cls)
        class C(A, B):
            pass
        self.assertEqual(B.counter, 0)
        C()
        self.assertEqual(B.counter, 1)
def test_main():
    """Run the abc test suite under regrtest.

    Runs both test case classes defined in this file. Previously only
    TestABC was passed to run_unittest, so TestLegacyAPI (the deprecated
    abstractproperty/abstractclassmethod/abstractstaticmethod tests) was
    defined but never executed.
    """
    support.run_unittest(TestLegacyAPI, TestABC)
# Allow running this test file directly (outside regrtest).
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/contrib/specs/python/specs_test.py | 22 | 9621 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Testing specs specifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.specs import python
from tensorflow.contrib.specs.python import summaries
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.math_ops # pylint: disable=unused-import
from tensorflow.python.platform import test
specs = python
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class SpecsTest(test.TestCase):
  """Tests for the specs mini-language: each test builds a network from a
  spec string with specs.create_net, checks the resulting tensor shape at
  graph-construction time and after evaluation, and (where applicable)
  compares the op structure reported by summaries.tf_spec_structure."""

  def testSimpleConv(self):
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 18, 19, 5))
      spec = "net = Cr(64, [5, 5])"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 18, 19, 64])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 18, 19, 64))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu")

  def testUnary(self):
    # This is just a quick and dirty check that these ops exist
    # and work as unary ops.
    with self.test_session():
      inputs = constant_op.constant(_rand(17, 55))
      spec = "net = Do(0.5) | Bn | Unit(1) | Relu | Sig | Tanh | Smax"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [17, 55])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (17, 55))

  def testAdd(self):
    # "+" combines two subnetworks elementwise.
    with self.test_session():
      inputs = constant_op.constant(_rand(17, 55))
      spec = "net = Fs(10) + Fr(10)"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [17, 10])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (17, 10))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 dot variablev2 biasadd sig "
          "<> variablev2 dot variablev2 biasadd relu add")

  def testMpPower(self):
    # "**" repeats a subnetwork; three 2x2 max-pools shrink 64 -> 8.
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "M2 = Mp([2, 2]); net = M2**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ maxpool maxpool maxpool")

  def testAbbrevPower(self):
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "C3 = Cr([3, 3]); M2 = Mp([2, 2]); net = (C3(5) | M2)**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu maxpool"
          " variablev2 conv variablev2"
          " biasadd relu maxpool variablev2 conv variablev2"
          " biasadd relu maxpool")

  def testAbbrevPower2(self):
    # Same as testAbbrevPower but using positional-placeholder (_0/_1)
    # argument binding in the abbreviation definitions.
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "C3 = Cr(_1=[3, 3]); M2 = Mp([2, 2]);"
      spec += "net = (C3(_0=5) | M2)**3"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 8, 8, 5])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 8, 8, 5))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 conv variablev2 biasadd relu maxpool"
          " variablev2 conv variablev2 biasadd relu"
          " maxpool variablev2 conv variablev2 biasadd relu"
          " maxpool")

  def testConc(self):
    # Conc concatenates two subnetwork outputs along the given axis.
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = "net = Conc(1, Fs(20), Fs(10))"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [10, 30])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (10, 30))
      self.assertEqual(
          summaries.tf_spec_structure(spec, inputs),
          "_ variablev2 dot variablev2 biasadd sig "
          "<> variablev2 dot variablev2 biasadd sig _ concatv2")

  def testImport(self):
    # Import executes arbitrary Python inside the spec and binds `f`.
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = ("S = Import('from tensorflow.python.ops" +
              " import math_ops; f = math_ops.sigmoid')")
      spec += "; net = S | S"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [10, 20])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (10, 20))
      self.assertEqual(summaries.tf_spec_structure(spec, inputs), "_ sig sig")

  def testLstm2(self):
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "net = Lstm2(15)"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 64, 64, 15])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 64, 64, 15))

  def testLstm2to1(self):
    # 2D -> 1D reduction variant of the LSTM layer.
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "net = Lstm2to1(15)"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 64, 15])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 64, 15))

  def testLstm2to0(self):
    # 2D -> 0D (per-batch vector) reduction variant.
    with self.test_session():
      inputs = constant_op.constant(_rand(1, 64, 64, 5))
      spec = "net = Lstm2to0(15)"
      outputs = specs.create_net(spec, inputs)
      self.assertEqual(outputs.get_shape().as_list(), [1, 15])
      variables.global_variables_initializer().run()
      result = outputs.eval()
      self.assertEqual(tuple(result.shape), (1, 15))

  def testKeywordRestriction(self):
    # Statements like `import` are rejected inside spec strings.
    with self.test_session():
      inputs = constant_op.constant(_rand(10, 20))
      spec = "import re; net = Conc(1, Fs(20), Fs(10))"
      self.assertRaises(ValueError, lambda: specs.create_net(spec, inputs))

  def testParams(self):
    # eval_params binds literal and distribution-valued hyperparameters.
    params = "x = 3; y = Ui(-10, 10); z = Lf(1, 100); q = Nt(0.0, 1.0)"
    bindings = specs.eval_params(params, {})
    self.assertTrue("x" in bindings)
    self.assertEqual(bindings["x"], 3)
    self.assertTrue("y" in bindings)
    self.assertTrue("z" in bindings)
    self.assertTrue("q" in bindings)

  # XXX: the cleverness of this code is over 9000
  # TODO: original author please fix
  def DISABLED_testSpecsOps(self):
    # pylint: disable=undefined-variable
    with self.assertRaises(NameError):
      _ = Cr
    with specs.ops:
      self.assertIsNotNone(Cr)
      self.assertTrue(callable(Cr(64, [3, 3])))
    with self.assertRaises(NameError):
      _ = Cr

  # XXX: the cleverness of this code is over 9000
  # TODO: original author please fix
  def DISABLED_testVar(self):
    with self.test_session() as sess:
      with specs.ops:
        # pylint: disable=undefined-variable
        v = Var("test_var",
                shape=[2, 2],
                initializer=init_ops.constant_initializer(42.0))
      inputs = constant_op.constant(_rand(10, 100))
      outputs = v.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 1)
      sess.run([outputs.initializer])
      outputs_value = outputs.eval()
      self.assertEqual(outputs_value.shape, (2, 2))
      self.assertEqual(outputs_value[1, 1], 42.0)

  # XXX: the cleverness of this code is over 9000
  # TODO: original author please fix
  def DISABLED_testShared(self):
    with self.test_session():
      with specs.ops:
        # pylint: disable=undefined-variable
        f = Shared(Fr(100))
        g = f | f | f | f
      inputs = constant_op.constant(_rand(10, 100))
      _ = g.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 2)
# Allow running this test file directly.
if __name__ == "__main__":
  test.main()
| mit |
mrquim/mrquimrepo | plugin.video.playlistLoader/resources/lib/chardet/__init__.py | 745 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.2.1"
from sys import version_info
def detect(aBuf):
    """Detect the character encoding of *aBuf*.

    Feeds the buffer through a UniversalDetector and returns its result
    dict. Raises ValueError when given text instead of bytes (a ``unicode``
    object on Python 2, a ``str`` on Python 3).
    """
    is_py2 = version_info < (3, 0)
    if ((is_py2 and isinstance(aBuf, unicode))
            or (not is_py2 and not isinstance(aBuf, bytes))):
        raise ValueError('Expected a bytes object, not a unicode object')

    # Imported lazily so merely importing the package stays cheap.
    from . import universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
| gpl-2.0 |
feroda/django | django/contrib/gis/db/backends/oracle/schema.py | 608 | 4050 | from django.contrib.gis.db.models.fields import GeometryField
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.backends.utils import truncate_name
class OracleGISSchemaEditor(DatabaseSchemaEditor):
    """Oracle Spatial schema editor.

    Besides the base DDL, it keeps the USER_SDO_GEOM_METADATA dictionary
    view and spatial indexes in sync whenever a model with GeometryField
    columns is created, deleted, or altered.
    """

    # SQL templates; values are interpolated with %-formatting after being
    # quoted by geo_quote_name()/quote_name() below.
    sql_add_geometry_metadata = ("""
INSERT INTO USER_SDO_GEOM_METADATA
("TABLE_NAME", "COLUMN_NAME", "DIMINFO", "SRID")
VALUES (
%(table)s,
%(column)s,
MDSYS.SDO_DIM_ARRAY(
MDSYS.SDO_DIM_ELEMENT('LONG', %(dim0)s, %(dim2)s, %(tolerance)s),
MDSYS.SDO_DIM_ELEMENT('LAT', %(dim1)s, %(dim3)s, %(tolerance)s)
),
%(srid)s
)""")
    sql_add_spatial_index = 'CREATE INDEX %(index)s ON %(table)s(%(column)s) INDEXTYPE IS MDSYS.SPATIAL_INDEX'
    sql_drop_spatial_index = 'DROP INDEX %(index)s'
    sql_clear_geometry_table_metadata = 'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s'
    sql_clear_geometry_field_metadata = (
        'DELETE FROM USER_SDO_GEOM_METADATA WHERE TABLE_NAME = %(table)s '
        'AND COLUMN_NAME = %(column)s'
    )

    def __init__(self, *args, **kwargs):
        super(OracleGISSchemaEditor, self).__init__(*args, **kwargs)
        # Geometry statements queued by column_sql() and flushed after the
        # table DDL by run_geometry_sql().
        self.geometry_sql = []

    def geo_quote_name(self, name):
        # Backend-specific quoting for names embedded in the SQL templates.
        return self.connection.ops.geo_quote_name(name)

    def column_sql(self, model, field, include_default=False):
        # Delegate column DDL to the base editor; for geometry fields also
        # queue the SDO metadata row and (optionally) a spatial index.
        column_sql = super(OracleGISSchemaEditor, self).column_sql(model, field, include_default)
        if isinstance(field, GeometryField):
            db_table = model._meta.db_table
            self.geometry_sql.append(
                self.sql_add_geometry_metadata % {
                    'table': self.geo_quote_name(db_table),
                    'column': self.geo_quote_name(field.column),
                    # DIMINFO bounds: extent is (xmin, ymin, xmax, ymax), so
                    # LONG uses indices 0/2 and LAT uses 1/3.
                    'dim0': field._extent[0],
                    'dim1': field._extent[1],
                    'dim2': field._extent[2],
                    'dim3': field._extent[3],
                    'tolerance': field._tolerance,
                    'srid': field.srid,
                }
            )
            if field.spatial_index:
                self.geometry_sql.append(
                    self.sql_add_spatial_index % {
                        'index': self.quote_name(self._create_spatial_index_name(model, field)),
                        'table': self.quote_name(db_table),
                        'column': self.quote_name(field.column),
                    }
                )
        return column_sql

    def create_model(self, model):
        super(OracleGISSchemaEditor, self).create_model(model)
        self.run_geometry_sql()

    def delete_model(self, model):
        # Drop the table, then purge all of its SDO metadata rows.
        super(OracleGISSchemaEditor, self).delete_model(model)
        self.execute(self.sql_clear_geometry_table_metadata % {
            'table': self.geo_quote_name(model._meta.db_table),
        })

    def add_field(self, model, field):
        super(OracleGISSchemaEditor, self).add_field(model, field)
        self.run_geometry_sql()

    def remove_field(self, model, field):
        # Clear metadata and spatial index *before* dropping the column.
        if isinstance(field, GeometryField):
            self.execute(self.sql_clear_geometry_field_metadata % {
                'table': self.geo_quote_name(model._meta.db_table),
                'column': self.geo_quote_name(field.column),
            })
            if field.spatial_index:
                self.execute(self.sql_drop_spatial_index % {
                    'index': self.quote_name(self._create_spatial_index_name(model, field)),
                })
        super(OracleGISSchemaEditor, self).remove_field(model, field)

    def run_geometry_sql(self):
        # Flush and reset the queued geometry statements.
        for sql in self.geometry_sql:
            self.execute(sql)
        self.geometry_sql = []

    def _create_spatial_index_name(self, model, field):
        # Oracle doesn't allow object names > 30 characters. Use this scheme
        # instead of self._create_index_name() for backwards compatibility.
        return truncate_name('%s_%s_id' % (model._meta.db_table, field.column), 30)
| bsd-3-clause |
0k/OpenUpgrade | addons/auth_ldap/users_ldap.py | 6 | 10715 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ldap
import logging
from ldap.filter import filter_format
import openerp.exceptions
from openerp import tools
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
class CompanyLDAP(osv.osv):
    """Per-company LDAP server configuration plus the helpers used to
    query the directory and authenticate/provision users.

    NOTE: this module is Python 2 code (``except Exception, e`` syntax).
    """
    _name = 'res.company.ldap'
    _order = 'sequence'
    _rec_name = 'ldap_server'

    def get_ldap_dicts(self, cr, ids=None):
        """
        Retrieve res_company_ldap resources from the database in dictionary
        format.

        :param list ids: Valid ids of model res_company_ldap. If not \
        specified, process all resources (unlike other ORM methods).
        :return: ldap configurations
        :rtype: list of dictionaries
        """
        if ids:
            id_clause = 'AND id IN (%s)'
            args = [tuple(ids)]
        else:
            id_clause = ''
            args = []
        # Only configurations with a non-empty server address are returned,
        # ordered by their 'sequence' priority.
        cr.execute("""
SELECT id, company, ldap_server, ldap_server_port, ldap_binddn,
ldap_password, ldap_filter, ldap_base, "user", create_user,
ldap_tls
FROM res_company_ldap
WHERE ldap_server != '' """ + id_clause + """ ORDER BY sequence
""", args)
        return cr.dictfetchall()

    def connect(self, conf):
        """
        Connect to an LDAP server specified by an ldap
        configuration dictionary.

        :param dict conf: LDAP configuration
        :return: an LDAP object
        """
        uri = 'ldap://%s:%d' % (conf['ldap_server'],
                                conf['ldap_server_port'])
        connection = ldap.initialize(uri)
        if conf['ldap_tls']:
            # Upgrade the plain connection via STARTTLS when configured.
            connection.start_tls_s()
        return connection

    def authenticate(self, conf, login, password):
        """
        Authenticate a user against the specified LDAP server.

        In order to prevent an unintended 'unauthenticated authentication',
        which is an anonymous bind with a valid dn and a blank password,
        check for empty passwords explicitely (:rfc:`4513#section-6.3.1`)

        :param dict conf: LDAP configuration
        :param login: username
        :param password: Password for the LDAP user
        :return: LDAP entry of authenticated user or False
        :rtype: dictionary of attributes
        """
        if not password:
            return False
        entry = False
        # NOTE: 'filter' shadows the builtin; kept for API stability.
        filter = filter_format(conf['ldap_filter'], (login,))
        try:
            results = self.query(conf, filter)

            # Get rid of (None, attrs) for searchResultReference replies
            results = [i for i in results if i[0]]
            # Require exactly one match before attempting the bind.
            if results and len(results) == 1:
                dn = results[0][0]
                conn = self.connect(conf)
                conn.simple_bind_s(dn, password)
                conn.unbind()
                entry = results[0]
        except ldap.INVALID_CREDENTIALS:
            return False
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return entry

    def query(self, conf, filter, retrieve_attributes=None):
        """
        Query an LDAP server with the filter argument and scope subtree.

        Allow for all authentication methods of the simple authentication
        method:

        - authenticated bind (non-empty binddn + valid password)
        - anonymous bind (empty binddn + empty password)
        - unauthenticated authentication (non-empty binddn + empty password)

        .. seealso::
           :rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.

        :param dict conf: LDAP configuration
        :param filter: valid LDAP filter
        :param list retrieve_attributes: LDAP attributes to be retrieved. \
        If not specified, return all attributes.
        :return: ldap entries
        :rtype: list of tuples (dn, attrs)
        """
        results = []
        try:
            conn = self.connect(conf)
            conn.simple_bind_s(conf['ldap_binddn'] or '',
                               conf['ldap_password'] or '')
            # Synchronous search with a 60 second server-side timeout.
            results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
                                     filter, retrieve_attributes, timeout=60)
            conn.unbind()
        except ldap.INVALID_CREDENTIALS:
            _logger.error('LDAP bind failed.')
        except ldap.LDAPError, e:
            _logger.error('An LDAP exception occurred: %s', e)
        return results

    def map_ldap_attributes(self, cr, uid, conf, login, ldap_entry):
        """
        Compose values for a new resource of model res_users,
        based upon the retrieved ldap entry and the LDAP settings.

        :param dict conf: LDAP configuration
        :param login: the new user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: parameters for a new resource of model res_users
        :rtype: dict
        """
        # Display name comes from the first 'cn' attribute of the entry.
        values = { 'name': ldap_entry[1]['cn'][0],
                   'login': login,
                   'company_id': conf['company']
                   }
        return values

    def get_or_create_user(self, cr, uid, conf, login, ldap_entry,
                           context=None):
        """
        Retrieve an active resource of model res_users with the specified
        login. Create the user if it is not initially found.

        :param dict conf: LDAP configuration
        :param login: the user's login
        :param tuple ldap_entry: single LDAP result (dn, attrs)
        :return: res_users id
        :rtype: int
        """
        user_id = False
        # Logins are matched and stored case-insensitively (lowercased).
        login = tools.ustr(login.lower())
        cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
        res = cr.fetchone()
        if res:
            if res[1]:
                user_id = res[0]
        elif conf['create_user']:
            _logger.debug("Creating new Odoo user \"%s\" from LDAP" % login)
            user_obj = self.pool['res.users']
            values = self.map_ldap_attributes(cr, uid, conf, login, ldap_entry)
            if conf['user']:
                # Clone the configured template user with the LDAP values.
                values['active'] = True
                user_id = user_obj.copy(cr, SUPERUSER_ID, conf['user'],
                                        default=values)
            else:
                user_id = user_obj.create(cr, SUPERUSER_ID, values)
        return user_id

    _columns = {
        'sequence': fields.integer('Sequence'),
        'company': fields.many2one('res.company', 'Company', required=True,
                                   ondelete='cascade'),
        'ldap_server': fields.char('LDAP Server address', required=True),
        'ldap_server_port': fields.integer('LDAP Server port', required=True),
        'ldap_binddn': fields.char('LDAP binddn',
            help=("The user account on the LDAP server that is used to query "
                  "the directory. Leave empty to connect anonymously.")),
        'ldap_password': fields.char('LDAP password',
            help=("The password of the user account on the LDAP server that is "
                  "used to query the directory.")),
        'ldap_filter': fields.char('LDAP filter', required=True),
        'ldap_base': fields.char('LDAP base', required=True),
        'user': fields.many2one('res.users', 'Template User',
            help="User to copy when creating new users"),
        'create_user': fields.boolean('Create user',
            help="Automatically create local user accounts for new users authenticating via LDAP"),
        'ldap_tls': fields.boolean('Use TLS',
            help="Request secure TLS/SSL encryption when connecting to the LDAP server. "
                 "This option requires a server with STARTTLS enabled, "
                 "otherwise all authentication attempts will fail."),
    }
    _defaults = {
        'ldap_server': '127.0.0.1',
        'ldap_server_port': 389,
        'sequence': 10,
        'create_user': True,
    }
class res_company(osv.osv):
    """Extend res.company with its list of LDAP configurations."""
    _inherit = "res.company"
    _columns = {
        # One configuration line per directory server; visible only to
        # system administrators.
        'ldaps': fields.one2many(
            'res.company.ldap', 'company', 'LDAP Parameters', copy=True, groups="base.group_system"),
    }
class users(osv.osv):
    """Extend res.users to fall back to LDAP authentication when the
    regular database-password check fails."""
    _inherit = "res.users"

    def _login(self, db, login, password):
        # Try the standard (database) login first.
        user_id = super(users, self)._login(db, login, password)
        if user_id:
            return user_id
        registry = RegistryManager.get(db)
        with registry.cursor() as cr:
            cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
            res = cr.fetchone()
            if res:
                # A local account already exists but its password check
                # failed above: do not retry via LDAP here.
                return False
            ldap_obj = registry.get('res.company.ldap')
            # Walk the configured servers (ordered by sequence); the first
            # one that authenticates wins and may auto-provision the user.
            for conf in ldap_obj.get_ldap_dicts(cr):
                entry = ldap_obj.authenticate(conf, login, password)
                if entry:
                    user_id = ldap_obj.get_or_create_user(
                        cr, SUPERUSER_ID, conf, login, entry)
                    if user_id:
                        break
            return user_id

    def check_credentials(self, cr, uid, password):
        # Accept either the local password or a successful LDAP bind.
        try:
            super(users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            cr.execute('SELECT login FROM res_users WHERE id=%s AND active=TRUE',
                       (int(uid),))
            res = cr.fetchone()
            if res:
                ldap_obj = self.pool['res.company.ldap']
                for conf in ldap_obj.get_ldap_dicts(cr):
                    if ldap_obj.authenticate(conf, res[0], password):
                        return
            # Re-raise the original AccessDenied when no server accepts.
            raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
M4rtinK/tsubame | core/platform/base_platform_module.py | 1 | 8584 | # -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Base class for Tsubame platform modules.
#----------------------------------------------------------------------------
# Copyright 2017, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from core import constants
from core.signal import Signal
class PlatformModule(object):
    """A Tsubame base platform module.

    Provides platform-neutral defaults for the device/platform interface.
    Platform specific modules are expected to subclass this class and
    override the properties and methods their platform supports.
    """

    def __init__(self):
        # Fired when the Internet connectivity state changes.
        self.internet_connectivity_changed = Signal()

    @property
    def platform_id(self):
        """Return an unique string identifying the device module."""
        return None

    @property
    def device_name(self):
        """Return a human readable name of the device."""
        return "unknown device"

    @property
    def preferred_window_wh(self):
        """Return the preferred application window size in pixels."""
        # we'll use VGA as a default value
        return 640, 480

    @property
    def start_in_fullscreen(self):
        """Return if Tsubame should be started fullscreen.

        NOTE: this is a default value and can be overridden by a
        user-set options key, etc.
        """
        return False

    @property
    def fullscreen_only(self):
        """Report if the platform is fullscreen-only.

        Some platforms are basically fullscreen-only (Harmattan),
        as applications only switch between fullscreen and a task switcher.
        """
        return False

    @property
    def screen_blanking_control_supported(self):
        """There is no universal way to control screen blanking, so it's off by default.

        NOTE: Screen blanking can be implemented and enabled in the corresponding
        device or gui module.
        """
        return False

    def pause_screen_blanking(self):
        """Pause screen blanking controlled by device module.

        Calling this method should pause screen blanking:
        * on mobile devices, screen blanking needs to be paused every n seconds
        * on desktop, one call might be enough, still, several calls should
          be handled without issues
        * also what about restoring the screen blanking on desktop
          once Tsubame exits?
        """
        pass

    @property
    def supported_gui_module_ids(self):
        """Supported GUI module IDs, ordered by preference from left to right.

        THE ":" NOTATION
        Single GUI modules might support different subsets, the usability of
        these subsets can vary based on the current platform
        -> this enables device modules to report which GUI subsets
        are most suitable for the given platform
        -> the string starts with the module id prefix, is separated by : and
        continues with the subset id

        EXAMPLE: ["QML:harmattan","QML:indep","GTK"]
        -> QML GUI with Harmattan Qt Components is preferred,
        QML GUI with platform independent Qt Components is less preferred
        and the GTK GUI is set as a fallback if everything else fails

        CURRENT USAGE
        There are different incompatible native Qt Component sets
        on various platforms (Harmattan QTC, Plasma Active QTC, Jolla QTC,...);
        the QML GUI aims to support most of these component sets to provide
        native look & feel and the subset id is used by the device module
        to signal the GUI module which QTC components to use.
        """
        return ["qt5"]  # the Qt 5 GUI is the default

    @property
    def has_notification_support(self):
        """Report if the device provides its own notification method."""
        return False

    def notify(self, message, msTimeout=0, icon=""):
        """Send a notification using platform/device specific API."""
        pass

    @property
    def has_keyboard(self):
        """Report if the device has a hardware keyboard."""
        return True

    @property
    def has_buttons(self):
        """Report if the device has some usable buttons other than a hardware keyboard."""
        # Volume keys are the only buttons the base class knows about;
        # coerce to bool so truthy non-bool overrides still yield True/False.
        return bool(self.has_volume_keys)

    @property
    def has_volume_keys(self):
        """Report if the device has application-usable volume control keys or their equivalent.

        Basically just two nearby buttons that can be used for zooming up/down,
        skipping to next/previous and similar actions.
        """
        return False

    def enable_volume_keys(self):
        """Enable volume-key handling on platforms that need explicit setup."""
        pass

    @property
    def profile_path(self):
        """Return path to the main profile folder or None if default path should be used.

        :returns: path to the profile folder or None
        :rtype: str or None
        """
        return None

    @property
    def needs_quit_button(self):
        """On some platforms applications need to provide their own shutdown buttons."""
        return False

    @property
    def needs_back_button(self):
        """Some platforms (Sailfish OS) don't need an in-UI back button."""
        return True

    @property
    def needs_page_background(self):
        """Some platforms (Sailfish OS) don't need a page background."""
        return True

    @property
    def handles_url_opening(self):
        """Some platforms provide specific APIs for URL opening.

        For example, on the N900 a special DBUS command not available
        elsewhere needs to be used.
        """
        return False

    def open_url(self, url):
        """Open a URL."""
        import webbrowser
        webbrowser.open(url)

    @property
    def connectivity_status(self):
        """Report the current status of internet connectivity on the device.

        :returns: a constants.InternetConnectivityStatus value - ONLINE when a
                  default route is present in /proc/net/route, OFFLINE otherwise
        """
        connected = constants.InternetConnectivityStatus.OFFLINE
        # Look for a default route as a heuristic for Internet connectivity.
        with open('/proc/net/route', 'r') as f:
            for line in f:
                # the line is delimited by tabulators
                fields = line.split('\t')
                # check if the length is valid
                if len(fields) >= 11:
                    if fields[1] == '00000000' and fields[7] == '00000000':
                        # if destination and mask are 00000000,
                        # it is probably an Internet connection
                        connected = constants.InternetConnectivityStatus.ONLINE
                        break
        return connected

    def enable_internet_connectivity(self):
        """Try to make sure that the device connects to the Internet."""
        pass

    @property
    def device_type(self):
        """Returns type of the current device.

        The device can currently be either a PC
        (desktop or laptop/notebook),
        smartphone or a tablet.
        This is currently used mainly for rough
        DPI estimation.
        Example:
        * high resolution & PC -> low DPI
        * high resolution & smartphone -> high DPI
        * high resolution & tablet -> low DPI

        This could also be used in the future to
        use different PC/smartphone/tablet GUI styles.

        By default, the device type is unknown.
        """
        return None

    @property
    def qmlscene_command(self):
        """What should be called to start the qmlscene.

        :returns: command to run to start qmlscene
        :rtype: str
        """
        return "qmlscene"

    @property
    def universal_components_backend(self):
        """Path to a Universal Components backend suitable for the given platform.

        We default to the Controls UC backend.

        :returns: path to suitable UC backend
        :rtype: str
        """
        return "controls"
| gpl-3.0 |
jralls/gramps | gramps/plugins/sidebar/dropdownsidebar.py | 10 | 8043 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2005-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# $Id: categorysidebar.py 20634 2012-11-07 17:53:14Z bmcage $
#-------------------------------------------------------------------------
#
# GNOME modules
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.config import config
from gramps.gui.basesidebar import BaseSidebar
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# DropdownSidebar class
#
#-------------------------------------------------------------------------
class DropdownSidebar(BaseSidebar):
    """
    A sidebar displaying toggle buttons and buttons with drop-down menus that
    allows the user to change the current view.
    """
    def __init__(self, dbstate, uistate, categories, views):
        # One toggle button per category, plus its 'clicked' handler id so
        # the signals can be blocked while the selection is updated
        # programmatically.
        self.viewmanager = uistate.viewmanager
        self.views = views
        self.buttons = []
        self.button_handlers = []
        self.window = Gtk.ScrolledWindow()
        grid = Gtk.Grid()
        self.window.add(grid)
        self.window.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        self.window.show()
        use_text = config.get('interface.sidebar-text')
        # Build one sidebar row per category.
        for cat_num, cat_name, cat_icon in categories:
            self.__make_category(grid, use_text, cat_num, cat_name, cat_icon)
        grid.show_all()

    def get_top(self):
        """
        Return the top container widget for the GUI.
        """
        return self.window

    def view_changed(self, cat_num, view_num):
        """
        Called when the active view is changed.
        """
        # Set new button as selected; signals are blocked so toggling the
        # buttons here does not trigger another page change.
        self.__handlers_block()
        for index, button in enumerate(self.buttons):
            if index == cat_num:
                button.set_active(True)
            else:
                button.set_active(False)
        self.__handlers_unblock()

    def __handlers_block(self):
        """
        Block signals to the buttons to prevent spurious events.
        """
        for idx in range(len(self.buttons)):
            self.buttons[idx].handler_block(self.button_handlers[idx])

    def __handlers_unblock(self):
        """
        Unblock signals to the buttons.
        """
        for idx in range(len(self.buttons)):
            self.buttons[idx].handler_unblock(self.button_handlers[idx])

    def cb_view_clicked(self, radioaction, current, cat_num):
        """
        Called when a button causes a view change.
        """
        view_num = radioaction.get_current_value()
        self.viewmanager.goto_page(cat_num, view_num)

    def __category_clicked(self, button, cat_num):
        """
        Called when a category button is clicked.
        """
        # Make the button active. If it was already active the category will
        # not change.
        button.set_active(True)
        # view_num=None means "keep/choose the category's current view".
        self.viewmanager.goto_page(cat_num, None)

    def __view_clicked(self, button, cat_num):
        """
        Called when a view drop-down arrow is clicked.
        """
        # Keep a reference on self so the menu is not garbage collected
        # while it is popped up.
        self.menu = Gtk.Menu()
        self.menu.set_reserve_toggle_size(False)
        for item in self.views[cat_num]:
            # item is (view_num, view_name, view_icon).
            menuitem = Gtk.MenuItem()
            hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
            label = Gtk.Label(label=item[1])
            image = Gtk.Image.new_from_icon_name(item[2], Gtk.IconSize.MENU)
            hbox.pack_start(image, False, False, 3)
            hbox.pack_start(label, False, False, 3)
            hbox.show_all()
            menuitem.add(hbox)
            menuitem.connect("activate", self.cb_menu_clicked, cat_num, item[0])
            menuitem.show()
            self.menu.append(menuitem)
        self.menu.popup(None, None, cb_menu_position, button, 0, 0)

    def cb_menu_clicked(self, menuitem, cat_num, view_num):
        """
        Called when a view is selected from a drop-down menu.
        """
        self.viewmanager.goto_page(cat_num, view_num)

    def __make_category(self, grid, use_text, cat_num, cat_name, cat_icon):
        """
        Create a row in the sidebar for a category.
        """
        # create the button
        button = Gtk.ToggleButton()
        button.set_relief(Gtk.ReliefStyle.NONE)
        self.buttons.append(button)
        # create the drop-down button to display views (only when the
        # category actually has more than one view to choose from)
        if len(self.views[cat_num]) > 1:
            dropdown = Gtk.Button()
            dropdown.set_relief(Gtk.ReliefStyle.NONE)
            arrow = Gtk.Arrow(arrow_type=Gtk.ArrowType.DOWN,
                              shadow_type=Gtk.ShadowType.NONE)
            dropdown.add(arrow)
            dropdown.connect('clicked', self.__view_clicked, cat_num)
            dropdown.set_tooltip_text(_('Click to select a view'))
            grid.attach(dropdown, 1, cat_num, 1, 1)
        # add the tooltip
        button.set_tooltip_text(cat_name)
        # connect the signal, along with the cat_num as user data
        handler_id = button.connect('clicked', self.__category_clicked, cat_num)
        self.button_handlers.append(handler_id)
        button.show()
        # add the image. If we are using text, use the BUTTON (larger) size.
        # otherwise, use the smaller size
        hbox = Gtk.Box()
        hbox.show()
        image = Gtk.Image()
        if use_text:
            image.set_from_icon_name(cat_icon, Gtk.IconSize.BUTTON)
        else:
            image.set_from_icon_name(cat_icon, Gtk.IconSize.DND)
        image.show()
        hbox.pack_start(image, False, False, 0)
        hbox.set_spacing(4)
        # add text if requested
        if use_text:
            label = Gtk.Label(label=cat_name)
            label.show()
            hbox.pack_start(label, False, True, 0)
        button.add(hbox)
        # Enable view switching during DnD
        button.drag_dest_set(0, [], 0)
        button.connect('drag_motion', self.cb_switch_page_on_dnd, cat_num)
        grid.attach(button, 0, cat_num, 1, 1)

    def cb_switch_page_on_dnd(self, widget, context, xpos, ypos, time, page_no):
        """
        Switches the page based on drag and drop.
        """
        self.__handlers_block()
        if self.viewmanager.notebook.get_current_page() != page_no:
            self.viewmanager.notebook.set_current_page(page_no)
        self.__handlers_unblock()
def cb_menu_position(*args):
    """
    Determine the screen position at which to pop up a category menu.

    Invoked either directly with (menu, button), or - because broken
    introspection cannot handle MenuPositionFunc annotations correctly -
    with four arguments, in which case the button is the last one.
    Returns an (x, y, push_in) tuple placing the menu just below the button.
    """
    button = args[1] if len(args) == 2 else args[3]
    _ret, origin_x, origin_y = button.get_window().get_origin()
    alloc = button.get_allocation()
    menu_x = origin_x + alloc.x
    menu_y = origin_y + alloc.y + alloc.height
    return (menu_x, menu_y, False)
| gpl-2.0 |
jashandeep-sohi/aiohttp | tests/test_py35/test_streams_35.py | 3 | 2188 | import pytest
from aiohttp import streams
DATA = b'line1\nline2\nline3\n'
def chunkify(seq, n):
    """Yield successive slices of *seq*, each at most *n* items long."""
    for offset in range(0, len(seq), n):
        yield seq[offset:offset + n]
def create_stream(loop):
    """Return a StreamReader pre-fed with DATA and marked EOF."""
    stream = streams.StreamReader(loop=loop)
    stream.feed_data(DATA)
    stream.feed_eof()
    return stream
@pytest.mark.run_loop
async def test_stream_reader_lines(loop):
    """Async iteration over a StreamReader yields one line at a time."""
    line_iter = iter(DATA.splitlines(keepends=True))
    async for line in create_stream(loop):
        assert line == next(line_iter, None)
    # Every expected line must have been consumed.
    pytest.raises(StopIteration, next, line_iter)
@pytest.mark.run_loop
async def test_stream_reader_chunks_complete(loop):
    """Tests if chunked iteration works if the chunking works out
    (i.e. the data is divisible by the chunk size)
    """
    # len(DATA) == 18, so 9-byte chunks divide the data evenly.
    chunk_iter = chunkify(DATA, 9)
    async for line in create_stream(loop).iter_chunked(9):
        assert line == next(chunk_iter, None)
    pytest.raises(StopIteration, next, chunk_iter)
@pytest.mark.run_loop
async def test_stream_reader_chunks_incomplete(loop):
    """Tests if chunked iteration works if the last chunk is incomplete"""
    # len(DATA) == 18 is not divisible by 8, so the final chunk is short.
    chunk_iter = chunkify(DATA, 8)
    async for line in create_stream(loop).iter_chunked(8):
        assert line == next(chunk_iter, None)
    pytest.raises(StopIteration, next, chunk_iter)
@pytest.mark.run_loop
async def test_data_queue_empty(loop):
    """Tests that async looping yields nothing if nothing is there"""
    buffer = streams.DataQueue(loop=loop)
    buffer.feed_eof()
    # The loop body must never execute on an empty, EOF'd queue.
    async for _ in buffer:  # NOQA
        assert False
@pytest.mark.run_loop
async def test_data_queue_items(loop):
    """Tests that async looping yields objects identically"""
    buffer = streams.DataQueue(loop=loop)
    items = [object(), object()]
    buffer.feed_data(items[0], 1)
    buffer.feed_data(items[1], 1)
    buffer.feed_eof()
    item_iter = iter(items)
    async for item in buffer:
        # Identity check: the very same objects must come back, in order.
        assert item is next(item_iter, None)
    pytest.raises(StopIteration, next, item_iter)
@pytest.mark.run_loop
async def test_stream_reader_iter_any(loop):
    """iter_any() yields whatever is buffered - here the whole payload at once."""
    it = iter([b'line1\nline2\nline3\n'])
    async for raw in create_stream(loop).iter_any():
        assert raw == next(it)
| apache-2.0 |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py | 559 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
    """Build and return (as a locals() dict) a tree-builder module bound to
    the given DOM implementation (e.g. xml.dom.minidom)."""
    Dom = DomImplementation

    class AttrList(object):
        """Dict-like adapter over a DOM element's attribute map."""
        def __init__(self, element):
            self.element = element

        def __iter__(self):
            return list(self.element.attributes.items()).__iter__()

        def __setitem__(self, name, value):
            self.element.setAttribute(name, value)

        def __len__(self):
            return len(list(self.element.attributes.items()))

        def items(self):
            return [(item[0], item[1]) for item in
                    list(self.element.attributes.items())]

        def keys(self):
            return list(self.element.attributes.keys())

        def __getitem__(self, name):
            return self.element.getAttribute(name)

        def __contains__(self, name):
            # Namespaced (tuple) lookups are not supported by this adapter.
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.hasAttribute(name)

    class NodeBuilder(_base.Node):
        """Wraps a DOM node in the html5lib tree-builder Node interface."""
        def __init__(self, element):
            _base.Node.__init__(self, element.nodeName)
            self.element = element

        # Document nodes have no namespaceURI attribute, hence the hasattr.
        namespace = property(lambda self: hasattr(self.element, "namespaceURI")
                             and self.element.namespaceURI or None)

        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)

        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)

        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self

        def removeChild(self, node):
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None

        def reparentChildren(self, newParent):
            # Move all children to newParent, preserving their order.
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []

        def getAttributes(self):
            return AttrList(self.element)

        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    if isinstance(name, tuple):
                        # (prefix, localName, namespace) triple.
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)

        attributes = property(getAttributes, setAttributes)

        def cloneNode(self):
            # Shallow clone - children are not copied.
            return NodeBuilder(self.element.cloneNode(False))

        def hasContent(self):
            return self.element.hasChildNodes()

        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name

        nameTuple = property(getNameTuple)

    class TreeBuilder(_base.TreeBuilder):
        """Tree builder that materialises the parse tree as DOM nodes."""
        def documentClass(self):
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            return weakref.proxy(self)

        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]

            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                # minidom does not set ownerDocument on created doctypes.
                doctype.ownerDocument = self.dom

        def elementClass(self, name, namespace=None):
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)

            return NodeBuilder(node)

        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))

        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())

        def appendChild(self, node):
            self.dom.appendChild(node.element)

        def testSerializer(self, element):
            return testSerializer(element)

        def getDocument(self):
            return self.dom

        def getFragment(self):
            return _base.TreeBuilder.getFragment(self).element

        def insertText(self, data, parent=None):
            if parent != self:
                _base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    if Node.TEXT_NODE not in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))

        implementation = DomImplementation
        name = None

    def testSerializer(element):
        """Serialize *element* into the html5lib test-suite tree format."""
        element.normalize()
        rv = []

        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))
                    # Attributes are emitted in sorted order for stable output.
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
                # Only real elements increase the indentation of children.
                indent += 2
            for child in element.childNodes:
                serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)

    return locals()
# The actual means to get a module: wrap getDomBuilder in a caching
# module factory keyed on the DOM implementation.
getDomModule = moduleFactoryFactory(getDomBuilder)
| agpl-3.0 |
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/django/views/generic/base.py | 115 | 7583 | from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.urls import NoReverseMatch, reverse
from django.utils import six
from django.utils.decorators import classonlymethod
# Module-level logger shared by the generic views below.
logger = logging.getLogger('django.request')
class ContextMixin(object):
    """
    Default context mixin: exposes the keyword arguments received by
    get_context_data as the template context, adding the view itself
    under the 'view' key when the caller did not supply one.
    """
    def get_context_data(self, **kwargs):
        # Make the view available to templates without clobbering an
        # explicitly provided 'view' entry.
        kwargs.setdefault('view', self)
        return kwargs
class View(object):
    """
    Intentionally simple parent class for all views. Only implements
    dispatch-by-method and simple sanity checking.
    """

    # HTTP verbs a view may implement; dispatch() refuses anything else.
    http_method_names = ['get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace']

    def __init__(self, **kwargs):
        """
        Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments, and other things.
        """
        # Go through keyword arguments, and either save their values to our
        # instance, or raise an error.
        for key, value in six.iteritems(kwargs):
            setattr(self, key, value)

    @classonlymethod
    def as_view(cls, **initkwargs):
        """
        Main entry point for a request-response process.

        Returns a plain function that instantiates the view class per
        request and dispatches to the matching HTTP-verb handler.
        """
        # Sanity-check initkwargs: they may not shadow HTTP verbs and must
        # correspond to existing class attributes.
        for key in initkwargs:
            if key in cls.http_method_names:
                raise TypeError("You tried to pass in the %s method name as a "
                                "keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            if not hasattr(cls, key):
                raise TypeError("%s() received an invalid keyword %r. as_view "
                                "only accepts arguments that are already "
                                "attributes of the class." % (cls.__name__, key))

        def view(request, *args, **kwargs):
            # A fresh instance per request keeps views thread-safe.
            self = cls(**initkwargs)
            if hasattr(self, 'get') and not hasattr(self, 'head'):
                # Default HEAD to GET when no explicit head() exists.
                self.head = self.get
            self.request = request
            self.args = args
            self.kwargs = kwargs
            return self.dispatch(request, *args, **kwargs)
        view.view_class = cls
        view.view_initkwargs = initkwargs

        # take name and docstring from class
        update_wrapper(view, cls, updated=())

        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view

    def dispatch(self, request, *args, **kwargs):
        # Try to dispatch to the right method; if a method doesn't exist,
        # defer to the error handler. Also defer to the error handler if the
        # request method isn't on the approved list.
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        return handler(request, *args, **kwargs)

    def http_method_not_allowed(self, request, *args, **kwargs):
        """Return a 405 response listing the verbs this view implements."""
        logger.warning(
            'Method Not Allowed (%s): %s', request.method, request.path,
            extra={'status_code': 405, 'request': request}
        )
        return http.HttpResponseNotAllowed(self._allowed_methods())

    def options(self, request, *args, **kwargs):
        """
        Handles responding to requests for the OPTIONS HTTP verb.
        """
        response = http.HttpResponse()
        response['Allow'] = ', '.join(self._allowed_methods())
        response['Content-Length'] = '0'
        return response

    def _allowed_methods(self):
        # Upper-cased list of verbs actually implemented on this view.
        return [m.upper() for m in self.http_method_names if hasattr(self, m)]
class TemplateResponseMixin(object):
    """
    Mixin providing template rendering for a view.
    """
    template_name = None
    template_engine = None
    response_class = TemplateResponse
    content_type = None

    def render_to_response(self, context, **response_kwargs):
        """
        Build and return a response rendered from this view's template.

        Uses `response_class` with the given context; any extra keyword
        arguments are forwarded to the response constructor.
        """
        if 'content_type' not in response_kwargs:
            response_kwargs['content_type'] = self.content_type
        return self.response_class(
            request=self.request,
            template=self.get_template_names(),
            context=context,
            using=self.template_engine,
            **response_kwargs
        )

    def get_template_names(self):
        """
        Return the list of template names to try for this request.

        May not be called if render_to_response is overridden.
        """
        if self.template_name is None:
            raise ImproperlyConfigured(
                "TemplateResponseMixin requires either a definition of "
                "'template_name' or an implementation of 'get_template_names()'")
        return [self.template_name]
class TemplateView(TemplateResponseMixin, ContextMixin, View):
    """
    A view that renders a template. This view will also pass into the context
    any keyword arguments passed by the URLconf.
    """
    def get(self, request, *args, **kwargs):
        # Build the context from URLconf kwargs and render the template.
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
class RedirectView(View):
    """
    A view that redirects any request to a target URL.
    """
    permanent = False
    url = None
    pattern_name = None
    query_string = False

    def get_redirect_url(self, *args, **kwargs):
        """
        Return the URL to redirect to, or None when no target can be
        resolved. Keyword arguments from the URL pattern match generating
        the redirect request are provided as kwargs to this method.
        """
        if self.url:
            # Explicit URL template, interpolated with the URLconf kwargs.
            url = self.url % kwargs
        elif self.pattern_name:
            try:
                url = reverse(self.pattern_name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                return None
        else:
            return None

        # Optionally carry the original query string over to the target.
        query = self.request.META.get('QUERY_STRING', '')
        if query and self.query_string:
            url = "%s?%s" % (url, query)
        return url

    def get(self, request, *args, **kwargs):
        url = self.get_redirect_url(*args, **kwargs)
        if not url:
            # No destination could be determined: report the resource gone.
            logger.warning(
                'Gone: %s', request.path,
                extra={'status_code': 410, 'request': request}
            )
            return http.HttpResponseGone()
        if self.permanent:
            return http.HttpResponsePermanentRedirect(url)
        return http.HttpResponseRedirect(url)

    # Every other HTTP verb redirects exactly like GET.
    def head(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def options(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def put(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)

    def patch(self, request, *args, **kwargs):
        return self.get(request, *args, **kwargs)
leominov/fabric-bolt | fabric_bolt/accounts/admin.py | 10 | 2323 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
from authtools.admin import UserAdmin
from fabric_bolt.accounts.forms import UserChangeForm, UserCreationForm
class UserChangeAdminFrom(UserChangeForm):
    # Admin variant of the user change form: the raw password hash is never
    # displayed or edited directly, only a link to the password-change form.
    password = ReadOnlyPasswordHashField(label=_("Password"),
                                         help_text=_("Raw passwords are not stored, so there is no way to see "
                                                     "this user's password, but you can change the password "
                                                     "using <a href=\"password/\">this form</a>."))

    def __init__(self, *args, **kwargs):
        # NOTE(review): presumably user_is_admin tells the base UserChangeForm
        # that an administrator is editing - confirm in accounts/forms.py.
        kwargs.update(user_is_admin=True)
        super(UserChangeAdminFrom, self).__init__(*args, **kwargs)

    def clean_password(self):
        # Regardless of what the user provides, return the initial value.
        # This is done here, rather than on the field, because the
        # field does not have access to the initial value
        return self.initial["password"]
class DeployUserAdmin(UserAdmin):
    """Admin configuration for the project's email-based user model."""
    # The forms to add and change user instances
    form = UserChangeAdminFrom
    add_form = UserCreationForm

    # The fields to be used in displaying the User model.
    # These override the definitions on the base UserAdmin
    # that reference specific fields on auth.User.
    list_display = ('email', 'first_name', 'last_name', 'last_login', 'is_staff', )
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'template')}),
        (_('Permissions'), {'fields': ( 'is_staff', 'is_superuser','user_level', 'is_active' )}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined', )}),
    )
    # Minimal add form: only the email address is requested up front.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', )
        }),
    )
    search_fields = ('email', 'first_name', 'last_name', )
    ordering = ('email',)
    filter_horizontal = ('groups', 'user_permissions',)

    def save_model(self, request, obj, form, change):
        """Persist the user, then apply the permissions selected on the form."""
        super(DeployUserAdmin, self).save_model(request, obj, form, change)
        form.set_permissions(obj)
# Register the new DeployUserAdmin
# admin.site.register(get_user_model(), DeployUserAdmin) | mit |
redhatrises/freeipa | ipaclient/remote_plugins/2_164/automount.py | 16 | 35304 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Automount
Stores automount(8) configuration for autofs(8) in IPA.
The base of an automount configuration is the configuration file auto.master.
This is also the base location in IPA. Multiple auto.master configurations
can be stored in separate locations. A location is implementation-specific
with the default being a location named 'default'. For example, you can have
locations by geographic region, by floor, by type, etc.
Automount has three basic object types: locations, maps and keys.
A location defines a set of maps anchored in auto.master. This allows you
to store multiple automount configurations. A location in itself isn't
very interesting, it is just a point to start a new automount map.
A map is roughly equivalent to a discrete automount file and provides
storage for keys.
A key is a mount point associated with a map.
When a new location is created, two maps are automatically created for
it: auto.master and auto.direct. auto.master is the root map for all
automount maps for the location. auto.direct is the default map for
direct mounts and is mounted on /-.
An automount map may contain a submount key. This key defines a mount
location within the map that references another map. This can be done
either using automountmap-add-indirect --parentmap or manually
with automountkey-add and setting info to "-type=autofs :<mapname>".
EXAMPLES:
Locations:
Create a named location, "Baltimore":
ipa automountlocation-add baltimore
Display the new location:
ipa automountlocation-show baltimore
Find available locations:
ipa automountlocation-find
Remove a named automount location:
ipa automountlocation-del baltimore
Show what the automount maps would look like if they were in the filesystem:
ipa automountlocation-tofiles baltimore
Import an existing configuration into a location:
ipa automountlocation-import baltimore /etc/auto.master
The import will fail if any duplicate entries are found. For
continuous operation where errors are ignored, use the --continue
option.
Maps:
Create a new map, "auto.share":
ipa automountmap-add baltimore auto.share
Display the new map:
ipa automountmap-show baltimore auto.share
Find maps in the location baltimore:
ipa automountmap-find baltimore
Create an indirect map with auto.share as a submount:
ipa automountmap-add-indirect baltimore --parentmap=auto.share --mount=sub auto.man
This is equivalent to:
ipa automountmap-add-indirect baltimore --mount=/man auto.man
ipa automountkey-add baltimore auto.man --key=sub --info="-fstype=autofs ldap:auto.share"
Remove the auto.share map:
ipa automountmap-del baltimore auto.share
Keys:
Create a new key for the auto.share map in location baltimore. This ties
the map we previously created to auto.master:
ipa automountkey-add baltimore auto.master --key=/share --info=auto.share
Create a new key for our auto.share map, an NFS mount for man pages:
ipa automountkey-add baltimore auto.share --key=man --info="-ro,soft,rsize=8192,wsize=8192 ipa.example.com:/shared/man"
Find all keys for the auto.share map:
ipa automountkey-find baltimore auto.share
Find all direct automount keys:
ipa automountkey-find baltimore --key=/-
Remove the man key from the auto.share map:
ipa automountkey-del baltimore auto.share --key=man
""")
register = Registry()
@register()
class automountkey(Object):
    """Schema stub for an automount key: one mount-point entry within a map.

    This module is machine-generated from the server API schema; the
    parameter definitions mirror the server side and should not be
    hand-edited.
    """
    takes_params = (
        parameters.Str(
            'automountkey',
            label=_(u'Key'),
            doc=_(u'Automount key name.'),
        ),
        parameters.Str(
            'automountinformation',
            label=_(u'Mount information'),
        ),
        parameters.Str(
            'description',
            required=False,
            # NOTE(review): primary_key on an optional, UI-excluded
            # 'description' looks odd, but matches the generated schema
            # (keys are identified by a synthesized description attribute).
            primary_key=True,
            label=_(u'description'),
            exclude=('webui', 'cli'),
        ),
    )
@register()
class automountlocation(Object):
    """Schema stub for an automount location: a named root holding a set of maps."""
    takes_params = (
        parameters.Str(
            'cn',
            primary_key=True,
            label=_(u'Location'),
            doc=_(u'Automount location name.'),
        ),
    )
@register()
class automountmap(Object):
    """Schema stub for an automount map: roughly one automount file, holding keys."""
    takes_params = (
        parameters.Str(
            'automountmapname',
            primary_key=True,
            label=_(u'Map'),
            doc=_(u'Automount map name.'),
        ),
        parameters.Str(
            'description',
            required=False,
            label=_(u'Description'),
        ),
    )
@register()
class automountkey_add(Method):
__doc__ = _("Create a new automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_del(Method):
__doc__ = _("Delete an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class automountkey_find(Method):
__doc__ = _("Search for an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountkey',
required=False,
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountkey_mod(Method):
__doc__ = _("Modify an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'newautomountinformation',
required=False,
cli_name='newinfo',
label=_(u'New mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Str(
'rename',
required=False,
label=_(u'Rename'),
doc=_(u'Rename the automount key object'),
exclude=('webui',),
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountkey_show(Method):
__doc__ = _("Display an automount key.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapautomountmapname',
cli_name='automountmap',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Str(
'automountkey',
cli_name='key',
label=_(u'Key'),
doc=_(u'Automount key name.'),
),
parameters.Str(
'automountinformation',
required=False,
cli_name='info',
label=_(u'Mount information'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_add(Method):
__doc__ = _("Create a new automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_del(Method):
__doc__ = _("Delete an automount location.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class automountlocation_find(Method):
__doc__ = _("Search for an automount location.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("location")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountlocation_show(Method):
__doc__ = _("Display an automount location.")
takes_args = (
parameters.Str(
'cn',
cli_name='location',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountlocation_tofiles(Method):
    __doc__ = _("Generate automount files for a specific location.")

    takes_args = (
        parameters.Str(
            'cn',
            cli_name='location',
            label=_(u'Location'),
            doc=_(u'Automount location name.'),
        ),
    )
    # No options for this command.
    takes_options = (
    )
    # Free-form result structure; the client renders it as file contents.
    has_output = (
        output.Output(
            'result',
        ),
    )
@register()
class automountmap_add(Method):
__doc__ = _("Create a new automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_add_indirect(Method):
__doc__ = _("Create a new indirect mount point.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'key',
cli_name='mount',
label=_(u'Mount point'),
),
parameters.Str(
'parentmap',
required=False,
label=_(u'Parent map'),
doc=_(u'Name of parent automount map (default: auto.master).'),
default=u'auto.master',
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_del(Method):
__doc__ = _("Delete an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapname',
multivalue=True,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class automountmap_find(Method):
__doc__ = _("Search for an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'automountmapname',
required=False,
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds (0 is unlimited)'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned (0 is unlimited)'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("map")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class automountmap_mod(Method):
__doc__ = _("Modify an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class automountmap_show(Method):
__doc__ = _("Display an automount map.")
takes_args = (
parameters.Str(
'automountlocationcn',
cli_name='automountlocation',
label=_(u'Location'),
doc=_(u'Automount location name.'),
),
parameters.Str(
'automountmapname',
cli_name='map',
label=_(u'Map'),
doc=_(u'Automount map name.'),
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| gpl-3.0 |
SESoS/SIMVA-SoS | src/new main/Integrated/prototype/Verifier.py | 2 | 4222 | import random
from datetime import datetime
random.seed(datetime.now())
class Verifier(object):
    """Checks a verification property against a set of simulation logs.

    The supplied checker object must expose
    ``check(simulation_log, verification_property) -> bool``.
    """

    def __init__(self, checker):
        # Delegate all per-log property evaluation to the checker.
        self.propertyChecker = checker

    def verify(self, simulationLogs, verificationProperty):
        """Return True iff the property holds on every log.

        Every log is evaluated (no early exit), matching the exhaustive
        sweep semantics of the original implementation; an empty log set
        trivially verifies.
        """
        outcomes = [
            self.propertyChecker.check(log, verificationProperty)
            for log in simulationLogs
        ]
        return all(outcomes)
class SPRT(Verifier):
    """Statistical verifier based on Wald's Sequential Probability Ratio Test.

    Sweeps candidate probabilities theta from 0.01 to 0.99; for each theta
    it draws samples until the likelihood ratio crosses an acceptance
    threshold, then reports (via print) the approximate probability that
    the property holds.  Results are printed, not returned.
    """

    def __init__(self, checker):
        super(SPRT, self).__init__(checker)
        # Type I / type II error bounds and the indifference half-width.
        self.alpha = 0.05
        self.beta = 0.05
        self.delta = 0.01
        # Minimum draws before a stopping decision may be attempted.
        self.minimumSample = 2

    def verifyExistedLogs(self, simulationLogs, verificationProperty):
        """Estimate the property's probability from pre-recorded logs.

        For each theta the log list is shuffled in place and consumed from
        the front until the SPRT can stop (or logs run out).
        NOTE(review): nearly identical to verifyWithSimulator below — the
        two sweeps could share a helper.
        """
        totalResult = True
        probability = 0.
        for i in range(1, 100):
            theta = i * 0.01
            numOfSamples = 0
            numOfTrue = 0
            # Re-shuffle so each theta sees the logs in a fresh order.
            random.shuffle(simulationLogs)
            while self.isSampleNeeded(numOfSamples, numOfTrue, theta):
                logLen = len(simulationLogs)
                if not numOfSamples < logLen:
                    print("Lack of simulation logs:", logLen)
                    break
                if self.propertyChecker.check(simulationLogs[numOfSamples], verificationProperty):
                    numOfTrue = numOfTrue + 1
                numOfSamples = numOfSamples + 1
            result = self.isSatisfied(numOfSamples, numOfTrue, theta)  # TODO: a per-theta decision function is needed; alpha, beta, delta are used here
            print('theta:', format(theta, ".2f"), ' num of samples:', numOfSamples, ' num of true:', numOfTrue, ' result:', result)
            # Record the first theta that is rejected as the estimate.
            if totalResult:
                if not result:
                    totalResult = False
                    probability = theta
        print('Probability: about', probability * 100, '%')

    def verifyWithSimulator(self, simulator, verificationProperty, maxRepeat):
        """Estimate the property's probability by running the simulator.

        Same sweep as verifyExistedLogs, but each sample comes from a
        fresh simulator.run(), capped at maxRepeat runs per theta.
        """
        maxNumOfSamples = maxRepeat
        totalResult = True
        probability = 0.
        for i in range(1, 100):
            theta = i * 0.01
            numOfSamples = 0
            numOfTrue = 0
            while self.isSampleNeeded(numOfSamples, numOfTrue, theta):
                if not numOfSamples < maxNumOfSamples:
                    print("Over maximum repeat:", maxNumOfSamples)
                    break
                simulationLog = simulator.run()
                if self.propertyChecker.check(simulationLog, verificationProperty):
                    numOfTrue = numOfTrue + 1
                numOfSamples = numOfSamples + 1
            result = self.isSatisfied(numOfSamples, numOfTrue, theta)  # TODO: a per-theta decision function is needed; alpha, beta, delta are used here
            print('theta:', format(theta, ".2f"), ' num of samples:', numOfSamples, ' num of true:', numOfTrue, ' result:', result)
            if totalResult:
                if not result:
                    totalResult = False
                    probability = theta
        print('Probability: about', probability*100, '%')

    def isSampleNeeded(self, numOfSamples, numOfTrue, theta):  # TODO: decide whether another sample is needed
        """Return True while the SPRT cannot yet accept either hypothesis."""
        if numOfSamples < self.minimumSample:
            return True
        # Wald's acceptance thresholds A = beta/(1-alpha), B = (1-beta)/alpha.
        h0Threshold = self.beta/(1-self.alpha)
        h1Threshold = (1-self.beta)/self.alpha
        v = self.getV(numOfSamples, numOfTrue, theta)
        if v <= h0Threshold:
            return False
        elif v >= h1Threshold:
            return False
        else:
            return True

    def isSatisfied(self, numOfSamples, numOfTrue, theta):
        """Return True when the likelihood ratio accepts H0 (v <= A)."""
        h0Threshold = self.beta / (1 - self.alpha)
        v = self.getV(numOfSamples, numOfTrue, theta)
        if v <= h0Threshold:
            return True
        else:
            return False

    def getV(self, numOfSamples, numOfTrue, theta):  # TODO
        """Likelihood ratio of p1 = theta - delta vs p0 = theta + delta.

        Binomial likelihoods of the observed true/false counts under the
        two hypothesized probabilities.
        """
        p0 = theta + self.delta
        p1 = theta - self.delta
        numOfFalse = numOfSamples - numOfTrue
        p1m = ((p1 ** numOfTrue) * ((1 - p1) ** numOfFalse))
        p0m = ((p0 ** numOfTrue) * ((1 - p0) ** numOfFalse))
        # NOTE(review): the epsilon is applied to BOTH terms, and only when
        # the denominator underflows to exactly 0 — this perturbs p1m as
        # well; confirm this is the intended guard against division by zero.
        if p0m == 0:
            p1m = p1m + 0.000000001
            p0m = p0m + 0.000000001
        v = p1m / p0m
        return v
| apache-2.0 |
Ronak6892/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_serializer.py | 131 | 4691 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from cStringIO import StringIO
from .. import parser, serializer
class TokenizerTest(unittest.TestCase):
    """Round-trip tests for the wptmanifest serializer.

    Each test parses an input manifest and serializes it back, comparing
    against an expected text (by default the input itself).

    NOTE(review): wptmanifest output is whitespace-significant, but the
    string fixtures in this copy appear to have lost their leading
    indentation and blank lines (mangled in transit) — restore them from
    upstream before trusting these expectations.  The class name is also
    a misnomer (it tests the serializer, not the tokenizer); renaming
    would change test discovery, so it is left as-is.
    """

    def setUp(self):
        # Fresh serializer and parser per test; both hold per-run state.
        self.serializer = serializer.ManifestSerializer()
        self.parser = parser.Parser()

    def serialize(self, input_str):
        """Parse *input_str* and serialize the resulting AST back to bytes."""
        return self.serializer.serialize(self.parser.parse(input_str))

    def compare(self, input_str, expected=None):
        """Assert serialize(input_str) == expected (UTF-8 encoded).

        When *expected* is omitted, serialization must round-trip the
        input unchanged.
        """
        if expected is None:
            expected = input_str
        expected = expected.encode("utf8")
        actual = self.serialize(input_str)
        self.assertEquals(actual, expected)

    def test_0(self):
        self.compare("""key: value
[Heading 1]
other_key: other_value
""")

    def test_1(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
""")

    def test_2(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
fallback_value
""")

    def test_3(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a == 1: other_value
fallback_value
""")

    def test_4(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a == "1": other_value
fallback_value
""")

    def test_5(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[1]: other_value
fallback_value
""")

    def test_6(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[c]: other_value
fallback_value
""")

    def test_7(self):
        # Parenthesized (a or b) normalizes to the unparenthesized form.
        self.compare("""key: value
[Heading 1]
other_key:
if (a or b) and c: other_value
fallback_value
""",
                     """key: value
[Heading 1]
other_key:
if a or b and c: other_value
fallback_value
""")

    def test_8(self):
        self.compare("""key: value
[Heading 1]
other_key:
if a or (b and c): other_value
fallback_value
""")

    def test_9(self):
        self.compare("""key: value
[Heading 1]
other_key:
if not (a and b): other_value
fallback_value
""")

    def test_10(self):
        self.compare("""key: value
[Heading 1]
some_key: some_value
[Heading 2]
other_key: other_value
""")

    def test_11(self):
        self.compare("""key:
if not a and b and c and d: true
""")

    def test_12(self):
        self.compare("""[Heading 1]
key: [a:1, b:2]
""")

    def test_13(self):
        self.compare("""key: [a:1, "b:#"]
""")

    def test_14(self):
        self.compare("""key: [","]
""")

    def test_15(self):
        self.compare("""key: ,
""")

    def test_16(self):
        self.compare("""key: ["]", b]
""")

    def test_17(self):
        self.compare("""key: ]
""")

    def test_18(self):
        # An escaped ] serializes without its backslash.
        self.compare("""key: \]
""", """key: ]
""")

    def test_escape_0(self):
        self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
                     r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
""")

    def test_escape_1(self):
        self.compare(r"""k\x00: \x12A\x45""",
                     r"""k\x00: \x12AE
""")

    def test_escape_2(self):
        self.compare(r"""k\u0045y: \u1234A\uABc6""",
                     u"""kEy: \u1234A\uabc6
""")

    def test_escape_3(self):
        self.compare(r"""k\u0045y: \u1234A\uABc6""",
                     u"""kEy: \u1234A\uabc6
""")

    def test_escape_4(self):
        self.compare(r"""key: '\u1234A\uABc6'""",
                     u"""key: \u1234A\uabc6
""")

    def test_escape_5(self):
        self.compare(r"""key: [\u1234A\uABc6]""",
                     u"""key: [\u1234A\uabc6]
""")

    def test_escape_6(self):
        self.compare(r"""key: [\u1234A\uABc6\,]""",
                     u"""key: ["\u1234A\uabc6,"]
""")

    def test_escape_7(self):
        self.compare(r"""key: [\,\]\#]""",
                     r"""key: [",]#"]
""")

    def test_escape_8(self):
        self.compare(r"""key: \#""",
                     r"""key: "#"
""")

    def test_escape_9(self):
        self.compare(r"""key: \U10FFFFabc""",
                     u"""key: \U0010FFFFabc
""")

    def test_escape_10(self):
        self.compare(r"""key: \u10FFab""",
                     u"""key: \u10FFab
""")

    def test_escape_11(self):
        self.compare(r"""key: \\ab
""")

    def test_atom_1(self):
        self.compare(r"""key: @True
""")

    def test_atom_2(self):
        self.compare(r"""key: @False
""")

    def test_atom_3(self):
        self.compare(r"""key: @Reset
""")

    def test_atom_4(self):
        self.compare(r"""key: [a, @Reset, b]
""")
| mpl-2.0 |
thefinn93/CouchPotatoServer | libs/werkzeug/contrib/profiler.py | 91 | 3969 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web application. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
    """An object that redirects `write` calls to multiple streams.
    Use this to log to both `sys.stdout` and a file::
        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)
    """
    def __init__(self, *streams):
        # Refuse to build a merge stream that forwards to nothing.
        if not streams:
            raise TypeError('at least one stream must be given')
        self.streams = streams
    def write(self, data):
        # Fan the payload out to every registered stream, in order.
        for target in self.streams:
            target.write(data)
class ProfilerMiddleware(object):
    """Simple profiler middleware. Wraps a WSGI application and profiles
    a request. This intentionally buffers the response so that timings are
    more exact.
    For the exact meaning of `sort_by` and `restrictions` consult the
    :mod:`profile` documentation.
    :param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats. defaults to stderr.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions.
    """
    def __init__(self, app, stream=None,
                 sort_by=('time', 'calls'), restrictions=()):
        # ``available`` is a module-level flag set by the Profile/Stats
        # import guard at the top of this module.
        if not available:
            raise RuntimeError('the profiler is not available because '
                               'profile or pstat is not installed.')
        self._app = app
        # NOTE(review): the docstring says the stream defaults to stderr,
        # but the fallback here is sys.stdout -- confirm which is intended.
        self._stream = stream or sys.stdout
        self._sort_by = sort_by
        self._restrictions = restrictions
    def __call__(self, environ, start_response):
        # Buffer the whole response body so that the profile measures the
        # application only, not the time spent transmitting to the client.
        response_body = []
        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            # Returned callable acts as the legacy WSGI write() function:
            # chunks written through it land in the buffer as well.
            return response_body.append
        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()
        p = Profile()
        p.runcall(runapp)
        body = ''.join(response_body)
        stats = Stats(p, stream=self._stream)
        stats.sort_stats(*self._sort_by)
        self._stream.write('-' * 80)
        self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
        stats.print_stats(*self._restrictions)
        self._stream.write('-' * 80 + '\n\n')
        return [body]
def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.
    ::
        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    # The ('h', hostname)/('p', port) tuple defaults are the werkzeug.script
    # convention for declaring short command-line option names.
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        # Wrap the freshly constructed app so every request is profiled.
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
| gpl-3.0 |
Tearund/stories | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py | 2779 | 1665 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypsh output module
gypsh is a GYP shell. It's not really a generator per se. All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator. Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.
The expected usage is "gyp -f gypsh -D OS=desired_os".
"""
import code
import sys
# All of this stuff about generator variables was lovingly ripped from gypd.py.
# That module has a much better description of what's going on and why.
_generator_identity_variables = [
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
]
generator_default_variables = {
}
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Drop into an interactive Python session with the processed GYP data.

  ``target_list``, ``target_dicts`` and ``data`` are bound as local
  variables inside the shell so they can be explored interactively.
  """
  # Renamed from ``locals`` to avoid shadowing the locals() builtin.
  session_locals = {
        'target_list': target_list,
        'target_dicts': target_dicts,
        'data': data,
      }
  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(session_locals.keys())))
  code.interact(banner, local=session_locals)
| mit |
defionscode/ansible-modules-core | windows/win_reboot.py | 61 | 2471 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION='''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
version_added: "2.1"
options:
pre_reboot_delay_sec:
description:
- Seconds for shutdown to wait before requesting reboot
default: 2
shutdown_timeout_sec:
description:
- Maximum seconds to wait for shutdown to occur
- Increase this timeout for very slow hardware, large update applications, etc
default: 600
reboot_timeout_sec:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command
- This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value)
default: 600
connect_timeout_sec:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again
default: 5
test_command:
description:
- Command to expect success for to determine the machine is ready for management
default: whoami
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES='''
# unconditionally reboot the machine with all defaults
- win_reboot:
# apply updates and reboot if necessary
- win_updates:
register: update_result
- win_reboot:
when: update_result.reboot_required
# reboot a slow machine that might have lots of updates to apply
- win_reboot:
shutdown_timeout_sec: 3600
reboot_timeout_sec: 3600
'''
# Renamed RETURNS -> RETURN: Ansible's documentation tooling (ansible-doc)
# only recognizes a module-level RETURN variable, so the return-value docs
# were previously invisible.
RETURN='''
rebooted:
    description: true if the machine was rebooted
    returned: always
    type: boolean
    sample: true
'''
| gpl-3.0 |
wkfwkf/statsmodels | statsmodels/tsa/x13.py | 7 | 23281 | """
Run x12/x13-arima specs in a subprocess from Python and curry results back
into python.
Notes
-----
Many of the functions are called x12. However, they are also intended to work
for x13. If this is not the case, it's a bug.
"""
from __future__ import print_function
import os
import subprocess
import tempfile
import re
from warnings import warn
import pandas as pd
from statsmodels.compat.python import iteritems
from statsmodels.tools.tools import Bunch
from statsmodels.tools.sm_exceptions import (X13NotFoundError,
IOWarning, X13Error,
X13Warning)
__all__ = ["x13_arima_select_order", "x13_arima_analysis"]
_binary_names = ('x13as.exe', 'x13as', 'x12a.exe', 'x12a')
class _freq_to_period:
def __getitem__(self, key):
if key.startswith('M'):
return 12
elif key.startswith('Q'):
return 4
_freq_to_period = _freq_to_period()
_period_to_freq = {12 : 'M', 4 : 'Q'}
_log_to_x12 = {True : 'log', False : 'none', None : 'auto'}
_bool_to_yes_no = lambda x : 'yes' if x else 'no'
def _find_x12(x12path=None, prefer_x13=True):
    """
    If x12path is not given, then either x13as[.exe] or x12a[.exe] must
    be found on the PATH. Otherwise, the environmental variable X12PATH or
    X13PATH must be defined. If prefer_x13 is True, only X13PATH is searched
    for. If it is false, only X12PATH is searched for.

    Returns the full path to the first working binary, or False.
    """
    global _binary_names
    if x12path is not None and x12path.endswith(_binary_names):
        # remove binary from path if given
        x12path = os.path.dirname(x12path)
    if not prefer_x13: # search for x12 first
        # NOTE(review): this reverses the module-level tuple in place, so the
        # changed search order persists for every later call in the process.
        _binary_names = _binary_names[::-1]
        if x12path is None:
            x12path = os.getenv("X12PATH", "")
        if not x12path:
            x12path = os.getenv("X13PATH", "")
    elif x12path is None:
        x12path = os.getenv("X13PATH", "")
        if not x12path:
            x12path = os.getenv("X12PATH", "")
    for binary in _binary_names:
        x12 = os.path.join(x12path, binary)
        try:
            # Spawning the binary is the only reliable existence check.
            subprocess.check_call(x12, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            return x12
        except OSError:
            pass
    else:
        # for/else: no binary could be launched (loop never hit ``return``).
        return False
def _check_x12(x12path=None):
    """Resolve the x12/x13 binary path, raising X13NotFoundError if absent."""
    found = _find_x12(x12path)
    if found:
        return found
    raise X13NotFoundError("x12a and x13as not found on path. Give the "
                           "path, put them on PATH, or set the "
                           "X12PATH or X13PATH environmental variable.")
def _clean_order(order):
"""
Takes something like (1 1 0)(0 1 1) and returns a arma order, sarma
order tuple. Also accepts (1 1 0) and return arma order and (0, 0, 0)
"""
order = re.findall("\([0-9 ]*?\)", order)
clean = lambda x : tuple(map(int, re.sub("[()]", "", x).split(" ")))
if len(order) > 1:
order, sorder = map(clean, order)
else:
order = clean(order[0])
sorder = (0, 0, 0)
return order, sorder
def run_spec(x12path, specpath, outname=None, meta=False, datameta=False):
    """Launch x12/x13 on *specpath* and return the ``subprocess.Popen``.

    ``meta`` passes -m (metafile mode), ``datameta`` passes -d (data
    metafile mode); the two are mutually exclusive.  ``outname``, when
    given, is appended as the output file argument.  stderr is folded into
    stdout so callers can read a single stream.
    """
    if meta and datameta:
        raise ValueError("Cannot specify both meta and datameta.")
    if meta:
        args = [x12path, "-m " + specpath]
    elif datameta:
        args = [x12path, "-d " + specpath]
    else:
        args = [x12path, specpath]
    if outname:
        args += [outname]
    return subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
def _make_automdl_options(maxorder, maxdiff, diff):
options = "\n"
options += "maxorder = ({0} {1})\n".format(maxorder[0], maxorder[1])
if maxdiff is not None: # maxdiff always takes precedence
options += "maxdiff = ({0} {1})\n".format(maxdiff[0], maxdiff[1])
else:
options += "diff = ({0} {1})\n".format(diff[0], diff[1])
return options
def _make_var_names(exog):
if hasattr(exog, "name"):
var_names = exog.name
elif hasattr(exog, "columns"):
var_names = exog.columns
else:
raise ValueError("exog is not a Series or DataFrame or is unnamed.")
try:
var_names = " ".join(var_names)
except TypeError: # cannot have names that are numbers, pandas default
from statsmodels.base.data import _make_exog_names
if exog.ndim == 1:
var_names = "x1"
else:
var_names = " ".join(_make_exog_names(exog))
return var_names
def _make_regression_options(trading, exog):
    """Render the ``regression`` spec block for trading-day effects and/or
    user-supplied exogenous regressors; '' when neither is requested."""
    if not trading and exog is None:  # start regression spec
        return ""
    reg_spec = "regression{\n"
    if trading:
        reg_spec += "    variables = (td)\n"
    if exog is not None:
        var_names = _make_var_names(exog)
        reg_spec += "    user = ({0})\n".format(var_names)
        # Values are flattened and written one per line, as x13 expects.
        reg_spec += "    data = ({0})\n".format("\n".join(map(str,
                                                exog.values.ravel().tolist())))
    reg_spec += "}\n"  # close out regression spec
    return reg_spec
def _make_forecast_options(forecast_years):
if forecast_years is None:
return ""
forecast_spec = "forecast{\n"
forecast_spec += "maxlead = ({0})\n}}\n".format(forecast_years)
return forecast_spec
def _check_errors(errors):
    """Raise X13Error for ERROR output, warn X13Warning for WARNING output."""
    # Strip everything through the "spc:" marker -- the preamble merely
    # echoes the spec file name.
    errors = errors[errors.find("spc:")+4:].strip()
    if errors and 'ERROR' in errors:
        raise X13Error(errors)
    elif errors and 'WARNING' in errors:
        warn(errors, X13Warning)
def _convert_out_to_series(x, dates, name):
"""
Convert x to a DataFrame where x is a string in the format given by
x-13arima-seats output.
"""
from StringIO import StringIO
from pandas import read_table
out = read_table(StringIO(x), skiprows=2, header=None)
return out.set_index(dates).rename(columns={1 : name})[name]
def _open_and_read(fname):
# opens a file, reads it, and make sure it's closed
with open(fname, 'r') as fin:
fout = fin.read()
return fout
class Spec(object):
    """Base class for x12/x13 specification-file sections.

    Subclasses call :meth:`set_options`; the section renders via
    :meth:`create_spec` as ``name { options }``.
    """
    @property
    def spec_name(self):
        """Section name: the class name with the 'Spec' suffix removed."""
        return self.__class__.__name__.replace("Spec", "")
    def create_spec(self, **kwargs):
        """Render this section as spec-file text."""
        spec = """{name} {{
{options}
}}
"""
        return spec.format(name=self.spec_name,
                           options=self.options)
    def set_options(self, **kwargs):
        """Record each option as an attribute and build ``self.options``."""
        options = ""
        # Py3 fix: dict.iteritems() was removed; items() works on 2 and 3.
        for key, value in kwargs.items():
            options += "{0}={1}\n".format(key, value)
            self.__dict__.update({key : value})
        self.options = options
class SeriesSpec(Spec):
    """
    Parameters
    ----------
    data
    appendbcst : bool
    appendfcst : bool
    comptype
    compwt
    decimals
    modelspan
    name
    period
    precision
    to_print
    to_save
    span
    start
    title
    type
    Notes
    -----
    Rarely used arguments
    divpower
    missingcode
    missingval
    saveprecision
    trimzero
    """
    def __init__(self, data, name='Unnamed Series', appendbcst=False,
                 appendfcst=False,
                 comptype=None, compwt=1, decimals=0, modelspan=(),
                 period=12, precision=0, to_print=(), to_save=(), span=(),
                 start=(1, 1), title='', series_type=None, divpower=None,
                 missingcode=-99999, missingval=1000000000):
        # Bug fix: to_print/to_save used mutable list defaults ([]), a
        # classic shared-default pitfall; empty tuples are equivalent here
        # since the values are never mutated (nor currently used).
        appendbcst, appendfcst = map(_bool_to_yes_no, [appendbcst,
                                                       appendfcst,
                                                       ])
        series_name = "\"{0}\"".format(name[:64])  # trim to 64 characters
        title = "\"{0}\"".format(title[:79])  # trim to 79 characters
        self.set_options(data=data, appendbcst=appendbcst,
                         appendfcst=appendfcst, period=period, start=start,
                         title=title, name=series_name,
                         )
def pandas_to_series_spec(x):
    """Build a SeriesSpec from a monthly or quarterly pandas Series (or
    single-column DataFrame) with a dated index."""
    # from statsmodels.tools.data import _check_period_index
    # check_period_index(x)
    if hasattr(x, 'columns'):  # convert to series
        if len(x.columns) > 1:
            raise ValueError("Does not handle DataFrame with more than one "
                             "column")
        x = x[x.columns[0]]
    data = "({0})".format("\n".join(map(str, x.values.tolist())))
    # get periodicity
    # get start / first data
    # give it a title
    try:
        period = _freq_to_period[x.index.freqstr]
    except (AttributeError, ValueError):
        # NOTE(review): ``pandas.tseries.api.infer_freq`` was moved in newer
        # pandas (``pandas.infer_freq`` is the stable entry point) --
        # confirm the pandas versions this must support.
        from pandas.tseries.api import infer_freq
        period = _freq_to_period[infer_freq(x.index)]
    start_date = x.index[0]
    if period == 12:
        year, stperiod = start_date.year, start_date.month
    elif period == 4:
        year, stperiod = start_date.year, start_date.quarter
    else:  # pragma: no cover
        raise ValueError("Only monthly and quarterly periods are supported."
                         " Please report or send a pull request if you want "
                         "this extended.")
    if hasattr(x, 'name'):
        name = x.name or "Unnamed Series"
    else:
        name = 'Unnamed Series'
    series_spec = SeriesSpec(data=data, name=name, period=period,
                             title=name, start="{0}.{1}".format(year,
                                                                stperiod))
    return series_spec
def x13_arima_analysis(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
                       exog=None, log=None, outlier=True, trading=False,
                       forecast_years=None, retspec=False,
                       speconly=False, start=None, freq=None,
                       print_stdout=False, x12path=None, prefer_x13=True):
    """
    Perform x13-arima analysis for monthly or quarterly data.
    Parameters
    ----------
    endog : array-like, pandas.Series
        The series to model. It is best to use a pandas object with a
        DatetimeIndex or PeriodIndex. However, you can pass an array-like
        object. If your object does not have a dates index then ``start`` and
        ``freq`` are not optional.
    maxorder : tuple
        The maximum order of the regular and seasonal ARMA polynomials to
        examine during the model identification. The order for the regular
        polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
    maxdiff : tuple
        The maximum orders for regular and seasonal differencing in the
        automatic differencing procedure. Acceptable inputs for regular
        differencing are 1 and 2. The maximum order for seasonal differencing
        is 1. If ``diff`` is specified then ``maxdiff`` should be None.
        Otherwise, ``diff`` will be ignored. See also ``diff``.
    diff : tuple
        Fixes the orders of differencing for the regular and seasonal
        differencing. Regular differencing may be 0, 1, or 2. Seasonal
        differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
        ``diff`` is ignored.
    exog : array-like
        Exogenous variables.
    log : bool or None
        If None, it is automatically determined whether to log the series or
        not. If False, logs are not taken. If True, logs are taken.
    outlier : bool
        Whether or not outliers are tested for and corrected, if detected.
    trading : bool
        Whether or not trading day effects are tested for.
    forecast_years : int
        Number of forecasts produced. The default is one year.
    retspec : bool
        Whether to return the created specification file. Can be useful for
        debugging.
    speconly : bool
        Whether to create the specification file and then return it without
        performing the analysis. Can be useful for debugging.
    start : str, datetime
        Must be given if ``endog`` does not have date information in its index.
        Anything accepted by pandas.DatetimeIndex for the start value.
    freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
    print_stdout : bool
        The stdout from X12/X13 is suppressed. To print it out, set this
        to True. Default is False.
    x12path : str or None
        The path to x12 or x13 binary. If None, the program will attempt
        to find x13as or x12a on the PATH or by looking at X13PATH or
        X12PATH depending on the value of prefer_x13.
    prefer_x13 : bool
        If True, will look for x13as first and will fallback to the X13PATH
        environmental variable. If False, will look for x12a first and will
        fallback to the X12PATH environmental variable. If x12path points
        to the path for the X12/X13 binary, it does nothing.
    Returns
    -------
    res : Bunch
        A bunch object with the following attributes:
        - results : str
              The full output from the X12/X13 run.
        - seasadj : pandas.Series
              The final seasonally adjusted ``endog``
        - trend : pandas.Series
              The trend-cycle component of ``endog``
        - irregular : pandas.Series
              The final irregular component of ``endog``
        - stdout : str
              The captured stdout produced by x12/x13.
        - spec : str, optional
              Returned if ``retspec`` is True. The only thing returned if
              ``speconly`` is True.
    Notes
    -----
    This works by creating a specification file, writing it to a temporary
    directory, invoking X12/X13 in a subprocess, and reading the output
    back in.
    """
    x12path = _check_x12(x12path)
    if not isinstance(endog, (pd.DataFrame, pd.Series)):
        if start is None or freq is None:
            raise ValueError("start and freq cannot be none if endog is not "
                             "a pandas object")
        endog = pd.Series(endog, index=pd.DatetimeIndex(start=start,
                                                        periods=len(endog),
                                                        freq=freq))
    # Assemble the spec file section by section.
    spec_obj = pandas_to_series_spec(endog)
    spec = spec_obj.create_spec()
    spec += "transform{{function={0}}}\n".format(_log_to_x12[log])
    if outlier:
        spec += "outlier{}\n"
    options = _make_automdl_options(maxorder, maxdiff, diff)
    spec += "automdl{{{0}}}\n".format(options)
    spec += _make_regression_options(trading, exog)
    spec += _make_forecast_options(forecast_years)
    spec += "x11{ save=(d11 d12 d13) }"
    if speconly:
        return spec
    # write it to a tempfile
    # TODO: make this more robust - give the user some control?
    ftempin = tempfile.NamedTemporaryFile(delete=False, suffix='.spc')
    ftempout = tempfile.NamedTemporaryFile(delete=False)
    try:
        # NOTE(review): NamedTemporaryFile defaults to binary mode; writing a
        # str here fails on Python 3 -- confirm the supported interpreters.
        ftempin.write(spec)
        ftempin.close()
        ftempout.close()
        # call x12 arima
        p = run_spec(x12path, ftempin.name[:-4], ftempout.name)
        p.wait()
        stdout = p.stdout.read()
        if print_stdout:
            # NOTE(review): the stream was already drained into ``stdout``
            # just above, so this second read prints an empty string -- it
            # likely should print ``stdout`` instead.
            print(p.stdout.read())
        # check for errors
        errors = _open_and_read(ftempout.name + '.err')
        _check_errors(errors)
        # read in results
        results = _open_and_read(ftempout.name + '.out')
        seasadj = _open_and_read(ftempout.name + '.d11')
        trend = _open_and_read(ftempout.name + '.d12')
        irregular = _open_and_read(ftempout.name + '.d13')
    finally:
        try: # sometimes this gives a permission denied error?
            # not sure why. no process should have these open
            os.remove(ftempin.name)
            os.remove(ftempout.name)
        except:  # NOTE(review): bare except also traps KeyboardInterrupt
            if os.path.exists(ftempin.name):
                warn("Failed to delete resource {0}".format(ftempin.name),
                     IOWarning)
            if os.path.exists(ftempout.name):
                warn("Failed to delete resource {0}".format(ftempout.name),
                     IOWarning)
    seasadj = _convert_out_to_series(seasadj, endog.index, 'seasadj')
    trend = _convert_out_to_series(trend, endog.index, 'trend')
    irregular = _convert_out_to_series(irregular, endog.index, 'irregular')
    # NOTE: there isn't likely anything in stdout that's not in results
    # so may be safe to just suppress and remove it
    if not retspec:
        res = X13ArimaAnalysisResult(observed=endog, results=results,
                                     seasadj=seasadj, trend=trend,
                                     irregular=irregular, stdout=stdout)
    else:
        res = X13ArimaAnalysisResult(observed=endog, results=results,
                                     seasadj=seasadj, trend=trend,
                                     irregular=irregular, stdout=stdout,
                                     spec=spec)
    return res
def x13_arima_select_order(endog, maxorder=(2, 1), maxdiff=(2, 1), diff=None,
                           exog=None, log=None, outlier=True, trading=False,
                           forecast_years=None,
                           start=None, freq=None, print_stdout=False,
                           x12path=None, prefer_x13=True):
    """
    Perform automatic seasonal ARIMA order identification using x12/x13 ARIMA.
    Parameters
    ----------
    endog : array-like, pandas.Series
        The series to model. It is best to use a pandas object with a
        DatetimeIndex or PeriodIndex. However, you can pass an array-like
        object. If your object does not have a dates index then ``start`` and
        ``freq`` are not optional.
    maxorder : tuple
        The maximum order of the regular and seasonal ARMA polynomials to
        examine during the model identification. The order for the regular
        polynomial must be greater than zero and no larger than 4. The
        order for the seasonal polynomial may be 1 or 2.
    maxdiff : tuple
        The maximum orders for regular and seasonal differencing in the
        automatic differencing procedure. Acceptable inputs for regular
        differencing are 1 and 2. The maximum order for seasonal differencing
        is 1. If ``diff`` is specified then ``maxdiff`` should be None.
        Otherwise, ``diff`` will be ignored. See also ``diff``.
    diff : tuple
        Fixes the orders of differencing for the regular and seasonal
        differencing. Regular differencing may be 0, 1, or 2. Seasonal
        differencing may be 0 or 1. ``maxdiff`` must be None, otherwise
        ``diff`` is ignored.
    exog : array-like
        Exogenous variables.
    log : bool or None
        If None, it is automatically determined whether to log the series or
        not. If False, logs are not taken. If True, logs are taken.
    outlier : bool
        Whether or not outliers are tested for and corrected, if detected.
    trading : bool
        Whether or not trading day effects are tested for.
    forecast_years : int
        Number of forecasts produced. The default is one year.
    start : str, datetime
        Must be given if ``endog`` does not have date information in its index.
        Anything accepted by pandas.DatetimeIndex for the start value.
    freq : str
        Must be given if ``endog`` does not have date information in its
        index. Anything accepted by pandas.DatetimeIndex for the freq value.
    print_stdout : bool
        The stdout from X12/X13 is suppressed. To print it out, set this
        to True. Default is False.
    x12path : str or None
        The path to x12 or x13 binary. If None, the program will attempt
        to find x13as or x12a on the PATH or by looking at X13PATH or X12PATH
        depending on the value of prefer_x13.
    prefer_x13 : bool
        If True, will look for x13as first and will fallback to the X13PATH
        environmental variable. If False, will look for x12a first and will
        fallback to the X12PATH environmental variable. If x12path points
        to the path for the X12/X13 binary, it does nothing.
    Returns
    -------
    results : Bunch
        A bunch object that has the following attributes:
        - order : tuple
              The regular order
        - sorder : tuple
              The seasonal order
        - include_mean : bool
              Whether to include a mean or not
        - results : str
              The full results from the X12/X13 analysis
        - stdout : str
              The captured stdout from the X12/X13 analysis
    Notes
    -----
    This works by creating a specification file, writing it to a temporary
    directory, invoking X12/X13 in a subprocess, and reading the output back
    in.
    """
    # Run the full analysis, then scrape the chosen model from its report.
    results = x13_arima_analysis(endog, x12path=x12path, exog=exog, log=log,
                                 outlier=outlier, trading=trading,
                                 forecast_years=forecast_years,
                                 maxorder=maxorder, maxdiff=maxdiff, diff=diff,
                                 start=start, freq=freq, prefer_x13=prefer_x13)
    # Lookbehind grabs the "(p d q)(P D Q)" text after the report label.
    model = re.search("(?<=Final automatic model choice : ).*",
                      results.results)
    order = model.group()
    # Mean inclusion: an explicit "not significant" message wins; otherwise
    # any mention of "Constant" implies a mean; default to no mean.
    if re.search("Mean is not significant", results.results):
        include_mean = False
    elif re.search("Constant", results.results):
        include_mean = True
    else:
        include_mean = False
    order, sorder = _clean_order(order)
    res = Bunch(order=order, sorder=sorder, include_mean=include_mean,
                results=results.results, stdout=results.stdout)
    return res
class X13ArimaAnalysisResult(object):
    """Attribute container for x13_arima_analysis results, with a helper to
    plot the observed series and its decomposition."""
    def __init__(self, **kwargs):
        # Promote every keyword straight to an instance attribute.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
    def plot(self):
        """Plot observed, seasonally adjusted, trend and irregular series
        on a shared time axis; returns the matplotlib Figure."""
        from statsmodels.graphics.utils import _import_mpl
        plt = _import_mpl()
        fig, axes = plt.subplots(4, 1, sharex=True)
        panels = ((self.observed, 'Observed'),
                  (self.seasadj, 'Seas. Adjusted'),
                  (self.trend, 'Trend'),
                  (self.irregular, 'Irregular'))
        for ax, (series, label) in zip(axes, panels):
            series.plot(ax=ax, legend=False)
            ax.set_ylabel(label)
        fig.tight_layout()
        return fig
if __name__ == "__main__":
    # Ad-hoc smoke test against a locally installed x12a binary.
    import numpy as np
    from statsmodels.tsa.arima_process import ArmaProcess
    np.random.seed(123)
    ar = [1, .35, .8]
    ma = [1, .8]
    arma = ArmaProcess(ar, ma, nobs=100)
    assert arma.isstationary()
    assert arma.isinvertible()
    y = arma.generate_sample()
    dates = pd.date_range("1/1/1990", periods=len(y), freq='M')
    # NOTE(review): pd.TimeSeries was removed from pandas long ago; this
    # demo cannot run on modern pandas -- pd.Series is the replacement.
    ts = pd.TimeSeries(y, index=dates)
    # NOTE(review): hard-coded developer path; x13_arima_analysis takes
    # ``endog`` as its first positional argument, so passing xpath first
    # looks stale relative to the current signature -- confirm.
    xpath = "/home/skipper/src/x12arima/x12a"
    try:
        results = x13_arima_analysis(xpath, ts)
    except:
        print("Caught exception")
        results = x13_arima_analysis(xpath, ts, log=False)
    # import pandas as pd
    # seas_y = pd.read_csv("usmelec.csv")
    # seas_y = pd.TimeSeries(seas_y["usmelec"].values,
    # index=pd.DatetimeIndex(seas_y["date"], freq="MS"))
    # results = x13_arima_analysis(xpath, seas_y)
| bsd-3-clause |
kristi/Lyx | development/tools/generate_symbols_list.py | 3 | 3004 | #!/usr/bin/python
import sys,string,re,os
def get_code(code, font):
    """Map a raw symbol code to its effective font position.

    Codes below 10 shift by 161, codes 10-31 shift by 163, the rest pass
    through unchanged.  ``font`` is accepted but not used.
    """
    if code < 10:
        return code + 161
    if code < 32:
        return code + 163
    return code
# Shared mutable state filled in by process():
# font_names: LaTeX symbol-font alias -> real font name
font_names = {}
# symbols: symbol name -> emitted code; xsymbols: symbol name -> X font code
symbols = {}
xsymbols = {}
# Symbols deliberately excluded from the generated table.
ignore_list = ["not", "braceld", "bracerd", "bracelu", "braceru",
               "lmoustache", "rmoustache", "lgroup", "rgroup", "bracevert"]
def process(file):
    """Parse one LaTeX font-definition file and print LyX symbol entries.

    Updates the module-level font_names/symbols/xsymbols tables as a side
    effect.  NOTE: Python 2 only (xrange, string.atoi, dict.has_key,
    print statement).
    """
    fh = open(file)
    lines = fh.readlines()
    fh.close()
    n = len(lines)
    for i in xrange(n):
        line = lines[i]
        next_line = ""
        if i+1 < n:
            next_line = lines[i+1]
        # some entries are spread over two lines so we join the next line
        # to the current one, (if current line contains a comment, we remove it)
        line = string.split(line,'%')[0]+next_line
        # \DeclareSymbolFont{alias}{...}{fontname} -> record the alias.
        mo = re.match(r'.*\\DeclareSymbolFont\s*\{(.*?)\}\s*\{(.*?)\}\s*\{(.*?)\}.*', line)
        if mo != None:
            font_names[mo.group(1)] = mo.group(3)
        # \DeclareMathSymbol / \DeclareMathDelimiter entries.
        mo = re.match(r'.*\\DeclareMath(Symbol|Delimiter)\s*\{?\\(\w*?)\}?\s*\{?\\(.*?)\}?\s*\{(.*?)\}\s*\{"(.*?)\}.*', line)
        if mo != None:
            symbol = mo.group(2)
            type = mo.group(3)
            font = mo.group(4)
            code = mo.group(5)
        else:
            # \edef-style definitions using \hexnumber@\sym<font>.
            mo = re.match(r'.*\\edef\\(\w*?)\{.*?\{\\hexnumber@\\sym(.*?)\}(.*?)\}', line)
            if mo != None:
                symbol = mo.group(1)
                type = "mathord"
                font = mo.group(2)
                code = mo.group(3)
        if mo != None and symbol not in ignore_list:
            # An "Xop" symbol followed by \def\X{...} is emitted under "X".
            mo2 = re.match(r'\s*\\def\\(.*?)\{', next_line)
            if mo2 != None and symbol == mo2.group(1)+"op":
                sys.stderr.write("%s -> %s\n" % (symbol, mo2.group(1)))
                symbol = mo2.group(1)
            if font_names.has_key(font):
                font = font_names[font]
            code = get_code(string.atoi(code, 16), font)
            if code == 0:
                continue
            xcode = 0
            if xsymbols.has_key(symbol):
                xcode = xsymbols[symbol]
                del xsymbols[symbol]
            if symbols.has_key(symbol):
                # Duplicate definition: warn, and flag conflicting codes.
                sys.stderr.write(symbol+ " exists\n")
                if code != symbols[symbol]:
                    sys.stderr.write("code is not equal!!!\n")
            else:
                symbols[symbol] = code
                print "%-18s %-4s %3d %3d %-6s" % (symbol,font,code,xcode,type)
# Script body (Python 2): load the X-font symbol table that ships next to
# this script, process every LaTeX file given on the command line, then
# emit fixed exceptions and report leftover X-only symbols on stderr.
path = os.path.split(sys.argv[0])[0]
fh = open(os.path.join(path, "x-font"))
lines = fh.readlines()
fh.close()
for line in lines:
    x = string.split(line)
    symbol = x[0]
    code = string.atoi(x[1],16)
    xsymbols[symbol] = code
for file in sys.argv[1:]:
    print "# Generated from " + os.path.basename(file) + "\n"
    process(file)
    print
# Hand-maintained entries that the parser cannot derive from the sources.
exceptions = [
    ("neq", "x", 0, 185, "mathrel"),
    ("textdegree", "x", 0, 176, "mathord"),
    ("cong", "x", 0, 64, "mathrel"),
    ("surd", "x", 0, 214, "mathord")
    ]
# If "leq" survived in xsymbols, the math fonts were not processed; stop.
if xsymbols.has_key("leq"):
    sys.exit(0)
for x in exceptions:
    print "%-18s %-4s %3d %3d %-6s" % x
    if xsymbols.has_key(x[0]):
        del xsymbols[x[0]]
print """
lyxbar cmsy 161 0 mathord
lyxeq cmr 61 0 mathord
lyxdabar msa 57 0 mathord
lyxright msa 75 0 mathord
lyxleft msa 76 0 mathord
"""
# Anything left over exists only in the X font table; report it.
for symbol in xsymbols.keys():
    sys.stderr.write(symbol+"\n")
| gpl-2.0 |
ESS-LLP/erpnext-medical | erpnext/patches/v10_0/update_warehouse_address_details.py | 17 | 1260 | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Migration patch: copy legacy address/contact columns stored directly
	on Warehouse into linked Address and Contact documents.

	Only warehouses that actually have an address line, mobile number or
	email are migrated.  Uses db_insert(), which bypasses normal document
	validation, and deliberately skips records that already exist.
	"""
	warehouse = frappe.db.sql("""select name, email_id, phone_no, mobile_no, address_line_1,
		address_line_2, city, state, pin from `tabWarehouse` where ifnull(address_line_1, '') != ''
		or ifnull(mobile_no, '') != ''
		or ifnull(email_id, '') != '' """, as_dict=1)
	for d in warehouse:
		try:
			address = frappe.new_doc('Address')
			address.name = d.name
			address.address_title = d.name
			address.address_line1 = d.address_line_1
			address.city = d.city
			address.state = d.state
			address.pincode = d.pin
			address.db_insert()
			# Dynamic link back to the source warehouse.
			address.append('links',{'link_doctype':'Warehouse','link_name':d.name})
			address.links[0].db_insert()
			# Only create a Contact when there is contact data to preserve.
			if d.name and (d.email_id or d.mobile_no or d.phone_no):
				contact = frappe.new_doc('Contact')
				contact.name = d.name
				contact.first_name = d.name
				contact.mobile_no = d.mobile_no
				contact.email_id = d.email_id
				contact.phone = d.phone_no
				contact.db_insert()
				contact.append('links',{'link_doctype':'Warehouse','link_name':d.name})
				contact.links[0].db_insert()
		except frappe.DuplicateEntryError:
			# Intentional best-effort: an Address/Contact with this name
			# already exists, so leave it untouched and continue.
			pass
| gpl-3.0 |
Laurawly/tvm-1 | tests/python/contrib/test_random.py | 4 | 5099 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm.contrib import random
from tvm import rpc
import tvm.testing
def test_randint():
    """tvm.contrib.random.randint: check mean and value range on CPU."""
    m = 10240
    n = 10240
    A = random.randint(-127, 128, size=(m, n), dtype="int32")
    s = te.create_schedule(A.op)
    def verify(target="llvm"):
        # Skip gracefully when the target or the extern op is unavailable.
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.randint", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        # With ~100M draws from [-127, 128) the mean is near 0 and both
        # extremes of the closed output range are attained.
        assert abs(np.mean(na)) < 0.3
        assert np.min(na) == -127
        assert np.max(na) == 127
    verify()
def test_uniform():
    """tvm.contrib.random.uniform: check mean, min and max on CPU."""
    m = 10240
    n = 10240
    A = random.uniform(0, 1, size=(m, n))
    s = te.create_schedule(A.op)
    def verify(target="llvm"):
        # Skip gracefully when the target or the extern op is unavailable.
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.uniform", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        f = tvm.build(s, [A], target)
        a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), ctx)
        f(a)
        na = a.asnumpy()
        # Uniform on [0, 1): mean near 0.5, extremes near the bounds.
        assert abs(np.mean(na) - 0.5) < 1e-1
        assert abs(np.min(na) - 0.0) < 1e-3
        assert abs(np.max(na) - 1.0) < 1e-3
    verify()
def test_normal():
    """Smoke-test tvm.contrib.random.normal with mean 3 and stddev 4."""
    rows, cols = 10240, 10240
    A = random.normal(3, 4, size=(rows, cols))
    s = te.create_schedule(A.op)

    def verify(target="llvm"):
        # Bail out when either the target or the extern packed func is missing.
        if not tvm.testing.device_enabled(target):
            print("skip because %s is not enabled..." % target)
            return
        if not tvm.get_global_func("tvm.contrib.random.normal", True):
            print("skip because extern function is not available")
            return
        ctx = tvm.cpu(0)
        fn = tvm.build(s, [A], target)
        out = tvm.nd.array(np.zeros((rows, cols), dtype=A.dtype), ctx)
        fn(out)
        host = out.asnumpy()
        # Sample statistics should be close to the requested mean/stddev.
        assert abs(np.mean(host) - 3) < 1e-1
        assert abs(np.std(host) - 4) < 1e-2

    verify()
@tvm.testing.uses_gpu
def test_random_fill():
    """Check tvm.contrib.random.random_fill locally and over RPC.

    For every supported dtype, fills an empty 512x512 NDArray and verifies
    that every entry is non-zero and that simple arithmetic on the values
    stays finite.

    Bug fixed: the dtype list previously contained "int32" twice and never
    exercised "uint32"; the duplicate is replaced with "uint32".
    """
    def _check_filled(value):
        # All 512*512 entries must have been overwritten with non-zero data.
        assert np.count_nonzero(value.asnumpy()) == 512 * 512
        # make sure arithmetic doesn't overflow too
        np_values = value.asnumpy()
        assert np.isfinite(np_values * np_values + np_values).any()

    def test_local(ctx, dtype):
        if not tvm.get_global_func("tvm.contrib.random.random_fill", True):
            print("skip because extern function is not available")
            return
        np_ones = np.ones((512, 512), dtype=dtype)
        value = tvm.nd.empty(np_ones.shape, np_ones.dtype, ctx)
        random_fill = tvm.get_global_func("tvm.contrib.random.random_fill")
        random_fill(value)
        _check_filled(value)

    def test_rpc(dtype):
        if not tvm.get_global_func("tvm.contrib.random.random_fill", True):
            print("skip because extern function is not available")
            return
        if not tvm.testing.device_enabled("rpc") or not tvm.runtime.enabled("llvm"):
            return
        np_ones = np.ones((512, 512), dtype=dtype)
        server = rpc.Server("localhost")
        remote = rpc.connect(server.host, server.port)
        value = tvm.nd.empty(np_ones.shape, np_ones.dtype, remote.cpu())
        random_fill = remote.get_function("tvm.contrib.random.random_fill")
        random_fill(value)
        _check_filled(value)

    for dtype in [
        "bool",
        "int8",
        "uint8",
        "int16",
        "uint16",
        "int32",
        "uint32",  # was a duplicated "int32"; cover the unsigned variant
        "int64",
        "uint64",
        "float16",
        "float32",
        "float64",
    ]:
        for _, ctx in tvm.testing.enabled_targets():
            test_local(ctx, dtype)
        test_rpc(dtype)
if __name__ == "__main__":
    # Run each contrib.random smoke test directly when executed as a script.
    test_randint()
    test_uniform()
    test_normal()
    test_random_fill()
| apache-2.0 |
TeamTwisted/external_chromium_org | chrome/common/extensions/docs/server2/content_provider.py | 41 | 8409 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mimetypes
import posixpath
import traceback
from compiled_file_system import SingleFile
from directory_zipper import DirectoryZipper
from docs_server_utils import ToUnicode
from file_system import FileNotFoundError
from future import All, Future
from path_canonicalizer import PathCanonicalizer
from path_util import AssertIsValid, IsDirectory, Join, ToDirectory
from special_paths import SITE_VERIFICATION_FILE
from third_party.markdown import markdown
from third_party.motemplate import Motemplate
# Extension -> content-type overrides consulted before mimetypes.guess_type.
_MIMETYPE_OVERRIDES = {
  # SVG is not supported by mimetypes.guess_type on AppEngine.
  '.svg': 'image/svg+xml',
}
class ContentAndType(object):
  '''Value object returned by ContentProvider.GetContentAndType: bundles a
  file's content, its HTTP content-type, and a version token.
  '''

  def __init__(self, content, content_type, version):
    self.version = version
    self.content_type = content_type
    self.content = content
class ContentProvider(object):
  '''Returns file contents correctly typed for their content-types (in the HTTP
  sense). Content-type is determined from Python's mimetype library which
  guesses based on the file extension.

  Typically the file contents will be either str (for binary content) or
  unicode (for text content). However, HTML files *may* be returned as
  Motemplate templates (if |supports_templates| is True on construction), in
  which case the caller will presumably want to Render them.

  Zip file are automatically created and returned for .zip file extensions if
  |supports_zip| is True.

  |default_extensions| is a list of file extensions which are queried when no
  file extension is given to GetCanonicalPath/GetContentAndType. Typically
  this will include .html.
  '''

  def __init__(self,
               name,
               compiled_fs_factory,
               file_system,
               object_store_creator,
               default_extensions=(),
               supports_templates=False,
               supports_zip=False):
    # Public.
    self.name = name
    self.file_system = file_system
    # Private.
    # Caches ContentAndType instances keyed by file, built by _CompileContent.
    self._content_cache = compiled_fs_factory.Create(file_system,
                                                     self._CompileContent,
                                                     ContentProvider)
    self._path_canonicalizer = PathCanonicalizer(file_system,
                                                 object_store_creator,
                                                 default_extensions)
    self._default_extensions = default_extensions
    self._supports_templates = supports_templates
    # Zipping is optional; when disabled, .zip requests are not special-cased.
    if supports_zip:
      self._directory_zipper = DirectoryZipper(compiled_fs_factory, file_system)
    else:
      self._directory_zipper = None

  @SingleFile
  def _CompileContent(self, path, text):
    '''Converts raw |text| at |path| into a ContentAndType, choosing the
    content-type from _MIMETYPE_OVERRIDES or mimetypes.guess_type and
    rendering Markdown to HTML (optionally as a Motemplate template).
    '''
    assert text is not None, path
    _, ext = posixpath.splitext(path)
    mimetype = _MIMETYPE_OVERRIDES.get(ext, mimetypes.guess_type(path)[0])
    if ext == '.md':
      # See http://pythonhosted.org/Markdown/extensions
      # for details on "extensions=".
      content = markdown(ToUnicode(text),
                         extensions=('extra', 'headerid', 'sane_lists'))
      if self._supports_templates:
        content = Motemplate(content, name=path)
      mimetype = 'text/html'
    elif mimetype is None:
      # Unknown extension: serve the raw bytes as plain text.
      content = text
      mimetype = 'text/plain'
    elif mimetype == 'text/html':
      content = ToUnicode(text)
      if self._supports_templates:
        content = Motemplate(content, name=path)
    elif (mimetype.startswith('text/') or
          mimetype in ('application/javascript', 'application/json')):
      content = ToUnicode(text)
    else:
      # Binary content (images etc.) is passed through untouched.
      content = text
    return ContentAndType(content,
                          mimetype,
                          self.file_system.Stat(path).version)

  def GetCanonicalPath(self, path):
    '''Gets the canonical location of |path|. This class is tolerant of
    spelling errors and missing files that are in other directories, and this
    returns the correct/canonical path for those.

    For example, the canonical path of "browseraction" is probably
    "extensions/browserAction.html".

    Note that the canonical path is relative to this content provider i.e.
    given relative to |path|. It does not add the "serveFrom" prefix which
    would have been pulled out in ContentProviders, callers must do that
    themselves.
    '''
    AssertIsValid(path)
    base, ext = posixpath.splitext(path)
    if self._directory_zipper and ext == '.zip':
      # The canonical location of zip files is the canonical location of the
      # directory to zip + '.zip'.
      return self._path_canonicalizer.Canonicalize(base + '/').rstrip('/') + ext
    return self._path_canonicalizer.Canonicalize(path)

  def GetContentAndType(self, path):
    '''Returns a Future to the ContentAndType of the file at |path|.
    '''
    AssertIsValid(path)
    base, ext = posixpath.splitext(path)
    # Zip files are generated on the fly rather than read from the file system.
    if self._directory_zipper and ext == '.zip':
      return (self._directory_zipper.Zip(ToDirectory(base))
              .Then(lambda zipped: ContentAndType(zipped,
                                                  'application/zip',
                                                  None)))
    return self._FindFileForPath(path).Then(self._content_cache.GetFromFile)

  def GetVersion(self, path):
    '''Returns a Future to the version of the file at |path|.
    '''
    AssertIsValid(path)
    base, ext = posixpath.splitext(path)
    if self._directory_zipper and ext == '.zip':
      # For zips, the version is that of the directory being zipped.
      stat_future = self.file_system.StatAsync(ToDirectory(base))
    else:
      stat_future = self._FindFileForPath(path).Then(self.file_system.StatAsync)
    return stat_future.Then(lambda stat: stat.version)

  def _FindFileForPath(self, path):
    '''Finds the real file backing |path|. This may require looking for the
    correct file extension, or looking for an 'index' file if it's a directory.
    Returns None if no path is found.
    '''
    AssertIsValid(path)
    _, ext = posixpath.splitext(path)

    if ext:
      # There was already an extension, trust that it's a path. Elsewhere
      # up the stack this will be caught if it's not.
      return Future(value=path)

    def find_file_with_name(name):
      '''Tries to find a file in the file system called |name| with one of the
      default extensions of this content provider.
      If none is found, returns None.
      '''
      paths = [name + ext for ext in self._default_extensions]
      def get_first_path_which_exists(existence):
        for exists, path in zip(existence, paths):
          if exists:
            return path
        return None
      return (All(self.file_system.Exists(path) for path in paths)
              .Then(get_first_path_which_exists))

    def find_index_file():
      '''Tries to find an index file in |path|, if |path| is a directory.
      If not, or if there is no index file, returns None.
      '''
      def get_index_if_directory_exists(directory_exists):
        if not directory_exists:
          return None
        return find_file_with_name(Join(path, 'index'))
      return (self.file_system.Exists(ToDirectory(path))
              .Then(get_index_if_directory_exists))

    # Try to find a file with the right name. If not, and it's a directory,
    # look for an index file in that directory. If nothing at all is found,
    # return the original |path| - its nonexistence will be caught up the stack.
    return (find_file_with_name(path)
            .Then(lambda found: found or find_index_file())
            .Then(lambda found: found or path))

  def Refresh(self):
    '''Pre-warms the content cache by compiling every file in the file system,
    plus the extension-less aliases for files with default extensions.
    Returns a Future over all the compile operations.
    '''
    futures = [self._path_canonicalizer.Refresh()]
    for root, _, files in self.file_system.Walk(''):
      for f in files:
        futures.append(self.GetContentAndType(Join(root, f)))
        # Also cache the extension-less version of the file if needed.
        base, ext = posixpath.splitext(f)
        if f != SITE_VERIFICATION_FILE and ext in self._default_extensions:
          futures.append(self.GetContentAndType(Join(root, base)))
      # TODO(kalman): Cache .zip files for each directory (if supported).
    return All(futures, except_pass=Exception, except_pass_log=True)

  def __repr__(self):
    return 'ContentProvider of <%s>' % repr(self.file_system)
| bsd-3-clause |
sgraham/nope | third_party/opus/convert_rtcd_assembler.py | 89 | 1531 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Script for converting celt_pitch_xcorr_arm.s -> celt_pitch_xcorr_arm.S
# using the arm2gnu.pl script.
import os
import sys
# NOTE: this script uses Python 2 syntax (print statements); it is invoked by
# the build system with a Python 2 interpreter.
USAGE = ('Usage:\n'
         './convert_rtcd_assembler.py arm2gnu_script input_file output_file')


def main(argv):
  # Validate the three required positional arguments.
  if len(argv) != 3:
    print >> sys.stderr, ('Error: You must pass the following arguments:\n'
                          ' * arm2gnu_script_path\n'
                          ' * input_file\n'
                          ' * output_file')
    print USAGE
    return 1

  arm2gnu_script = os.path.abspath(argv[0])
  if not os.path.exists(arm2gnu_script):
    print >> sys.stderr, ('Error: Cannot find arm2gnu.pl script at: %s.' %
                          arm2gnu_script)
    return 2

  input_file = os.path.abspath(argv[1])
  if not os.path.exists(input_file):
    print >> sys.stderr, 'Error: Cannot find input file at: %s.' % input_file
    return 3

  output_file = argv[2]
  # Ensure the output file's directory path exists.
  output_dir = os.path.dirname(output_file)
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  # Pipe arm2gnu.pl output through sed to force the OPUS_ARM_MAY_HAVE_* macros
  # on and drop .include directives, then write the result to |output_file|.
  cmd = ('perl %s %s | '
         'sed "s/OPUS_ARM_MAY_HAVE_[A-Z]*/1/g" | '
         'sed "/.include/d" '
         '> %s') % (arm2gnu_script, input_file, output_file)
  print cmd
  # Return the shell's exit status so build failures propagate.
  return os.system(cmd)

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
RockySteveJobs/python-for-android | python3-alpha/python3-src/Lib/logging/config.py | 45 | 34770 | # Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, os, traceback, re
import types, io
try:
import _thread as thread
import threading
except ImportError:
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
# Default TCP port on which listen() accepts new logging configurations.
DEFAULT_LOGGING_CONFIG_PORT = 9030

# Errno value signalling a reset connection differs between Windows and POSIX.
if sys.platform == "win32":
    RESET_ERROR = 10054   #WSAECONNRESET
else:
    RESET_ERROR = 104     #ECONNRESET

#
#   The following code implements a socket listener for on-the-fly
#   reconfiguration of logging.
#
#   _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    """
    Read the logging configuration from a ConfigParser-format file.

    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).

    *fname* may be a filename or an open file-like object (detected via a
    ``readline`` attribute). *defaults* is passed straight to ConfigParser.
    When *disable_existing_loggers* is true, loggers that existed before the
    call but are absent from the new configuration are disabled.
    """
    import configparser

    cp = configparser.ConfigParser(defaults)
    # A file-like object is read directly; otherwise treat fname as a path.
    if hasattr(fname, 'readline'):
        cp.read_file(fname)
    else:
        cp.read(fname)

    formatters = _create_formatters(cp)

    # critical section: the module-wide lock guards logging's handler registry
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _encoded(s):
return s if isinstance(s, str) else s.encode('utf-8')
def _create_formatters(cp):
    """Create and return formatters.

    Reads the comma-separated [formatters] "keys" entry of ConfigParser *cp*
    and builds a dict mapping each formatter name to a Formatter instance
    constructed from its "formatter_<name>" section.
    """
    flist = cp["formatters"]["keys"]
    if not len(flist):
        return {}
    flist = flist.split(",")
    flist = _strip_spaces(flist)
    formatters = {}
    for form in flist:
        sectname = "formatter_%s" % form
        # raw=True stops ConfigParser interpolating '%' in format strings.
        fs = cp.get(sectname, "format", raw=True, fallback=None)
        dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
        c = logging.Formatter
        class_name = cp[sectname].get("class")
        if class_name:
            # An optional "class" entry names a custom Formatter subclass.
            c = _resolve(class_name)
        f = c(fs, dfs)
        formatters[form] = f
    return formatters
def _install_handlers(cp, formatters):
    """Install and return handlers.

    Builds each handler named in the [handlers] "keys" entry from its
    "handler_<name>" section. NOTE: the "class" and "args" entries are
    passed to eval() with logging's namespace - the config file is trusted.
    """
    hlist = cp["handlers"]["keys"]
    if not len(hlist):
        return {}
    hlist = hlist.split(",")
    hlist = _strip_spaces(hlist)
    handlers = {}
    fixups = [] #for inter-handler references
    for hand in hlist:
        section = cp["handler_%s" % hand]
        klass = section["class"]
        fmt = section.get("formatter", "")
        try:
            # Try resolving within the logging namespace first (e.g.
            # "handlers.RotatingFileHandler"), else as a full dotted path.
            klass = eval(klass, vars(logging))
        except (AttributeError, NameError):
            klass = _resolve(klass)
        args = section["args"]
        args = eval(args, vars(logging))
        h = klass(*args)
        if "level" in section:
            level = section["level"]
            h.setLevel(logging._levelNames[level])
        if len(fmt):
            h.setFormatter(formatters[fmt])
        if issubclass(klass, logging.handlers.MemoryHandler):
            target = section.get("target", "")
            if len(target): #the target handler may not be loaded yet, so keep for later...
                fixups.append((h, target))
        handlers[hand] = h
    #now all handlers are loaded, fixup inter-handler references...
    for h, t in fixups:
        h.setTarget(handlers[t])
    return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
    """
    When (re)configuring logging, handle loggers which were in the previous
    configuration but are not in the new configuration. There's no point
    deleting them as other threads may continue to hold references to them;
    and by disabling them, you stop them doing any logging.

    However, don't disable children of named loggers, as that's probably not
    what was intended by the user. Also, allow existing loggers to NOT be
    disabled if disable_existing is false.
    """
    root = logging.root
    for log in existing:
        logger = root.manager.loggerDict[log]
        if log in child_loggers:
            # Child of a configured logger: reset it so it delegates upward.
            logger.level = logging.NOTSET
            logger.handlers = []
            logger.propagate = True
        elif disable_existing:
            logger.disabled = True
def _install_loggers(cp, handlers, disable_existing):
    """Create and install loggers.

    Configures the root logger from [logger_root], then each named logger
    from its [logger_<name>] section, attaching handlers from *handlers*.
    Loggers present before the call but absent from the new configuration
    are reset or disabled via _handle_existing_loggers.
    """

    # configure the root first
    llist = cp["loggers"]["keys"]
    llist = llist.split(",")
    llist = list(map(lambda x: x.strip(), llist))
    llist.remove("root")
    section = cp["logger_root"]
    root = logging.root
    log = root
    if "level" in section:
        level = section["level"]
        log.setLevel(logging._levelNames[level])
    # Iterate over a copy since removeHandler mutates root.handlers.
    for h in root.handlers[:]:
        root.removeHandler(h)
    hlist = section["handlers"]
    if len(hlist):
        hlist = hlist.split(",")
        hlist = _strip_spaces(hlist)
        for hand in hlist:
            log.addHandler(handlers[hand])

    #and now the others...
    #we don't want to lose the existing loggers,
    #since other threads may have pointers to them.
    #existing is set to contain all existing loggers,
    #and as we go through the new configuration we
    #remove any which are configured. At the end,
    #what's left in existing is the set of loggers
    #which were in the previous configuration but
    #which are not in the new configuration.
    existing = list(root.manager.loggerDict.keys())
    #The list needs to be sorted so that we can
    #avoid disabling child loggers of explicitly
    #named loggers. With a sorted list it is easier
    #to find the child loggers.
    existing.sort(key=_encoded)
    #We'll keep the list of existing loggers
    #which are children of named loggers here...
    child_loggers = []
    #now set up the new ones...
    for log in llist:
        section = cp["logger_%s" % log]
        qn = section["qualname"]
        propagate = section.getint("propagate", fallback=1)
        logger = logging.getLogger(qn)
        if qn in existing:
            i = existing.index(qn) + 1 # start with the entry after qn
            prefixed = qn + "."
            pflen = len(prefixed)
            num_existing = len(existing)
            # Collect every logger whose name starts with "qn." - these are
            # children of the logger being (re)configured.
            while i < num_existing:
                if existing[i][:pflen] == prefixed:
                    child_loggers.append(existing[i])
                i += 1
            existing.remove(qn)
        if "level" in section:
            level = section["level"]
            logger.setLevel(logging._levelNames[level])
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        logger.propagate = propagate
        logger.disabled = 0
        hlist = section["handlers"]
        if len(hlist):
            hlist = hlist.split(",")
            hlist = _strip_spaces(hlist)
            for hand in hlist:
                logger.addHandler(handlers[hand])

    #Disable any old loggers. There's no point deleting
    #them as other threads may continue to hold references
    #and by disabling them, you stop them doing any logging.
    #However, don't disable children of named loggers, as that's
    #probably not what was intended by the user.
    #for log in existing:
    #    logger = root.manager.loggerDict[log]
    #    if log in child_loggers:
    #        logger.level = logging.NOTSET
    #        logger.handlers = []
    #        logger.propagate = 1
    #    elif disable_existing_loggers:
    #        logger.disabled = 1
    _handle_existing_loggers(existing, child_loggers, disable_existing)
# ASCII identifier pattern (case-insensitive): letter/underscore then
# letters, digits or underscores.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True when *s* is a valid identifier; raise ValueError otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values are passed through ``self.configurator.convert`` on access; the
    ``configurator`` attribute is assigned externally (see BaseConfigurator).
    """

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                # Record where the wrapper lives so cfg:// lookups can climb.
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        # Unlike __getitem__/get, a popped value is not written back.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper.

    Items are passed through ``self.configurator.convert`` on access; the
    ``configurator`` attribute is assigned externally.
    """

    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        # Popped values are converted but not written back into the list.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper.

    Tuples are immutable, so converted values cannot be cached back; the
    conversion runs on every access.
    """

    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.

    Provides the string-conversion machinery: values matching
    "<prefix>://<suffix>" are converted via the handler registered for
    <prefix> in ``value_converters`` ("ext://" resolves dotted imports,
    "cfg://" looks up another part of the configuration).
    """

    # Matches "prefix://suffix" conversion strings.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Tokens used to walk a cfg:// path: words, ".attr" steps, "[index]" steps.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = staticmethod(__import__)

    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # The attribute may be a submodule not yet imported.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            # Preserve the original cause and traceback on the raised error.
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            #print d, rest
            # Walk the remaining ".attr" / "[index]" steps of the path.
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, str): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        The factory is the '()' entry (a callable or dotted path); '.'-keyed
        entries become attributes on the result, all other valid-identifier
        keys are passed as keyword arguments.
        """
        c = config.pop('()')
        if not hasattr(c, '__call__'):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort(key=_encoded)
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
config['target'] = self.config['handlers'][config['target']]
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
    """Attach each named handler from the shared config to *logger*.

    Mirrors add_filters(): lookup/attach failures surface as ValueError
    naming the offending handler.
    """
    for handler_name in handlers:
        try:
            logger.addHandler(self.config['handlers'][handler_name])
        except Exception as e:
            raise ValueError('Unable to add handler %r: %s' % (handler_name, e))
def common_logger_config(self, logger, config, incremental=False):
    """
    Perform configuration which is common to root and non-root loggers:
    level, handlers and filters.
    """
    level = config.get('level', None)
    if level is not None:
        logger.setLevel(logging._checkLevel(level))
    if incremental:
        # Incremental mode only adjusts levels; existing handlers and
        # filters are left untouched.
        return
    # Remove any existing handlers before installing the configured ones.
    for existing in list(logger.handlers):
        logger.removeHandler(existing)
    handlers = config.get('handlers', None)
    if handlers:
        self.add_handlers(logger, handlers)
    filters = config.get('filters', None)
    if filters:
        self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
    """Configure the named (non-root) logger from a dictionary."""
    log_obj = logging.getLogger(name)
    self.common_logger_config(log_obj, config, incremental)
    # 'propagate' is specific to non-root loggers; only touch it when
    # explicitly present in the config.
    prop = config.get('propagate', None)
    if prop is not None:
        log_obj.propagate = prop
def configure_root(self, config, incremental=False):
    """Configure the root logger from a dictionary."""
    self.common_logger_config(logging.getLogger(), config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
    """Configure logging using a dictionary (PEP 391 schema)."""
    configurator = dictConfigClass(config)
    configurator.configure()
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the server,
    and which you can join() when appropriate. To stop the server, call
    stopListening().
    """
    if not thread:
        raise NotImplementedError("listen() needs threading to work")

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            import tempfile
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    # Keep reading until the whole payload has arrived.
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    chunk = chunk.decode("utf-8")
                    try:
                        # Prefer the JSON/dict form and hand it to dictConfig().
                        import json
                        d = json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        # Not a JSON dict: apply the new configuration as an
                        # ini-style file via fileConfig().
                        file = io.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    errcode = e.args[0]
                    # A connection reset from the peer is expected and
                    # silently ignored; anything else propagates.
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            # self.abort is read/written under the logging module lock.
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready

        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                # Poll with a timeout so self.abort is re-checked regularly
                # (this is how stopListening() terminates the loop).
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()

    class Server(threading.Thread):

        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()

        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                # An ephemeral port was requested; publish the real one.
                self.port = server.server_address[1]
            self.ready.set()
            # Record the live server so stopListening() can find it.
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()

    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    # The module lock guards _listener against the listener thread itself.
    logging._acquireLock()
    try:
        server = _listener
        _listener = None
        if server:
            # The receiver's polling loop notices this flag within one
            # timeout interval and shuts down.
            server.abort = 1
    finally:
        logging._releaseLock()
| apache-2.0 |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/site-packages/cryptography-0.5.2-py2.7-linux-x86_64.egg/cryptography/hazmat/bindings/commoncrypto/binding.py | 2 | 1658 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import platform
import sys
from cryptography.hazmat.bindings.utils import build_ffi
class Binding(object):
    """
    CommonCrypto API wrapper.

    The cffi interface is built lazily, once per process, and shared by
    all instances through the class attributes ``ffi`` and ``lib``.
    """
    _module_prefix = "cryptography.hazmat.bindings.commoncrypto."
    _modules = [
        "cf",
        "common_digest",
        "common_hmac",
        "common_key_derivation",
        "common_cryptor",
        "secimport",
        "secitem",
        "seckey",
        "seckeychain",
        "sectransform",
    ]

    ffi = None
    lib = None

    def __init__(self):
        self._ensure_ffi_initialized()

    @classmethod
    def _ensure_ffi_initialized(cls):
        # Build the ffi/lib pair only on first use; later calls are no-ops.
        if cls.ffi is None or cls.lib is None:
            cls.ffi, cls.lib = build_ffi(
                module_prefix=cls._module_prefix,
                modules=cls._modules,
                extra_link_args=["-framework", "Security"]
            )

    @classmethod
    def is_available(cls):
        # CommonCrypto requires OS X 10.8 or later.
        if sys.platform != "darwin":
            return False
        version = [int(part) for part in platform.mac_ver()[0].split(".")]
        return version >= [10, 8, 0]
| apache-2.0 |
dakoner/keras-molecules | interpolate.py | 4 | 3090 | from __future__ import print_function
import h5py
import numpy
import os
import argparse
import sample
from molecules.model import MoleculeVAE
from molecules.utils import decode_smiles_from_indexes
from molecules.utils import one_hot_array, one_hot_index
# Default interpolation endpoints (SMILES strings) and hyper-parameters.
# All of these can be overridden on the command line (see get_arguments()).
SOURCE = 'Cc1ccnc(c1)NC(=O)Cc2cccc3c2cccc3'
DEST = 'c1cc(cc(c1)Cl)NNC(=O)c2cc(cnc2)Br'
LATENT_DIM = 292   # size of the VAE latent representation
STEPS = 100        # number of interpolation steps
WIDTH = 120        # padded width (max length) of SMILES strings
def get_arguments():
    """Parse command-line arguments for the interpolation script.

    Fixes two copy-paste bugs in the original help texts: ``--dest``
    claimed to be the *source* string and ``--width`` claimed to be the
    latent dimensionality.
    """
    parser = argparse.ArgumentParser(description='Interpolate from source to dest in steps')
    parser.add_argument('data', type=str, help='The HDF5 file containing preprocessed data.')
    parser.add_argument('model', type=str, help='Trained Keras model to use.')
    parser.add_argument('--source', type=str, default=SOURCE,
                        help='Source SMILES string for interpolation')
    parser.add_argument('--dest', type=str, default=DEST,
                        help='Destination SMILES string for interpolation')
    parser.add_argument('--latent_dim', type=int, metavar='N', default=LATENT_DIM,
                        help='Dimensionality of the latent representation.')
    parser.add_argument('--width', type=int, default=WIDTH,
                        help='Padded width (max length) of SMILES strings.')
    parser.add_argument('--steps', type=int, default=STEPS,
                        help='Number of steps to take while interpolating between source and dest')
    return parser.parse_args()
def interpolate(source, dest, steps, charset, model, latent_dim, width):
    """Linearly interpolate between two SMILES strings in latent space.

    Encodes *source* and *dest*, walks ``steps`` evenly spaced points from
    the source latent vector towards the destination one, and decodes each
    point. Returns a list of (step_index, latent_vector, decoded_smiles).

    The original used ``numpy.array(map(...))``, which silently breaks on
    Python 3 where map() returns a lazy iterator (numpy would build a 0-d
    object array); explicit list building works on both Python 2 and 3.
    """
    source_just = source.ljust(width)
    dest_just = dest.ljust(width)

    def _encode(justified):
        # One row of one-hot vectors per character of the padded string.
        rows = [[list(one_hot_array(idx, len(charset)))
                 for idx in one_hot_index(ch, charset)]
                for ch in justified]
        return numpy.array(rows)

    source_x_latent = model.encoder.predict(
        _encode(source_just).reshape(1, width, len(charset)))
    dest_x_latent = model.encoder.predict(
        _encode(dest_just).reshape(1, width, len(charset)))

    step = (dest_x_latent - source_x_latent) / float(steps)
    results = []
    for i in range(steps):
        item = source_x_latent + (step * i)
        sampled = model.decoder.predict(item.reshape(1, latent_dim)).argmax(axis=2)[0]
        sampled = decode_smiles_from_indexes(sampled, charset)
        results.append((i, item, sampled))
    return results
def main():
    """Entry point: load the charset and trained model, then print the
    decoded SMILES string for each interpolation step."""
    args = get_arguments()

    # The preprocessed HDF5 file is only needed for its charset here.
    if os.path.isfile(args.data):
        h5f = h5py.File(args.data, 'r')
        charset = list(h5f['charset'][:])
        h5f.close()
    else:
        raise ValueError("Data file %s doesn't exist" % args.data)

    model = MoleculeVAE()
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size = args.latent_dim)
    else:
        raise ValueError("Model file %s doesn't exist" % args.model)

    results = interpolate(args.source, args.dest, args.steps, charset, model, args.latent_dim, args.width)
    for result in results:
        # result is (step_index, latent_vector, decoded_smiles).
        print(result[0], result[2])
# Allow running the interpolation as a standalone script.
if __name__ == '__main__':
    main()
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/astroid/tests/unittest_builder.py | 2 | 28456 | # copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""tests for the astroid builder and rebuilder module"""
import os
import sys
import unittest
from astroid import builder, nodes, InferenceError, NotFoundError
from astroid.bases import YES, BUILTINS
from astroid.manager import AstroidManager
from astroid import test_utils
from astroid.tests import resources
MANAGER = AstroidManager()
IS_PY3 = sys.version_info[0] == 3
class FromToLineNoTest(unittest.TestCase):
    """Check the fromlineno/tolineno values computed by the astroid builder
    for every kind of multi-line statement (calls, defs, classes, if/for/
    while/try/with)."""

    def setUp(self):
        # data/format.py exercises oddly formatted multi-line constructs.
        self.astroid = resources.build_file('data/format.py')

    def test_callfunc_lineno(self):
        stmts = self.astroid.body
        # on line 4:
        #    function('aeozrijz\
        #    earzer', hop)
        discard = stmts[0]
        self.assertIsInstance(discard, nodes.Discard)
        self.assertEqual(discard.fromlineno, 4)
        self.assertEqual(discard.tolineno, 5)
        callfunc = discard.value
        self.assertIsInstance(callfunc, nodes.CallFunc)
        self.assertEqual(callfunc.fromlineno, 4)
        self.assertEqual(callfunc.tolineno, 5)
        name = callfunc.func
        self.assertIsInstance(name, nodes.Name)
        self.assertEqual(name.fromlineno, 4)
        self.assertEqual(name.tolineno, 4)
        strarg = callfunc.args[0]
        self.assertIsInstance(strarg, nodes.Const)
        if hasattr(sys, 'pypy_version_info'):
            lineno = 4
        else:
            lineno = 5 # no way for this one in CPython (is 4 actually)
        self.assertEqual(strarg.fromlineno, lineno)
        self.assertEqual(strarg.tolineno, lineno)
        namearg = callfunc.args[1]
        self.assertIsInstance(namearg, nodes.Name)
        self.assertEqual(namearg.fromlineno, 5)
        self.assertEqual(namearg.tolineno, 5)
        # on line 10:
        #    fonction(1,
        #             2,
        #             3,
        #             4)
        discard = stmts[2]
        self.assertIsInstance(discard, nodes.Discard)
        self.assertEqual(discard.fromlineno, 10)
        self.assertEqual(discard.tolineno, 13)
        callfunc = discard.value
        self.assertIsInstance(callfunc, nodes.CallFunc)
        self.assertEqual(callfunc.fromlineno, 10)
        self.assertEqual(callfunc.tolineno, 13)
        name = callfunc.func
        self.assertIsInstance(name, nodes.Name)
        self.assertEqual(name.fromlineno, 10)
        self.assertEqual(name.tolineno, 10)
        for i, arg in enumerate(callfunc.args):
            self.assertIsInstance(arg, nodes.Const)
            self.assertEqual(arg.fromlineno, 10+i)
            self.assertEqual(arg.tolineno, 10+i)

    def test_function_lineno(self):
        stmts = self.astroid.body
        # on line 15:
        #    def definition(a,
        #                   b,
        #                   c):
        #        return a + b + c
        function = stmts[3]
        self.assertIsInstance(function, nodes.Function)
        self.assertEqual(function.fromlineno, 15)
        self.assertEqual(function.tolineno, 18)
        return_ = function.body[0]
        self.assertIsInstance(return_, nodes.Return)
        self.assertEqual(return_.fromlineno, 18)
        self.assertEqual(return_.tolineno, 18)
        if sys.version_info < (3, 0):
            self.assertEqual(function.blockstart_tolineno, 17)
        else:
            self.skipTest('FIXME  http://bugs.python.org/issue10445 '
                          '(no line number on function args)')

    def test_decorated_function_lineno(self):
        astroid = test_utils.build_module('''
            @decorator
            def function(
                arg):
                print (arg)
            ''', __name__)
        function = astroid['function']
        self.assertEqual(function.fromlineno, 3) # XXX discussable, but that's what is expected by pylint right now
        self.assertEqual(function.tolineno, 5)
        self.assertEqual(function.decorators.fromlineno, 2)
        self.assertEqual(function.decorators.tolineno, 2)
        if sys.version_info < (3, 0):
            self.assertEqual(function.blockstart_tolineno, 4)
        else:
            self.skipTest('FIXME  http://bugs.python.org/issue10445 '
                          '(no line number on function args)')

    def test_class_lineno(self):
        stmts = self.astroid.body
        # on line 20:
        #    class debile(dict,
        #                 object):
        #        pass
        class_ = stmts[4]
        self.assertIsInstance(class_, nodes.Class)
        self.assertEqual(class_.fromlineno, 20)
        self.assertEqual(class_.tolineno, 22)
        self.assertEqual(class_.blockstart_tolineno, 21)
        pass_ = class_.body[0]
        self.assertIsInstance(pass_, nodes.Pass)
        self.assertEqual(pass_.fromlineno, 22)
        self.assertEqual(pass_.tolineno, 22)

    def test_if_lineno(self):
        stmts = self.astroid.body
        # on line 20:
        #    if aaaa: pass
        #    else:
        #        aaaa,bbbb = 1,2
        #        aaaa,bbbb = bbbb,aaaa
        if_ = stmts[5]
        self.assertIsInstance(if_, nodes.If)
        self.assertEqual(if_.fromlineno, 24)
        self.assertEqual(if_.tolineno, 27)
        self.assertEqual(if_.blockstart_tolineno, 24)
        self.assertEqual(if_.orelse[0].fromlineno, 26)
        self.assertEqual(if_.orelse[1].tolineno, 27)

    def test_for_while_lineno(self):
        # for and while share the same line-numbering logic: check both.
        for code in ('''
            for a in range(4):
              print (a)
              break
            else:
              print ("bouh")
            ''', '''
            while a:
              print (a)
              break
            else:
              print ("bouh")
            '''):
            astroid = test_utils.build_module(code, __name__)
            stmt = astroid.body[0]
            self.assertEqual(stmt.fromlineno, 2)
            self.assertEqual(stmt.tolineno, 6)
            self.assertEqual(stmt.blockstart_tolineno, 2)
            self.assertEqual(stmt.orelse[0].fromlineno, 6) # XXX
            self.assertEqual(stmt.orelse[0].tolineno, 6)

    def test_try_except_lineno(self):
        astroid = test_utils.build_module('''
            try:
              print (a)
            except:
              pass
            else:
              print ("bouh")
            ''', __name__)
        try_ = astroid.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 7)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.orelse[0].fromlineno, 7) # XXX
        self.assertEqual(try_.orelse[0].tolineno, 7)
        hdlr = try_.handlers[0]
        self.assertEqual(hdlr.fromlineno, 4)
        self.assertEqual(hdlr.tolineno, 5)
        self.assertEqual(hdlr.blockstart_tolineno, 4)

    def test_try_finally_lineno(self):
        astroid = test_utils.build_module('''
            try:
              print (a)
            finally:
              print ("bouh")
            ''', __name__)
        try_ = astroid.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 5)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.finalbody[0].fromlineno, 5) # XXX
        self.assertEqual(try_.finalbody[0].tolineno, 5)

    def test_try_finally_25_lineno(self):
        # try/except/finally in a single statement (possible since 2.5).
        astroid = test_utils.build_module('''
            try:
              print (a)
            except:
              pass
            finally:
              print ("bouh")
            ''', __name__)
        try_ = astroid.body[0]
        self.assertEqual(try_.fromlineno, 2)
        self.assertEqual(try_.tolineno, 7)
        self.assertEqual(try_.blockstart_tolineno, 2)
        self.assertEqual(try_.finalbody[0].fromlineno, 7) # XXX
        self.assertEqual(try_.finalbody[0].tolineno, 7)

    def test_with_lineno(self):
        astroid = test_utils.build_module('''
            from __future__ import with_statement
            with file("/tmp/pouet") as f:
                print (f)
            ''', __name__)
        with_ = astroid.body[1]
        self.assertEqual(with_.fromlineno, 3)
        self.assertEqual(with_.tolineno, 4)
        self.assertEqual(with_.blockstart_tolineno, 3)
class BuilderTest(unittest.TestCase):
    """Tests for AstroidBuilder: building trees from source strings, files
    and living objects, plus assorted builder behaviours."""

    def setUp(self):
        self.builder = builder.AstroidBuilder()

    def test_data_build_null_bytes(self):
        # NUL bytes in source must be rejected, not crash the parser.
        with self.assertRaises(builder.AstroidBuildingException):
            self.builder.string_build('\x00')

    def test_missing_newline(self):
        """check that a file with no trailing new line is parseable"""
        resources.build_file('data/noendingnewline.py')

    def test_missing_file(self):
        with self.assertRaises(builder.AstroidBuildingException):
            resources.build_file('data/inexistant.py')

    def test_inspect_build0(self):
        """test astroid tree build from a living object"""
        builtin_ast = MANAGER.ast_from_module_name(BUILTINS)
        if not IS_PY3:
            fclass = builtin_ast['file']
            self.assertIn('name', fclass)
            self.assertIn('mode', fclass)
            self.assertIn('read', fclass)
            self.assertTrue(fclass.newstyle)
            self.assertTrue(fclass.pytype(), '%s.type' % BUILTINS)
            self.assertIsInstance(fclass['read'], nodes.Function)
            # check builtin function has args.args == None
            dclass = builtin_ast['dict']
            self.assertIsNone(dclass['has_key'].args.args)
        # just check type and object are there
        builtin_ast.getattr('type')
        objectastroid = builtin_ast.getattr('object')[0]
        self.assertIsInstance(objectastroid.getattr('__new__')[0], nodes.Function)
        # check open file alias
        builtin_ast.getattr('open')
        # check 'help' is there (defined dynamically by site.py)
        builtin_ast.getattr('help')
        # check property has __init__
        pclass = builtin_ast['property']
        self.assertIn('__init__', pclass)
        self.assertIsInstance(builtin_ast['None'], nodes.Const)
        self.assertIsInstance(builtin_ast['True'], nodes.Const)
        self.assertIsInstance(builtin_ast['False'], nodes.Const)
        if IS_PY3:
            self.assertIsInstance(builtin_ast['Exception'], nodes.Class)
            self.assertIsInstance(builtin_ast['NotImplementedError'], nodes.Class)
        else:
            # on Python 2 these come from the 'exceptions' module.
            self.assertIsInstance(builtin_ast['Exception'], nodes.From)
            self.assertIsInstance(builtin_ast['NotImplementedError'], nodes.From)

    def test_inspect_build1(self):
        time_ast = MANAGER.ast_from_module_name('time')
        self.assertTrue(time_ast)
        self.assertEqual(time_ast['time'].args.defaults, [])

    def test_inspect_build2(self):
        """test astroid tree build from a living object"""
        try:
            from mx import DateTime
        except ImportError:
            self.skipTest('test skipped: mxDateTime is not available')
        else:
            dt_ast = self.builder.inspect_build(DateTime)
            dt_ast.getattr('DateTime')
            # this one is failing since DateTimeType.__module__ = 'builtins' !
            #dt_ast.getattr('DateTimeType')

    def test_inspect_build3(self):
        self.builder.inspect_build(unittest)

    @test_utils.require_version(maxver='3.0')
    def test_inspect_build_instance(self):
        """test astroid tree build from a living object"""
        import exceptions
        builtin_ast = self.builder.inspect_build(exceptions)
        fclass = builtin_ast['OSError']
        # things like OSError.strerror are now (2.5) data descriptors on the
        # class instead of entries in the __dict__ of an instance
        container = fclass
        self.assertIn('errno', container)
        self.assertIn('strerror', container)
        self.assertIn('filename', container)

    def test_inspect_build_type_object(self):
        builtin_ast = MANAGER.ast_from_module_name(BUILTINS)

        infered = list(builtin_ast.igetattr('object'))
        self.assertEqual(len(infered), 1)
        infered = infered[0]
        self.assertEqual(infered.name, 'object')
        infered.as_string() # no crash test

        infered = list(builtin_ast.igetattr('type'))
        self.assertEqual(len(infered), 1)
        infered = infered[0]
        self.assertEqual(infered.name, 'type')
        infered.as_string() # no crash test

    def test_inspect_transform_module(self):
        # ensure no cached version of the time module
        MANAGER._mod_file_cache.pop(('time', None), None)
        MANAGER.astroid_cache.pop('time', None)
        def transform_time(node):
            if node.name == 'time':
                node.transformed = True
        MANAGER.register_transform(nodes.Module, transform_time)
        try:
            time_ast = MANAGER.ast_from_module_name('time')
            self.assertTrue(getattr(time_ast, 'transformed', False))
        finally:
            MANAGER.unregister_transform(nodes.Module, transform_time)

    def test_package_name(self):
        """test base properties and method of a astroid module"""
        datap = resources.build_file('data/__init__.py', 'data')
        self.assertEqual(datap.name, 'data')
        self.assertEqual(datap.package, 1)
        datap = resources.build_file('data/__init__.py', 'data.__init__')
        self.assertEqual(datap.name, 'data')
        self.assertEqual(datap.package, 1)

    def test_yield_parent(self):
        """check if we added discard nodes as yield parent (w/ compiler)"""
        code = """
            def yiell(): #@
                yield 0
                if noe:
                    yield more
        """
        func = test_utils.extract_node(code)
        self.assertIsInstance(func, nodes.Function)
        stmt = func.body[0]
        self.assertIsInstance(stmt, nodes.Discard)
        self.assertIsInstance(stmt.value, nodes.Yield)
        self.assertIsInstance(func.body[1].body[0], nodes.Discard)
        self.assertIsInstance(func.body[1].body[0].value, nodes.Yield)

    def test_object(self):
        obj_ast = self.builder.inspect_build(object)
        self.assertIn('__setattr__', obj_ast)

    def test_newstyle_detection(self):
        data = '''
            class A:
                "old style"

            class B(A):
                "old style"

            class C(object):
                "new style"

            class D(C):
                "new style"

            __metaclass__ = type

            class E(A):
                "old style"

            class F:
                "new style"
            '''
        mod_ast = test_utils.build_module(data, __name__)
        if IS_PY3:
            # Python 3: every class is new-style.
            self.assertTrue(mod_ast['A'].newstyle)
            self.assertTrue(mod_ast['B'].newstyle)
            self.assertTrue(mod_ast['E'].newstyle)
        else:
            self.assertFalse(mod_ast['A'].newstyle)
            self.assertFalse(mod_ast['B'].newstyle)
            self.assertFalse(mod_ast['E'].newstyle)
        self.assertTrue(mod_ast['C'].newstyle)
        self.assertTrue(mod_ast['D'].newstyle)
        # F has no bases but is affected by the module-level __metaclass__.
        self.assertTrue(mod_ast['F'].newstyle)

    def test_globals(self):
        data = '''
            CSTE = 1

            def update_global():
                global CSTE
                CSTE += 1

            def global_no_effect():
                global CSTE2
                print (CSTE)
            '''
        astroid = test_utils.build_module(data, __name__)
        self.assertEqual(len(astroid.getattr('CSTE')), 2)
        self.assertIsInstance(astroid.getattr('CSTE')[0], nodes.AssName)
        self.assertEqual(astroid.getattr('CSTE')[0].fromlineno, 2)
        self.assertEqual(astroid.getattr('CSTE')[1].fromlineno, 6)
        with self.assertRaises(NotFoundError):
            astroid.getattr('CSTE2')
        with self.assertRaises(InferenceError):
            next(astroid['global_no_effect'].ilookup('CSTE2'))

    def test_socket_build(self):
        import socket
        astroid = self.builder.module_build(socket)
        # XXX just check the first one. Actually 3 objects are inferred (look at
        # the socket module) but the last one as those attributes dynamically
        # set and astroid is missing this.
        for fclass in astroid.igetattr('socket'):
            #print fclass.root().name, fclass.name, fclass.lineno
            self.assertIn('connect', fclass)
            self.assertIn('send', fclass)
            self.assertIn('close', fclass)
            break

    def test_gen_expr_var_scope(self):
        data = 'l = list(n for n in range(10))\n'
        astroid = test_utils.build_module(data, __name__)
        # n unavailable outside gen expr scope
        self.assertNotIn('n', astroid)
        # test n is inferable anyway
        n = test_utils.get_name_node(astroid, 'n')
        self.assertIsNot(n.scope(), astroid)
        self.assertEqual([i.__class__ for i in n.infer()],
                         [YES.__class__])

    def test_no_future_imports(self):
        mod = test_utils.build_module("import sys")
        self.assertEqual(set(), mod.future_imports)

    def test_future_imports(self):
        mod = test_utils.build_module("from __future__ import print_function")
        self.assertEqual(set(['print_function']), mod.future_imports)

    def test_two_future_imports(self):
        mod = test_utils.build_module("""
            from __future__ import print_function
            from __future__ import absolute_import
            """)
        self.assertEqual(set(['print_function', 'absolute_import']), mod.future_imports)

    def test_infered_build(self):
        # attributes assigned outside a class body still land in its locals.
        code = '''
            class A: pass
            A.type = "class"

            def A_ass_type(self):
                print (self)
            A.ass_type = A_ass_type
            '''
        astroid = test_utils.build_module(code)
        lclass = list(astroid.igetattr('A'))
        self.assertEqual(len(lclass), 1)
        lclass = lclass[0]
        self.assertIn('ass_type', lclass.locals)
        self.assertIn('type', lclass.locals)

    def test_augassign_attr(self):
        test_utils.build_module("""
            class Counter:
                v = 0
                def inc(self):
                    self.v += 1
            """, __name__)
        # TODO: Check self.v += 1 generate AugAssign(AssAttr(...)),
        # not AugAssign(GetAttr(AssName...))

    def test_infered_dont_pollute(self):
        # attribute assignments on default argument values must not leak
        # onto the shared const factory objects.
        code = '''
            def func(a=None):
                a.custom_attr = 0
            def func2(a={}):
                a.custom_attr = 0
            '''
        test_utils.build_module(code)
        nonetype = nodes.const_factory(None)
        self.assertNotIn('custom_attr', nonetype.locals)
        self.assertNotIn('custom_attr', nonetype.instance_attrs)
        nonetype = nodes.const_factory({})
        self.assertNotIn('custom_attr', nonetype.locals)
        self.assertNotIn('custom_attr', nonetype.instance_attrs)

    def test_asstuple(self):
        code = 'a, b = range(2)'
        astroid = test_utils.build_module(code)
        self.assertIn('b', astroid.locals)
        code = '''
            def visit_if(self, node):
                node.test, body = node.tests[0]
            '''
        astroid = test_utils.build_module(code)
        self.assertIn('body', astroid['visit_if'].locals)

    def test_build_constants(self):
        '''test expected values of constants after rebuilding'''
        code = '''
            def func():
                return None
                return
                return 'None'
            '''
        astroid = test_utils.build_module(code)
        none, nothing, chain = [ret.value for ret in astroid.body[0].body]
        self.assertIsInstance(none, nodes.Const)
        self.assertIsNone(none.value)
        self.assertIsNone(nothing)
        self.assertIsInstance(chain, nodes.Const)
        self.assertEqual(chain.value, 'None')

    def test_lgc_classproperty(self):
        '''test expected values of constants after rebuilding'''
        code = '''
            from logilab.common.decorators import classproperty

            class A(object):
                @classproperty
                def hop(cls): #@
                    return None
            '''
        method = test_utils.extract_node(code)
        self.assertEqual('classmethod', method.type)
class FileBuildTest(unittest.TestCase):
    """Checks on a module tree built from data/module.py: module, function,
    class and method properties plus their locals dictionaries."""

    def setUp(self):
        self.module = resources.build_file('data/module.py', 'data.module')

    def test_module_base_props(self):
        """test base properties and method of a astroid module"""
        module = self.module
        self.assertEqual(module.name, 'data.module')
        self.assertEqual(module.doc, "test module for astroid\n")
        self.assertEqual(module.fromlineno, 0)
        self.assertIsNone(module.parent)
        self.assertEqual(module.frame(), module)
        self.assertEqual(module.root(), module)
        self.assertEqual(module.file, os.path.abspath(resources.find('data/module.py')))
        self.assertEqual(module.pure_python, 1)
        self.assertEqual(module.package, 0)
        self.assertFalse(module.is_statement)
        self.assertEqual(module.statement(), module)
        self.assertEqual(module.statement(), module)

    def test_module_locals(self):
        """test the 'locals' dictionary of a astroid module"""
        module = self.module
        _locals = module.locals
        # for a module, locals and globals are the same dictionary.
        self.assertIs(_locals, module.globals)
        keys = sorted(_locals.keys())
        should = ['MY_DICT', 'YO', 'YOUPI',
                  '__revision__', 'global_access', 'modutils', 'four_args',
                  'os', 'redirect', 'pb', 'LocalsVisitor', 'ASTWalker']
        should.sort()
        self.assertEqual(keys, should)

    def test_function_base_props(self):
        """test base properties and method of a astroid function"""
        module = self.module
        function = module['global_access']
        self.assertEqual(function.name, 'global_access')
        self.assertEqual(function.doc, 'function test')
        self.assertEqual(function.fromlineno, 11)
        self.assertTrue(function.parent)
        self.assertEqual(function.frame(), function)
        self.assertEqual(function.parent.frame(), module)
        self.assertEqual(function.root(), module)
        self.assertEqual([n.name for n in function.args.args], ['key', 'val'])
        self.assertEqual(function.type, 'function')

    def test_function_locals(self):
        """test the 'locals' dictionary of a astroid function"""
        _locals = self.module['global_access'].locals
        self.assertEqual(len(_locals), 4)
        keys = sorted(_locals.keys())
        self.assertEqual(keys, ['i', 'key', 'local', 'val'])

    def test_class_base_props(self):
        """test base properties and method of a astroid class"""
        module = self.module
        klass = module['YO']
        self.assertEqual(klass.name, 'YO')
        self.assertEqual(klass.doc, 'hehe')
        self.assertEqual(klass.fromlineno, 25)
        self.assertTrue(klass.parent)
        self.assertEqual(klass.frame(), klass)
        self.assertEqual(klass.parent.frame(), module)
        self.assertEqual(klass.root(), module)
        self.assertEqual(klass.basenames, [])
        if IS_PY3:
            self.assertTrue(klass.newstyle)
        else:
            self.assertFalse(klass.newstyle)

    def test_class_locals(self):
        """test the 'locals' dictionary of a astroid class"""
        module = self.module
        klass1 = module['YO']
        locals1 = klass1.locals
        keys = sorted(locals1.keys())
        self.assertEqual(keys, ['__init__', 'a'])
        klass2 = module['YOUPI']
        locals2 = klass2.locals
        keys = locals2.keys()
        self.assertEqual(sorted(keys),
                         ['__init__', 'class_attr', 'class_method',
                          'method', 'static_method'])

    def test_class_instance_attrs(self):
        module = self.module
        klass1 = module['YO']
        klass2 = module['YOUPI']
        self.assertEqual(list(klass1.instance_attrs.keys()), ['yo'])
        self.assertEqual(list(klass2.instance_attrs.keys()), ['member'])

    def test_class_basenames(self):
        module = self.module
        klass1 = module['YO']
        klass2 = module['YOUPI']
        self.assertEqual(klass1.basenames, [])
        self.assertEqual(klass2.basenames, ['YO'])

    def test_method_base_props(self):
        """test base properties and method of a astroid method"""
        klass2 = self.module['YOUPI']
        # "normal" method
        method = klass2['method']
        self.assertEqual(method.name, 'method')
        self.assertEqual([n.name for n in method.args.args], ['self'])
        self.assertEqual(method.doc, 'method test')
        self.assertEqual(method.fromlineno, 47)
        self.assertEqual(method.type, 'method')
        # class method
        method = klass2['class_method']
        self.assertEqual([n.name for n in method.args.args], ['cls'])
        self.assertEqual(method.type, 'classmethod')
        # static method
        method = klass2['static_method']
        self.assertEqual(method.args.args, [])
        self.assertEqual(method.type, 'staticmethod')

    def test_method_locals(self):
        """test the 'locals' dictionary of a astroid method"""
        method = self.module['YOUPI']['method']
        _locals = method.locals
        keys = sorted(_locals)
        if sys.version_info < (3, 0):
            self.assertEqual(len(_locals), 5)
            self.assertEqual(keys, ['a', 'autre', 'b', 'local', 'self'])
        else:# ListComp variables are no more accessible outside
            self.assertEqual(len(_locals), 3)
            self.assertEqual(keys, ['autre', 'local', 'self'])
class ModuleBuildTest(resources.SysPathSetup, FileBuildTest):
    """Re-run every FileBuildTest check against a tree built from the
    *live* module object instead of from its source file."""

    def setUp(self):
        super(ModuleBuildTest, self).setUp()
        import data.module
        ast_builder = builder.AstroidBuilder()
        self.module = ast_builder.module_build(data.module, 'data.module')
@unittest.skipIf(IS_PY3, "guess_encoding not used on Python 3")
class TestGuessEncoding(unittest.TestCase):
    """Tests for builder._guess_encoding, which sniffs a Python 2 source
    encoding from Emacs/Vim declarations (first two lines only) or a BOM.

    Fix: the negative Emacs case used a literal identical to the positive
    one (it would necessarily yield the same result); the declaration must
    sit past the two-line scan window, hence the extra blank line.
    """

    def setUp(self):
        self.guess_encoding = builder._guess_encoding

    def testEmacs(self):
        e = self.guess_encoding('# -*- coding: UTF-8 -*-')
        self.assertEqual(e, 'UTF-8')
        e = self.guess_encoding('# -*- coding:UTF-8 -*-')
        self.assertEqual(e, 'UTF-8')
        e = self.guess_encoding('''
        ### -*- coding: ISO-8859-1 -*-
        ''')
        self.assertEqual(e, 'ISO-8859-1')
        # declaration on line 3: out of the two-line scan window.
        e = self.guess_encoding('''

        ### -*- coding: ISO-8859-1 -*-
        ''')
        self.assertIsNone(e)

    def testVim(self):
        e = self.guess_encoding('# vim:fileencoding=UTF-8')
        self.assertEqual(e, 'UTF-8')
        e = self.guess_encoding('''
        ### vim:fileencoding=ISO-8859-1
        ''')
        self.assertEqual(e, 'ISO-8859-1')
        # the space after '=' makes the declaration invalid.
        e = self.guess_encoding('''
        ### vim:fileencoding= ISO-8859-1
        ''')
        self.assertIsNone(e)

    def test_wrong_coding(self):
        # setting "coding" varaible
        e = self.guess_encoding("coding = UTF-8")
        self.assertIsNone(e)
        # setting a dictionnary entry
        e = self.guess_encoding("coding:UTF-8")
        self.assertIsNone(e)
        # setting an arguement
        e = self.guess_encoding("def do_something(a_word_with_coding=None):")
        self.assertIsNone(e)

    def testUTF8(self):
        # a UTF-8 BOM is only meaningful at the very start of the data.
        e = self.guess_encoding('\xef\xbb\xbf any UTF-8 data')
        self.assertEqual(e, 'UTF-8')
        e = self.guess_encoding(' any UTF-8 data \xef\xbb\xbf')
        self.assertIsNone(e)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
julian-seward1/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_tmpdir.py | 173 | 6017 | import sys
import py
import pytest
from _pytest.tmpdir import tmpdir
def test_funcarg(testdir):
    """The tmpdir fixture derives its basename from the (sanitized) test
    item name; path separators are replaced by underscores."""
    testdir.makepyfile("""
        def pytest_generate_tests(metafunc):
            metafunc.addcall(id='a')
            metafunc.addcall(id='b')
        def test_func(tmpdir): pass
    """)
    from _pytest.tmpdir import TempdirFactory
    reprec = testdir.inline_run()
    calls = reprec.getcalls("pytest_runtest_setup")
    item = calls[0].item
    config = item.config
    tmpdirhandler = TempdirFactory(config)
    item._initrequest()
    p = tmpdir(item._request, tmpdirhandler)
    assert p.check()
    # strip the trailing numbering added by mktemp.
    bn = p.basename.strip("0123456789")
    assert bn.endswith("test_func_a_")
    # item names containing path separators must be sanitized.
    item.name = "qwe/\\abc"
    p = tmpdir(item._request, tmpdirhandler)
    assert p.check()
    bn = p.basename.strip("0123456789")
    assert bn == "qwe__abc"
def test_ensuretemp(recwarn):
    """ensuretemp() is idempotent: the same name yields the same directory."""
    # pytest.deprecated_call(pytest.ensuretemp, 'hello')
    first = pytest.ensuretemp('hello')
    second = pytest.ensuretemp('hello')
    assert first == second
    assert first.check(dir=1)
class TestTempdirHandler:
    """Behaviour of TempdirFactory.mktemp under a custom basetemp."""

    def test_mktemp(self, testdir):
        from _pytest.tmpdir import TempdirFactory
        config = testdir.parseconfig()
        config.option.basetemp = testdir.mkdir("hello")
        t = TempdirFactory(config)
        tmp = t.mktemp("world")
        # the first directory for a prefix gets the suffix "0"
        assert tmp.relto(t.getbasetemp()) == "world0"
        tmp = t.mktemp("this")
        assert tmp.relto(t.getbasetemp()).startswith("this")
        # repeated prefixes are numbered so paths never collide
        tmp2 = t.mktemp("this")
        assert tmp2.relto(t.getbasetemp()).startswith("this")
        assert tmp2 != tmp
class TestConfigTmpdir:
    """--basetemp handling at the config level."""

    def test_getbasetemp_custom_removes_old(self, testdir):
        # a user-supplied --basetemp is wiped on every run
        mytemp = testdir.tmpdir.join("xyz")
        p = testdir.makepyfile("""
            def test_1(tmpdir):
                pass
        """)
        testdir.runpytest(p, '--basetemp=%s' % mytemp)
        mytemp.check()
        mytemp.ensure("hello")
        testdir.runpytest(p, '--basetemp=%s' % mytemp)
        mytemp.check()
        # leftover content from the previous run must be gone
        assert not mytemp.join("hello").check()
def test_basetemp(testdir):
    """pytest.ensuretemp() must create its directory under --basetemp."""
    base = testdir.tmpdir.mkdir("mytemp")
    test_file = testdir.makepyfile("""
        import pytest
        def test_1():
            pytest.ensuretemp("hello")
    """)
    result = testdir.runpytest(test_file, '--basetemp=%s' % base)
    assert result.ret == 0
    assert base.join('hello').check()
@pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'),
                    reason="symlink not available on this platform")
def test_tmpdir_always_is_realpath(testdir):
    # the reason why tmpdir should be a realpath is that
    # when you cd to it and do "os.getcwd()" you will anyway
    # get the realpath. Using the symlinked path can thus
    # easily result in path-inequality
    # XXX if that proves to be a problem, consider using
    # os.environ["PWD"]
    realtemp = testdir.tmpdir.mkdir("myrealtemp")
    linktemp = testdir.tmpdir.join("symlinktemp")
    linktemp.mksymlinkto(realtemp)
    p = testdir.makepyfile("""
        def test_1(tmpdir):
            import os
            assert os.path.realpath(str(tmpdir)) == str(tmpdir)
    """)
    # pass the symlinked path as basetemp; the fixture must resolve it
    result = testdir.runpytest("-s", p, '--basetemp=%s/bt' % linktemp)
    assert not result.ret
def test_tmpdir_too_long_on_parametrization(testdir):
    """A very long parametrize id must not break tmpdir creation."""
    testdir.makepyfile("""
        import pytest
        @pytest.mark.parametrize("arg", ["1"*1000])
        def test_some(arg, tmpdir):
            tmpdir.ensure("hello")
    """)
    run_record = testdir.inline_run()
    run_record.assertoutcome(passed=1)
def test_tmpdir_factory(testdir):
    """The session-scoped tmpdir_factory fixture supports un-numbered dirs."""
    testdir.makepyfile("""
        import pytest
        @pytest.fixture(scope='session')
        def session_dir(tmpdir_factory):
            return tmpdir_factory.mktemp('data', numbered=False)
        def test_some(session_dir):
            session_dir.isdir()
    """)
    run_record = testdir.inline_run()
    run_record.assertoutcome(passed=1)
def test_tmpdir_fallback_tox_env(testdir, monkeypatch):
    """Test that tmpdir works even if environment variables required by getpass
    module are missing (#1010).
    """
    for var in ('USER', 'USERNAME'):
        monkeypatch.delenv(var, raising=False)
    testdir.makepyfile("""
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """)
    run_record = testdir.inline_run()
    run_record.assertoutcome(passed=1)
@pytest.fixture
def break_getuser(monkeypatch):
    """Make getpass.getuser() fail: bogus uid and no user env variables."""
    monkeypatch.setattr('os.getuid', lambda: -1)
    # variable list taken from python 2.7/3.4 getpass
    for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
        monkeypatch.delenv(name, raising=False)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_tmpdir_fallback_uid_not_found(testdir):
    """Test that tmpdir works even if the current process's user id does not
    correspond to a valid user.
    """

    testdir.makepyfile("""
        import pytest
        def test_some(tmpdir):
            assert tmpdir.isdir()
    """)
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
@pytest.mark.usefixtures("break_getuser")
@pytest.mark.skipif(sys.platform.startswith('win'), reason='no os.getuid on windows')
def test_get_user_uid_not_found():
    """Test that get_user() function works even if the current process's
    user id does not correspond to a valid user (e.g. running pytest in a
    Docker container with 'docker run -u'.
    """
    from _pytest.tmpdir import get_user
    # with break_getuser active, getpass has nothing to fall back on
    assert get_user() is None
@pytest.mark.skipif(not sys.platform.startswith('win'), reason='win only')
def test_get_user(monkeypatch):
    """Test that get_user() function works even if environment variables
    required by getpass module are missing from the environment on Windows
    (#1010).
    """
    from _pytest.tmpdir import get_user
    for var in ('USER', 'USERNAME'):
        monkeypatch.delenv(var, raising=False)
    assert get_user() is None
| mpl-2.0 |
quanvm009/codev7 | openerp/addons/delivery/__openerp__.py | 18 | 1975 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the "Delivery Costs" module.
{
    'name': 'Delivery Costs',
    'version': '1.0',
    'category': 'Sales Management',
    'description': """
Allows you to add delivery methods in sale orders and picking.
==============================================================
You can define your own carrier and delivery grids for prices. When creating
invoices from picking, OpenERP is able to add and compute the shipping line.
""",
    'author': 'OpenERP SA',
    # modules that must be installed before this one
    'depends': ['sale', 'purchase', 'stock'],
    # data files loaded at install/update time (order matters)
    'data': [
        'security/ir.model.access.csv',
        'delivery_report.xml',
        'delivery_view.xml',
        'partner_view.xml',
        'delivery_data.xml'
    ],
    'demo': ['delivery_demo.xml'],
    'test': ['test/delivery_cost.yml',
             'test/delivery_chained_pickings.yml',
             ],
    'installable': True,
    'auto_install': False,  # never installed automatically with its deps
    'images': ['images/1_delivery_method.jpeg', 'images/2_delivery_pricelist.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JazzeYoung/VeryDeepAutoEncoder | theano/tensor/tests/test_extra_ops.py | 1 | 36192 | from __future__ import absolute_import, print_function, division
import numpy as np
import numpy
import theano
from theano.tests import unittest_tools as utt
from theano.tensor.extra_ops import (SearchsortedOp, searchsorted,
CumsumOp, cumsum, CumprodOp, cumprod,
CpuContiguous, cpu_contiguous, BinCountOp,
bincount, DiffOp, diff, squeeze, compress,
RepeatOp, repeat, Bartlett, bartlett,
FillDiagonal, fill_diagonal,
FillDiagonalOffset, fill_diagonal_offset,
to_one_hot, Unique)
from theano import tensor as T
from theano import config, tensor, function
from theano.tests.unittest_tools import attr
numpy_ver = [int(n) for n in numpy.__version__.split('.')[:2]]
numpy_16 = bool(numpy_ver >= [1, 6])
def test_cpu_contiguous():
    """cpu_contiguous must appear in the graph and force C-contiguous output."""
    a = T.fmatrix('a')
    i = T.iscalar('i')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    # the strided slice a.reshape((5, 4))[::i] is generally not contiguous
    f = theano.function([a, i], cpu_contiguous(a.reshape((5, 4))[::i]))
    topo = f.maker.fgraph.toposort()
    assert any([isinstance(node.op, CpuContiguous) for node in topo])
    assert f(a_val, 1).flags['C_CONTIGUOUS']
    assert f(a_val, 2).flags['C_CONTIGUOUS']
    assert f(a_val, 3).flags['C_CONTIGUOUS']
    # Test the grad:
    theano.tests.unittest_tools.verify_grad(cpu_contiguous,
                                            [numpy.random.rand(5, 7, 2)])
class TestSearchsortedOp(utt.InferShapeTester):
    """Checks SearchsortedOp against numpy.searchsorted (sides, sorter,
    shape inference and gradient)."""

    def setUp(self):
        super(TestSearchsortedOp, self).setUp()
        self.op_class = SearchsortedOp
        self.op = SearchsortedOp()

        self.x = T.vector('x')
        self.v = T.tensor3('v')

        self.a = 30 * np.random.random(50).astype(config.floatX)
        self.b = 30 * np.random.random((8, 10, 5)).astype(config.floatX)
        # argsort index used as the 'sorter' argument below
        self.idx_sorted = np.argsort(self.a).astype('int32')

    def test_searchsortedOp_on_sorted_input(self):
        f = theano.function([self.x, self.v], searchsorted(self.x, self.v))
        assert np.allclose(np.searchsorted(self.a[self.idx_sorted], self.b),
                           f(self.a[self.idx_sorted], self.b))

        # 'sorter' lets us search an unsorted array through an argsort index
        sorter = T.vector('sorter', dtype='int32')
        f = theano.function([self.x, self.v, sorter], self.x.searchsorted(self.v, sorter=sorter, side='right'))
        assert np.allclose(self.a.searchsorted(self.b, sorter=self.idx_sorted, side='right'),
                           f(self.a, self.b, self.idx_sorted))

        sa = self.a[self.idx_sorted]
        f = theano.function([self.x, self.v], self.x.searchsorted(self.v, side='right'))
        assert np.allclose(sa.searchsorted(self.b, side='right'), f(sa, self.b))

    def test_searchsortedOp_wrong_side_kwd(self):
        # only 'left' and 'right' are valid values for 'side'
        self.assertRaises(ValueError, searchsorted, self.x, self.v, side='asdfa')

    def test_searchsortedOp_on_no_1d_inp(self):
        # both x and sorter must be 1-d
        no_1d = T.dmatrix('no_1d')
        self.assertRaises(ValueError, searchsorted, no_1d, self.v)
        self.assertRaises(ValueError, searchsorted, self.x, self.v, sorter=no_1d)

    def test_searchsortedOp_on_float_sorter(self):
        # sorter must be an integer vector
        sorter = T.vector('sorter', dtype="float32")
        self.assertRaises(TypeError, searchsorted,
                          self.x, self.v, sorter=sorter)

    def test_searchsortedOp_on_int_sorter(self):
        compatible_types = ('int8', 'int16', 'int32')
        if theano.configdefaults.python_int_bitwidth() == 64:
            compatible_types += ('int64',)
        # 'uint8', 'uint16', 'uint32', 'uint64')
        for dtype in compatible_types:
            sorter = T.vector('sorter', dtype=dtype)
            f = theano.function([self.x, self.v, sorter],
                                searchsorted(self.x, self.v, sorter=sorter),
                                allow_input_downcast=True)
            assert np.allclose(np.searchsorted(self.a, self.b, sorter=self.idx_sorted),
                               f(self.a, self.b, self.idx_sorted))

    def test_searchsortedOp_on_right_side(self):
        f = theano.function([self.x, self.v],
                            searchsorted(self.x, self.v, side='right'))
        assert np.allclose(np.searchsorted(self.a, self.b, side='right'),
                           f(self.a, self.b))

    def test_infer_shape(self):
        # Test using default parameters' value
        self._compile_and_check([self.x, self.v],
                                [searchsorted(self.x, self.v)],
                                [self.a[self.idx_sorted], self.b],
                                self.op_class)

        # Test parameter ``sorter``
        sorter = T.vector('sorter', dtype="int32")
        self._compile_and_check([self.x, self.v, sorter],
                                [searchsorted(self.x, self.v, sorter=sorter)],
                                [self.a, self.b, self.idx_sorted],
                                self.op_class)

        # Test parameter ``side``
        la = np.ones(10).astype(config.floatX)
        lb = np.ones(shape=(1, 2, 3)).astype(config.floatX)
        self._compile_and_check([self.x, self.v],
                                [searchsorted(self.x, self.v, side='right')],
                                [la, lb],
                                self.op_class)

    def test_grad(self):
        utt.verify_grad(self.op, [self.a[self.idx_sorted], self.b])
class TestCumsumOp(utt.InferShapeTester):
    """Checks CumsumOp against numpy.cumsum for axis=None and every axis."""

    def setUp(self):
        super(TestCumsumOp, self).setUp()
        self.op_class = CumsumOp
        self.op = CumsumOp()

    def test_cumsumOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis out of bounds
        self.assertRaises(ValueError, cumsum, x, axis=3)
        self.assertRaises(ValueError, cumsum, x, axis=-4)

        f = theano.function([x], cumsum(x))
        assert np.allclose(np.cumsum(a), f(a))  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            f = theano.function([x], cumsum(x, axis=axis))
            assert np.allclose(np.cumsum(a, axis=axis), f(a))

    def test_infer_shape(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis=None
        self._compile_and_check([x],
                                [self.op(x)],
                                [a],
                                self.op_class)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x],
                                    [cumsum(x, axis=axis)],
                                    [a],
                                    self.op_class)

    def test_grad(self):
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        utt.verify_grad(self.op, [a])  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            utt.verify_grad(self.op_class(axis=axis), [a], eps=4e-4)
class TestCumprodOp(utt.InferShapeTester):
    """Checks CumprodOp against numpy.cumprod for axis=None and every axis."""

    def setUp(self):
        super(TestCumprodOp, self).setUp()
        self.op_class = CumprodOp
        self.op = CumprodOp()

    def test_CumprodOp(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis out of bounds
        self.assertRaises(ValueError, cumprod, x, axis=3)
        self.assertRaises(ValueError, cumprod, x, axis=-4)

        f = theano.function([x], cumprod(x))
        assert np.allclose(np.cumprod(a), f(a))  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            f = theano.function([x], cumprod(x, axis=axis))
            assert np.allclose(np.cumprod(a, axis=axis), f(a))

    def test_infer_shape(self):
        x = T.tensor3('x')
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        # Test axis=None
        self._compile_and_check([x],
                                [self.op(x)],
                                [a],
                                self.op_class)

        for axis in range(-len(a.shape), len(a.shape)):
            self._compile_and_check([x],
                                    [cumprod(x, axis=axis)],
                                    [a],
                                    self.op_class)

    def test_grad(self):
        a = np.random.random((3, 5, 2)).astype(config.floatX)

        utt.verify_grad(self.op, [a])  # Test axis=None

        for axis in range(-len(a.shape), len(a.shape)):
            utt.verify_grad(self.op_class(axis=axis), [a])
class TestBinCountOp(utt.InferShapeTester):
    """Checks bincount()/BinCountOp against a pure-python reference and
    numpy.bincount over the supported integer dtypes."""

    def setUp(self):
        super(TestBinCountOp, self).setUp()
        self.op_class = BinCountOp
        self.op = BinCountOp()

    def test_bincountFn(self):
        w = T.vector('w')

        def ref(data, w=None, minlength=None):
            # pure-python reference implementation of bincount
            size = int(data.max() + 1)
            if minlength:
                size = max(size, minlength)
            if w is not None:
                out = np.zeros(size, dtype=w.dtype)
                for i in range(data.shape[0]):
                    out[data[i]] += w[i]
            else:
                # NOTE(review): 'a' is the enclosing loop variable, not the
                # 'data' parameter -- works only because callers pass 'a'.
                out = np.zeros(size, dtype=a.dtype)
                for i in range(data.shape[0]):
                    out[data[i]] += 1
            return out

        for dtype in ('int8', 'int16', 'int32', 'int64',
                      'uint8', 'uint16', 'uint32', 'uint64'):
            x = T.vector('x', dtype=dtype)

            a = np.random.random_integers(50, size=(25)).astype(dtype)
            weights = np.random.random((25,)).astype(config.floatX)

            f1 = theano.function([x], bincount(x))
            f2 = theano.function([x, w], bincount(x, weights=w))

            assert (ref(a) == f1(a)).all()
            assert np.allclose(ref(a, weights), f2(a, weights))
            f3 = theano.function([x], bincount(x, minlength=55))
            f4 = theano.function([x], bincount(x, minlength=5))
            assert (ref(a, minlength=55) == f3(a)).all()
            assert (ref(a, minlength=5) == f4(a)).all()
            # skip the following test when using unsigned ints
            if not dtype.startswith('u'):
                a[0] = -1
                f5 = theano.function([x], bincount(x, assert_nonneg=True))
                self.assertRaises(AssertionError, f5, a)

    def test_bincountOp(self):
        w = T.vector('w')
        for dtype in ('int8', 'int16', 'int32', 'int64',
                      'uint8', 'uint16', 'uint32', 'uint64'):
            # uint64 always fails
            # int64 and uint32 also fail if python int are 32-bit
            # NOTE(review): if python_int_bitwidth() returned anything other
            # than 32 or 64, numpy_unsupported_dtypes would be unbound below.
            int_bitwidth = theano.configdefaults.python_int_bitwidth()
            if int_bitwidth == 64:
                numpy_unsupported_dtypes = ('uint64',)
            if int_bitwidth == 32:
                numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')

            x = T.vector('x', dtype=dtype)

            if dtype in numpy_unsupported_dtypes:
                self.assertRaises(TypeError, BinCountOp(), x)

            else:
                a = np.random.random_integers(50, size=(25)).astype(dtype)
                weights = np.random.random((25,)).astype(config.floatX)

                f1 = theano.function([x], BinCountOp()(x, weights=None))
                f2 = theano.function([x, w], BinCountOp()(x, weights=w))

                assert (np.bincount(a) == f1(a)).all()
                assert np.allclose(np.bincount(a, weights=weights),
                                   f2(a, weights))
                # minlength requires numpy >= 1.6
                if not numpy_16:
                    continue
                f3 = theano.function([x], BinCountOp(minlength=23)(x, weights=None))
                f4 = theano.function([x], BinCountOp(minlength=5)(x, weights=None))
                assert (np.bincount(a, minlength=23) == f3(a)).all()
                assert (np.bincount(a, minlength=5) == f4(a)).all()

    @attr('slow')
    def test_infer_shape(self):
        for dtype in tensor.discrete_dtypes:
            # uint64 always fails
            # int64 and uint32 also fail if python int are 32-bit
            int_bitwidth = theano.configdefaults.python_int_bitwidth()
            if int_bitwidth == 64:
                numpy_unsupported_dtypes = ('uint64',)
            if int_bitwidth == 32:
                numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')

            x = T.vector('x', dtype=dtype)

            if dtype in numpy_unsupported_dtypes:
                self.assertRaises(TypeError, BinCountOp(), x)

            else:
                self._compile_and_check([x],
                                        [BinCountOp()(x, None)],
                                        [np.random.random_integers(
                                            50, size=(25,)).astype(dtype)],
                                        self.op_class)

                weights = np.random.random((25,)).astype(config.floatX)
                self._compile_and_check([x],
                                        [BinCountOp()(x, weights=weights)],
                                        [np.random.random_integers(
                                            50, size=(25,)).astype(dtype)],
                                        self.op_class)

                if not numpy_16:
                    continue
                self._compile_and_check([x],
                                        [BinCountOp(minlength=60)(x, weights=weights)],
                                        [np.random.random_integers(
                                            50, size=(25,)).astype(dtype)],
                                        self.op_class)

                self._compile_and_check([x],
                                        [BinCountOp(minlength=5)(x, weights=weights)],
                                        [np.random.random_integers(
                                            50, size=(25,)).astype(dtype)],
                                        self.op_class)
class TestDiffOp(utt.InferShapeTester):
    """Checks DiffOp against numpy.diff for several orders n and axes."""

    nb = 10  # Number of time iterating for n

    def setUp(self):
        super(TestDiffOp, self).setUp()
        self.op_class = DiffOp
        self.op = DiffOp()

    def test_diffOp(self):
        x = T.matrix('x')
        a = np.random.random((30, 50)).astype(config.floatX)

        f = theano.function([x], diff(x))
        assert np.allclose(np.diff(a), f(a))

        for axis in range(len(a.shape)):
            for k in range(TestDiffOp.nb):
                g = theano.function([x], diff(x, n=k, axis=axis))
                assert np.allclose(np.diff(a, n=k, axis=axis), g(a))

    def test_infer_shape(self):
        x = T.matrix('x')
        a = np.random.random((30, 50)).astype(config.floatX)

        self._compile_and_check([x],
                                [self.op(x)],
                                [a],
                                self.op_class)

        for axis in range(len(a.shape)):
            for k in range(TestDiffOp.nb):
                self._compile_and_check([x],
                                        [diff(x, n=k, axis=axis)],
                                        [a],
                                        self.op_class)

    def test_grad(self):
        x = T.vector('x')
        a = np.random.random(50).astype(config.floatX)

        theano.function([x], T.grad(T.sum(diff(x)), x))
        utt.verify_grad(self.op, [a])

        for k in range(TestDiffOp.nb):
            theano.function([x], T.grad(T.sum(diff(x, n=k)), x))
            utt.verify_grad(DiffOp(n=k), [a], eps=7e-3)
class SqueezeTester(utt.InferShapeTester):
    """Checks squeeze() against numpy.squeeze for several broadcast patterns."""

    shape_list = [(1, 3),
                  (1, 2, 3),
                  (1, 5, 1, 1, 6)]
    # broadcastable flags matching the size-1 dimensions above
    broadcast_list = [[True, False],
                      [True, False, False],
                      [True, False, True, True, False]]

    def setUp(self):
        super(SqueezeTester, self).setUp()
        self.op = squeeze

    def test_op(self):
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            variable = tensor.TensorType(theano.config.floatX, broadcast)()

            f = theano.function([variable], self.op(variable))

            expected = numpy.squeeze(data)
            tested = f(data)

            assert tested.shape == expected.shape
            assert numpy.allclose(tested, expected)

    def test_infer_shape(self):
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            variable = tensor.TensorType(theano.config.floatX, broadcast)()

            # squeeze lowers to a DimShuffle in the compiled graph
            self._compile_and_check([variable],
                                    [self.op(variable)],
                                    [data],
                                    tensor.DimShuffle,
                                    warn=False)

    def test_grad(self):
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)

            utt.verify_grad(self.op, [data])

    def test_var_interface(self):
        # same as test_op, but use a_theano_var.squeeze.
        for shape, broadcast in zip(self.shape_list, self.broadcast_list):
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            variable = tensor.TensorType(theano.config.floatX, broadcast)()

            f = theano.function([variable], variable.squeeze())

            expected = numpy.squeeze(data)
            tested = f(data)

            assert tested.shape == expected.shape
            assert numpy.allclose(tested, expected)
class CompressTester(utt.InferShapeTester):
    """Checks compress() against numpy.compress, including empty and
    all-zero conditions."""

    axis_list = [None,
                 -1,
                 0,
                 0,
                 0,
                 1]
    cond_list = [[1, 0, 1, 0, 0, 1],
                 [0, 1, 1, 0],
                 [0, 1, 1, 0],
                 [],
                 [0, 0, 0, 0],
                 [1, 1, 0, 1, 0]]
    shape_list = [(2, 3),
                  (4, 3),
                  (4, 3),
                  (4, 3),
                  (4, 3),
                  (3, 5)]

    def setUp(self):
        super(CompressTester, self).setUp()
        self.op = compress

    def test_op(self):
        for axis, cond, shape in zip(self.axis_list, self.cond_list,
                                     self.shape_list):
            cond_var = theano.tensor.ivector()
            data = numpy.random.random(size=shape).astype(theano.config.floatX)
            data_var = theano.tensor.matrix()

            f = theano.function([cond_var, data_var],
                                self.op(cond_var, data_var, axis=axis))

            expected = numpy.compress(cond, data, axis=axis)
            tested = f(cond, data)

            assert tested.shape == expected.shape
            assert numpy.allclose(tested, expected)
class TestRepeatOp(utt.InferShapeTester):
    """Checks repeat()/RepeatOp against numpy.repeat for scalar and vector
    repeat counts, several dtypes, ndims and axes."""

    def _possible_axis(self, ndim):
        # None plus every positive and negative axis for this rank
        return [None] + list(range(ndim)) + [-i for i in range(ndim)]

    def setUp(self):
        super(TestRepeatOp, self).setUp()
        self.op_class = RepeatOp
        self.op = RepeatOp()
        # uint64 always fails
        # int64 and uint32 also fail if python int are 32-bit
        ptr_bitwidth = theano.configdefaults.local_bitwidth()
        if ptr_bitwidth == 64:
            self.numpy_unsupported_dtypes = ('uint64',)
        if ptr_bitwidth == 32:
            self.numpy_unsupported_dtypes = ('uint32', 'int64', 'uint64')

    def test_repeatOp(self):
        for ndim in range(3):
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.discrete_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = numpy.asarray(3, dtype=dtype)
                    if (dtype == 'uint64' or
                            (dtype in self.numpy_unsupported_dtypes and
                                r_var.ndim == 1)):
                        self.assertRaises(TypeError, repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        # vector repeats need one entry per repeated element
                        if axis is None:
                            r = np.random.random_integers(
                                5, size=a.size).astype(dtype)
                        else:
                            r = np.random.random_integers(
                                5, size=(10,)).astype(dtype)

                        if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
                            self.assertRaises(TypeError,
                                              repeat, x, r_var, axis=axis)
                        else:
                            f = theano.function([x, r_var],
                                                repeat(x, r_var, axis=axis))
                            assert np.allclose(np.repeat(a, r, axis=axis),
                                               f(a, r))

                        # check when r is a list of single integer, e.g. [3].
                        r = np.random.random_integers(
                            10, size=()).astype(dtype) + 2
                        f = theano.function([x],
                                            repeat(x, [r], axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a))
                        # the constant repeat count should be optimised away
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])

                        # check when r is theano tensortype that broadcastable is (True,)
                        r_var = theano.tensor.TensorType(broadcastable=(True,),
                                                         dtype=dtype)()
                        r = np.random.random_integers(5, size=(1,)).astype(dtype)
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r[0], axis=axis),
                                           f(a, r))
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])

    @attr('slow')
    def test_infer_shape(self):
        for ndim in range(4):
            x = T.TensorType(config.floatX, [False] * ndim)()
            shp = (numpy.arange(ndim) + 1) * 5
            a = np.random.random(shp).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.discrete_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = numpy.asarray(3, dtype=dtype)
                    if dtype in self.numpy_unsupported_dtypes:
                        r_var = T.vector(dtype=dtype)
                        self.assertRaises(TypeError, repeat, x, r_var)
                    else:
                        self._compile_and_check([x, r_var],
                                                [RepeatOp(axis=axis)(x, r_var)],
                                                [a, r],
                                                self.op_class)

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.random_integers(
                                5, size=a.size).astype(dtype)
                        elif a.size > 0:
                            r = np.random.random_integers(
                                5, size=a.shape[axis]).astype(dtype)
                        else:
                            r = np.random.random_integers(
                                5, size=(10,)).astype(dtype)

                        self._compile_and_check(
                            [x, r_var],
                            [RepeatOp(axis=axis)(x, r_var)],
                            [a, r],
                            self.op_class)

    def test_grad(self):
        for ndim in range(3):
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                utt.verify_grad(lambda x: RepeatOp(axis=axis)(x, 3), [a])

    def test_broadcastable(self):
        # repeating along an axis with count != 1 drops its broadcastable flag
        x = T.TensorType(config.floatX, [False, True, False])()
        r = RepeatOp(axis=1)(x, 2)
        self.assertEqual(r.broadcastable, (False, False, False))
        r = RepeatOp(axis=1)(x, 1)
        self.assertEqual(r.broadcastable, (False, True, False))
        r = RepeatOp(axis=0)(x, 2)
        self.assertEqual(r.broadcastable, (False, True, False))
class TestBartlett(utt.InferShapeTester):
    """Checks bartlett() against numpy.bartlett, including degenerate sizes."""

    def setUp(self):
        super(TestBartlett, self).setUp()
        self.op_class = Bartlett
        self.op = bartlett

    def test_perform(self):
        x = tensor.lscalar()
        f = function([x], self.op(x))
        M = numpy.random.random_integers(3, 50, size=())
        assert numpy.allclose(f(M), numpy.bartlett(M))
        # degenerate window sizes must match numpy behaviour too
        assert numpy.allclose(f(0), numpy.bartlett(0))
        assert numpy.allclose(f(-1), numpy.bartlett(-1))
        b = numpy.array([17], dtype='uint8')
        assert numpy.allclose(f(b[0]), numpy.bartlett(b[0]))

    def test_infer_shape(self):
        x = tensor.lscalar()
        self._compile_and_check([x], [self.op(x)],
                                [numpy.random.random_integers(3, 50, size=())],
                                self.op_class)
        self._compile_and_check([x], [self.op(x)], [0], self.op_class)
        self._compile_and_check([x], [self.op(x)], [1], self.op_class)
class TestFillDiagonal(utt.InferShapeTester):
    """Checks fill_diagonal() on rectangular matrices and square 3d tensors."""

    rng = numpy.random.RandomState(43)

    def setUp(self):
        super(TestFillDiagonal, self).setUp()
        self.op_class = FillDiagonal
        self.op = fill_diagonal

    def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        for shp in [(8, 8), (5, 8), (8, 5)]:
            a = numpy.random.rand(*shp).astype(config.floatX)
            val = numpy.cast[config.floatX](numpy.random.rand())
            out = f(a, val)
            # We can't use numpy.fill_diagonal as it is bugged.
            assert numpy.allclose(numpy.diag(out), val)
            assert (out == val).sum() == min(a.shape)

        # test for 3d tensor
        a = numpy.random.rand(3, 3, 3).astype(config.floatX)
        x = tensor.tensor3()
        y = tensor.scalar()
        f = function([x, y], fill_diagonal(x, y))
        # val + 10 guarantees it differs from the random cell values
        val = numpy.cast[config.floatX](numpy.random.rand() + 10)
        out = f(a, val)
        # We can't use numpy.fill_diagonal as it is bugged.
        assert out[0, 0, 0] == val
        assert out[1, 1, 1] == val
        assert out[2, 2, 2] == val
        assert (out == val).sum() == min(a.shape)

    @attr('slow')
    def test_gradient(self):
        utt.verify_grad(fill_diagonal, [numpy.random.rand(5, 8),
                                        numpy.random.rand()],
                        n_tests=1, rng=TestFillDiagonal.rng)
        utt.verify_grad(fill_diagonal, [numpy.random.rand(8, 5),
                                        numpy.random.rand()],
                        n_tests=1, rng=TestFillDiagonal.rng)

    def test_infer_shape(self):
        z = tensor.dtensor3()
        x = tensor.dmatrix()
        y = tensor.dscalar()
        self._compile_and_check([x, y], [self.op(x, y)],
                                [numpy.random.rand(8, 5),
                                 numpy.random.rand()],
                                self.op_class)
        self._compile_and_check([z, y], [self.op(z, y)],
                                # must be square when nd>2
                                [numpy.random.rand(8, 8, 8),
                                 numpy.random.rand()],
                                self.op_class,
                                warn=False)
class TestFillDiagonalOffset(utt.InferShapeTester):
    """Checks fill_diagonal_offset() for positive and negative offsets."""

    rng = numpy.random.RandomState(43)

    def setUp(self):
        super(TestFillDiagonalOffset, self).setUp()
        self.op_class = FillDiagonalOffset
        self.op = fill_diagonal_offset

    def test_perform(self):
        x = tensor.matrix()
        y = tensor.scalar()
        z = tensor.iscalar()

        f = function([x, y, z], fill_diagonal_offset(x, y, z))
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            for shp in [(8, 8), (5, 8), (8, 5), (5, 5)]:
                a = numpy.random.rand(*shp).astype(config.floatX)
                val = numpy.cast[config.floatX](numpy.random.rand())
                out = f(a, val, test_offset)
                # We can't use numpy.fill_diagonal as it is bugged.
                assert numpy.allclose(numpy.diag(out, test_offset), val)
                # number of filled cells depends on the sign of the offset
                if test_offset >= 0:
                    assert (out == val).sum() == min(min(a.shape),
                                                     a.shape[1] - test_offset)
                else:
                    assert (out == val).sum() == min(min(a.shape),
                                                     a.shape[0] + test_offset)

    def test_gradient(self):
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            # input 'offset' will not be tested
            def fill_diagonal_with_fix_offset(a, val):
                return fill_diagonal_offset(a, val, test_offset)

            utt.verify_grad(fill_diagonal_with_fix_offset,
                            [numpy.random.rand(5, 8), numpy.random.rand()],
                            n_tests=1, rng=TestFillDiagonalOffset.rng)
            utt.verify_grad(fill_diagonal_with_fix_offset,
                            [numpy.random.rand(8, 5), numpy.random.rand()],
                            n_tests=1, rng=TestFillDiagonalOffset.rng)
            utt.verify_grad(fill_diagonal_with_fix_offset,
                            [numpy.random.rand(5, 5), numpy.random.rand()],
                            n_tests=1, rng=TestFillDiagonalOffset.rng)

    def test_infer_shape(self):
        x = tensor.dmatrix()
        y = tensor.dscalar()
        z = tensor.iscalar()
        for test_offset in (-5, -4, -1, 0, 1, 4, 5):
            self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                    [numpy.random.rand(8, 5),
                                     numpy.random.rand(),
                                     test_offset],
                                    self.op_class)
            self._compile_and_check([x, y, z], [self.op(x, y, z)],
                                    [numpy.random.rand(5, 8),
                                     numpy.random.rand(),
                                     test_offset],
                                    self.op_class)
def test_to_one_hot():
    """to_one_hot maps an index vector to a one-hot matrix; the default
    dtype is floatX but an explicit dtype is honoured."""
    v = theano.tensor.ivector()
    o = to_one_hot(v, 10)
    f = theano.function([v], o)
    out = f([1, 2, 3, 5, 6])
    assert out.dtype == theano.config.floatX
    assert numpy.allclose(
        out,
        [[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])

    v = theano.tensor.ivector()
    o = to_one_hot(v, 10, dtype="int32")
    f = theano.function([v], o)
    out = f([1, 2, 3, 5, 6])
    assert out.dtype == "int32"
    assert numpy.allclose(
        out,
        [[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
         [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
         [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.]])
class test_Unique(utt.InferShapeTester):
    """Checks Unique op variants (return_index/return_inverse/return_counts)
    against numpy.unique; the counts variants require numpy >= 1.9."""

    def setUp(self):
        super(test_Unique, self).setUp()
        self.op_class = Unique
        self.ops = [Unique(),
                    Unique(True),
                    Unique(False, True),
                    Unique(True, True)]
        if bool(numpy_ver >= [1, 9]):
            self.ops.extend([
                Unique(False, False, True),
                Unique(True, False, True),
                Unique(False, True, True),
                Unique(True, True, True)])

    def test_basic_vector(self):
        """
        Basic test for a vector.
        Done by using the op and checking that it returns the right answer.
        """
        x = theano.tensor.vector()
        inp = np.asarray([2, 1, 3, 2], dtype=config.floatX)
        # expected outputs, one entry per op in self.ops (same order)
        list_outs_expected = [[np.unique(inp)],
                              np.unique(inp, True),
                              np.unique(inp, False, True),
                              np.unique(inp, True, True)]
        if bool(numpy_ver >= [1, 9]):
            list_outs_expected.extend([
                np.unique(inp, False, False, True),
                np.unique(inp, True, False, True),
                np.unique(inp, False, True, True),
                np.unique(inp, True, True, True)])
        for op, outs_expected in zip(self.ops, list_outs_expected):
            f = theano.function(inputs=[x], outputs=op(x, return_list=True))
            outs = f(inp)
            # Compare the result computed to the expected value.
            for out, out_exp in zip(outs, outs_expected):
                utt.assert_allclose(out, out_exp)

    def test_basic_matrix(self):
        """ Basic test for a matrix.
        Done by using the op and checking that it returns the right answer.
        """
        x = theano.tensor.matrix()
        inp = np.asarray([[2, 1], [3, 2], [2, 3]], dtype=config.floatX)
        list_outs_expected = [[np.unique(inp)],
                              np.unique(inp, True),
                              np.unique(inp, False, True),
                              np.unique(inp, True, True)]
        if bool(numpy_ver >= [1, 9]):
            list_outs_expected.extend([
                np.unique(inp, False, False, True),
                np.unique(inp, True, False, True),
                np.unique(inp, False, True, True),
                np.unique(inp, True, True, True)])
        for op, outs_expected in zip(self.ops, list_outs_expected):
            f = theano.function(inputs=[x], outputs=op(x, return_list=True))
            outs = f(inp)
            # Compare the result computed to the expected value.
            for out, out_exp in zip(outs, outs_expected):
                utt.assert_allclose(out, out_exp)

    def test_infer_shape_vector(self):
        """
        Testing the infer_shape with a vector.
        """
        x = theano.tensor.vector()
        for op in self.ops:
            if not op.return_inverse:
                continue
            # the inverse-index output position depends on return_index
            if op.return_index:
                f = op(x)[2]
            else:
                f = op(x)[1]
            self._compile_and_check([x],
                                    [f],
                                    [np.asarray(np.array([2, 1, 3, 2]),
                                                dtype=config.floatX)],
                                    self.op_class)

    def test_infer_shape_matrix(self):
        """
        Testing the infer_shape with a matrix.
        """
        x = theano.tensor.matrix()
        for op in self.ops:
            if not op.return_inverse:
                continue
            if op.return_index:
                f = op(x)[2]
            else:
                f = op(x)[1]
            self._compile_and_check([x],
                                    [f],
                                    [np.asarray(np.array([[2, 1], [3, 2], [2, 3]]),
                                                dtype=config.floatX)],
                                    self.op_class)
| bsd-3-clause |
40223101/w17test | static/Brython3.1.0-20150301-090019/Lib/sys.py | 109 | 4959 | # hack to return special attributes
from _sys import *
from javascript import JSObject
has_local_storage=__BRYTHON__.has_local_storage
has_session_storage = __BRYTHON__.has_session_storage
has_json=__BRYTHON__.has_json
argv = ['__main__']  # placeholder argv, as for a CPython script run

# Brython's install path is reused for the various prefix values.
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names = __BRYTHON__.builtin_module_names
byteorder = 'little'
def exc_info():
    """Return (type, value, traceback) for the most recent exception."""
    err = __BRYTHON__.exception_stack[-1]
    return (err.__class__, err, err.traceback)
# the "executable" is the Brython runtime script itself
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path + '/brython.js'
def exit(i=None):
    """Terminate the script by raising SystemExit, like sys.exit().

    Bug fix: the original discarded its argument and always raised
    ``SystemExit('')``; propagate the exit status instead so
    ``exit(1)`` carries ``1`` in ``SystemExit.code`` (and ``exit()``
    carries ``None``), matching CPython's ``sys.exit`` contract.
    """
    raise SystemExit(i)
class flag_class:
    """Mimic sys.flags: every interpreter flag is off (0) except
    hash_randomization, which CPython enables by default."""

    _FLAG_NAMES = (
        'debug', 'inspect', 'interactive', 'optimize',
        'dont_write_bytecode', 'no_user_site', 'no_site',
        'ignore_environment', 'verbose', 'bytes_warning', 'quiet',
    )

    def __init__(self):
        # All ordinary flags default to "disabled".
        for flag in self._FLAG_NAMES:
            setattr(self, flag, 0)
        self.hash_randomization = 1


flags = flag_class()
def getfilesystemencoding(*args, **kw):
    """getfilesystemencoding() -> string

    Return the encoding used to convert Unicode filenames into
    operating system filenames.  Brython always reports UTF-8.

    NOTE(review): this definition is shadowed by a second, argument-less
    getfilesystemencoding() defined later in this module.
    """
    return 'utf-8'
maxsize = 2147483647        # 2**31 - 1
maxunicode = 1114111        # 0x10FFFF, the highest Unicode code point
path = __BRYTHON__.path
#path_hooks = list(JSObject(__BRYTHON__.path_hooks))
meta_path = __BRYTHON__.meta_path
platform = "brython"
prefix = __BRYTHON__.brython_path

# Human-readable version string, e.g. "3.0.0 (default, <date>) ...".
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
version += " (default, %s) \n[Javascript 1.5] on Brython" % __BRYTHON__.compiled_date
hexversion = 0x03000000 # python 3.0
class __version_info(object):
    """Tuple-like wrapper mimicking CPython's sys.version_info.

    Comparisons against plain tuples only consider the numeric
    (major, minor, micro) prefix.
    """

    def __init__(self, version_info):
        # version_info is a 5-item sequence:
        # [major, minor, micro, releaselevel, serial]
        self.version_info = version_info
        self.major = version_info[0]
        self.minor = version_info[1]
        self.micro = version_info[2]
        self.releaselevel = version_info[3]
        self.serial = version_info[4]

    def __getitem__(self, index):
        # Expose list slices as tuples, as a real sys.version_info would.
        if isinstance(self.version_info[index], list):
            return tuple(self.version_info[index])
        return self.version_info[index]

    def hexversion(self):
        # BUG FIX: this used try/finally, and the ``finally`` block's
        # return unconditionally replaced the full '0M0m0u' value, so the
        # minor/micro digits were never reported.  Fall back only when
        # formatting actually fails (e.g. micro holds "rc1").
        try:
            return '0%d0%d0%d' % (self.major, self.minor, self.micro)
        except (TypeError, ValueError):
            return '0%d0000' % (self.major)

    def __str__(self):
        _s = "sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
        return _s % (self.major, self.minor, self.micro,
                     self.releaselevel, self.serial)

    def _numeric(self):
        """Numeric (major, minor, micro) prefix used for comparisons."""
        return (self.major, self.minor, self.micro)

    # All rich comparisons accept tuples only.  (These previously raised
    # NameError because the undefined name ``Error`` was used in place of
    # TypeError.)
    def __eq__(self, other):
        if isinstance(other, tuple):
            return self._numeric() == other
        raise TypeError("Error! I don't know how to compare!")

    def __ge__(self, other):
        if isinstance(other, tuple):
            return self._numeric() >= other
        raise TypeError("Error! I don't know how to compare!")

    def __gt__(self, other):
        if isinstance(other, tuple):
            return self._numeric() > other
        raise TypeError("Error! I don't know how to compare!")

    def __le__(self, other):
        if isinstance(other, tuple):
            return self._numeric() <= other
        raise TypeError("Error! I don't know how to compare!")

    def __lt__(self, other):
        if isinstance(other, tuple):
            return self._numeric() < other
        raise TypeError("Error! I don't know how to compare!")

    def __ne__(self, other):
        if isinstance(other, tuple):
            return self._numeric() != other
        raise TypeError("Error! I don't know how to compare!")
#eventually this needs to be the real python version such as 3.0, 3.1, etc
version_info = __version_info(__BRYTHON__.version_info)
class _implementation:
    """Mimic sys.implementation for the Brython runtime."""

    def __init__(self):
        self.name = 'brython'
        self.version = __version_info(__BRYTHON__.implementation)
        self.hexversion = self.version.hexversion()
        self.cache_tag = None

    def __repr__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (
            self.name, self.version, self.hexversion)

    # str() and repr() deliberately produce the same namespace-style text.
    __str__ = __repr__


implementation = _implementation()
class _hash_info:
    """Mimic sys.hash_info for the Brython runtime."""

    # Class-level attribute in the original code; kept class-level for
    # compatibility with ``_hash_info.cutoff`` access.
    cutoff = 0

    def __init__(self):
        # BUG FIX: ``self.width=32,`` had a stray trailing comma, which
        # stored the tuple (32,) instead of the integer 32.
        self.width = 32
        self.modulus = 2147483647
        self.inf = 314159
        self.nan = 0
        self.imag = 1000003
        self.algorithm = 'siphash24'
        self.hash_bits = 64
        self.seed_bits = 128

    def __repr__(self):
        # BUG FIX: was misspelled ``__repr`` and therefore never used by
        # repr().  fix me: ideally build this from the instance attributes.
        return "sys.hash_info(width=32, modulus=2147483647, inf=314159, nan=0, imag=1000003, algorithm='siphash24', hash_bits=64, seed_bits=128, cutoff=0)"


hash_info = _hash_info()

warnoptions = []
def getfilesystemencoding():
    """Return the filesystem encoding; Brython always uses UTF-8.

    NOTE(review): this redefinition shadows the earlier
    ``getfilesystemencoding(*args, **kw)`` declared above in this module.
    """
    return 'utf-8'
#delete objects not in python sys module namespace
# (these were only needed while building the module above)
del JSObject
del _implementation
| gpl-3.0 |
ToBeReplaced/ansible-modules-extras | packaging/os/pkgng.py | 60 | 11130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- use local package base or try to fetch an updated one
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- a comma-separated list of keyvalue-pairs of the form
<+/-/:><key>[=<value>]. A '+' denotes adding an annotation, a
'-' denotes removing an annotation, and ':' denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- for pkgng versions before 1.1.4, specify packagesite to use
for downloading packages, if not specified, use settings from
/usr/local/etc/pkg.conf
for newer pkgng versions, specify a the name of a repository
configured in /usr/local/etc/pkg/repos
required: false
rootdir:
description:
- for pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory
required: false
author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that already in cache packages won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng: name=foo state=present
# Annotate package foo and bar
- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
# Remove packages foo and bar
- pkgng: name=foo,bar state=absent
'''
import json
import shlex
import os
import re
import sys
def query_package(module, pkgng_path, name, rootdir_arg):
    """Return True when package *name* is installed.

    Runs ``pkg info -g -e`` whose exit status is 0 iff the (glob) name
    matches an installed package.
    """
    rc, _out, _err = module.run_command("%s %s info -g -e %s" % (pkgng_path, rootdir_arg, name))
    return rc == 0
def pkgng_older_than(module, pkgng_path, compare_version):
    """Return True when the installed pkg(8) is older than *compare_version*.

    compare_version -- list of ints, e.g. [1, 1, 4].  The installed
    version is read from ``pkg -v`` output split on '.' and '_'.
    """
    rc, out, err = module.run_command("%s -v" % pkgng_path)
    # List comprehension instead of map(): on Python 3 map() returns a
    # lazy, non-subscriptable iterator, which broke the indexing below.
    version = [int(x) for x in re.split(r'[\._]', out)]

    i = 0
    new_pkgng = True
    # Walk matching components; the ``else`` runs only when a component
    # differs before one of the sequences is exhausted.
    while compare_version[i] == version[i]:
        i += 1
        if i == min(len(compare_version), len(version)):
            break
    else:
        if compare_version[i] > version[i]:
            new_pkgng = False

    return not new_pkgng
def remove_packages(module, pkgng_path, packages, rootdir_arg):
    """Remove each of *packages*; return (changed, message).

    Packages that are not installed are skipped silently; a package that
    survives its delete command fails the module with its pkg output.
    """
    removed = 0

    # Loop package-by-package so a failure can name the offending package.
    for name in packages:
        if not query_package(module, pkgng_path, name, rootdir_arg):
            continue

        if not module.check_mode:
            rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, rootdir_arg, name))

        # Re-query: if the package is still present the delete failed.
        if not module.check_mode and query_package(module, pkgng_path, name, rootdir_arg):
            module.fail_json(msg="failed to remove %s: %s" % (name, out))

        removed += 1

    if removed > 0:
        return (True, "removed %s package(s)" % removed)
    return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, rootdir_arg):
    """Install each of *packages*; return (changed, message).

    cached      -- skip the catalogue refresh when truthy
    pkgsite     -- repository/packagesite override ("" = pkg defaults)
    rootdir_arg -- preformatted "--rootdir ..." option or ""
    """
    install_c = 0

    # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
    # in /usr/local/etc/pkg/repos
    old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])

    # Turn the pkgsite value into either an env-var prefix (old pkg) or
    # a -r repository option (new pkg).
    if pkgsite != "":
        if old_pkgng:
            pkgsite = "PACKAGESITE=%s" % (pkgsite)
        else:
            pkgsite = "-r %s" % (pkgsite)

    batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts,
                                # setting them to their default values.

    # Refresh the catalogue unless the caller asked for cached operation.
    if not module.check_mode and not cached:
        if old_pkgng:
            rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
        else:
            rc, out, err = module.run_command("%s update" % (pkgng_path))
        if rc != 0:
            module.fail_json(msg="Could not update catalogue")

    for package in packages:
        # Already installed: nothing to do for this one.
        if query_package(module, pkgng_path, package, rootdir_arg):
            continue

        if not module.check_mode:
            if old_pkgng:
                rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
            else:
                rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, rootdir_arg, pkgsite, package))

        # Verify the install actually happened (skipped in check mode).
        if not module.check_mode and not query_package(module, pkgng_path, package, rootdir_arg):
            module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)

        install_c += 1

    if install_c > 0:
        return (True, "added %s package(s)" % (install_c))

    return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, rootdir_arg):
    """Return the value of annotation *tag* on *package*, or False if unset.

    Parses the ``pkg info -g -A`` output looking for a "tag : value" line.
    """
    rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, rootdir_arg, package))
    match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
    return match.group('value') if match else False
def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg):
    """Ensure annotation *tag*=*value* exists on *package*.

    Returns True when the annotation was added, False when it already
    existed with the same value.  Fails the module when the annotation
    exists with a different value or the pkg command errors.
    """
    _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
    if not _value:
        # Annotation does not exist, add it.
        rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
                                          % (pkgng_path, rootdir_arg, package, tag, value))
        if rc != 0:
            # BUG FIX: fail_json() only accepts keyword arguments; the
            # message was previously passed positionally.
            module.fail_json(msg="could not annotate %s: %s"
                                 % (package, out), stderr=err)
        return True
    elif _value != value:
        # Annotation exists, but the value differs.
        # BUG FIX: the keyword was misspelled ``mgs=`` so the failure was
        # reported without any message.
        module.fail_json(
            msg="failed to annotate %s, because %s is already set to %s, but should be set to %s"
                % (package, tag, _value, value))
        return False
    else:
        # Annotation already holds the requested value: nothing to do.
        return False
def annotation_delete(module, pkgng_path, package, tag, value, rootdir_arg):
    """Remove annotation *tag* from *package* if present.

    Returns True when an annotation was removed, False when there was
    nothing to remove.  *value* is accepted for signature symmetry with
    the other annotation operations but is not used.
    """
    _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
    if _value:
        rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
                                          % (pkgng_path, rootdir_arg, package, tag))
        if rc != 0:
            # BUG FIX: fail_json() only accepts keyword arguments; the
            # message was previously passed positionally.
            module.fail_json(msg="could not delete annotation to %s: %s"
                                 % (package, out), stderr=err)
        return True
    return False
def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg):
    """Change annotation *tag* of *package* to *value*.

    Returns True when the annotation was changed, False when it already
    held *value*.  Fails the module when the tag does not exist or the
    pkg command errors.
    """
    _value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
    if not _value:
        # No such tag.
        # BUG FIX: this previously tested ``not value`` (the *new* value)
        # instead of the queried ``_value``, so a missing tag was never
        # detected.  Also: fail_json() requires the ``msg`` keyword.
        module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
                             % (package, tag))
    elif _value == value:
        # Annotation already holds the requested value.
        return False
    else:
        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
                                          % (pkgng_path, rootdir_arg, package, tag, value))
        if rc != 0:
            module.fail_json(msg="could not change annotation annotation to %s: %s"
                                 % (package, out), stderr=err)
        return True
def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg):
    """Apply a comma-separated annotation spec to each of *packages*.

    Each spec element looks like '+tag=value' (add), '-tag' (delete) or
    ':tag=value' (modify).  Returns (changed, message).
    """
    annotate_c = 0
    # Parse every element of the spec into {'operation', 'tag', 'value'}.
    # (List comprehension instead of map(): py2/py3 compatible and eager.)
    annotations = [
        re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
                 _annotation).groupdict()
        for _annotation in re.split(r',', annotation)
    ]

    operation = {
        '+': annotation_add,
        '-': annotation_delete,
        ':': annotation_modify
    }

    for package in packages:
        for _annotation in annotations:
            handler = operation[_annotation['operation']]
            # BUG FIX: rootdir_arg was not forwarded, so every call failed
            # with a missing-positional-argument TypeError.
            if handler(module, pkgng_path, package,
                       _annotation['tag'], _annotation['value'], rootdir_arg):
                annotate_c += 1

    if annotate_c > 0:
        return (True, "added %s annotations." % annotate_c)
    return (False, "changed no annotations")
def main():
    """Module entry point: parse parameters and apply the requested state."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"], required=False),
            name = dict(aliases=["pkg"], required=True),
            cached = dict(default=False, type='bool'),
            annotation = dict(default="", required=False),
            pkgsite = dict(default="", required=False),
            rootdir = dict(default="", required=False)),
        supports_check_mode = True)

    pkgng_path = module.get_bin_path('pkg', True)

    p = module.params

    # "name" may be a comma-separated list of packages.
    pkgs = p["name"].split(",")

    changed = False
    msgs = []

    # --rootdir requires pkg >= 1.5.
    rootdir_arg = ""
    if p["rootdir"] != "":
        old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
        if old_pkgng:
            module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
        else:
            rootdir_arg = "--rootdir %s" % (p["rootdir"])

    if p["state"] == "present":
        _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], rootdir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    elif p["state"] == "absent":
        _changed, _msg = remove_packages(module, pkgng_path, pkgs, rootdir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    # Annotations are applied regardless of the requested state.
    if p["annotation"]:
        _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], rootdir_arg)
        changed = changed or _changed
        msgs.append(_msg)

    module.exit_json(changed=changed, msg=", ".join(msgs))
# import module snippets
# (the ansible build system replaces this wildcard with the shared code)
from ansible.module_utils.basic import *

main()
| gpl-3.0 |
NickDaly/GemRB-FixConfig-Branch | gemrb/GUIScripts/bg2/GUIOPT8.py | 5 | 5789 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#gameplay
import GemRB

# Module-level handles shared by every callback in this script.
GamePlayWindow = 0
TextAreaControl = 0
def OnLoad():
    """Build the Gameplay Options window (GUIOPT window 8), wire every
    widget's event handler, and show the window.

    Numeric SetText() arguments are strrefs into the game's TLK file.
    """
    global GamePlayWindow, TextAreaControl

    GemRB.LoadWindowPack("GUIOPT", 640, 480)
    GamePlayWindow = GemRB.LoadWindow(8)
    TextAreaControl = GamePlayWindow.GetControl(40)

    # Sliders and their label buttons.
    DelayButton = GamePlayWindow.GetControl(21)
    DelaySlider = GamePlayWindow.GetControl(1)
    MouseSpdButton = GamePlayWindow.GetControl(22)
    MouseSpdSlider = GamePlayWindow.GetControl(2)
    KeySpdButton = GamePlayWindow.GetControl(23)
    KeySpdSlider = GamePlayWindow.GetControl(3)
    DifficultyButton = GamePlayWindow.GetControl(24)
    DifficultySlider = GamePlayWindow.GetControl(12)

    # Toggle options: each is a label button plus a checkbox button.
    BloodButton = GamePlayWindow.GetControl(27)
    BloodButtonB = GamePlayWindow.GetControl(19)
    DitherButton = GamePlayWindow.GetControl(25)
    DitherButtonB = GamePlayWindow.GetControl(14)
    InfravisionButton = GamePlayWindow.GetControl(44)
    InfravisionButtonB = GamePlayWindow.GetControl(42)
    WeatherButton = GamePlayWindow.GetControl(46)
    WeatherButtonB = GamePlayWindow.GetControl(47)
    HealButton = GamePlayWindow.GetControl(48)
    HealButtonB = GamePlayWindow.GetControl(50)
    HotKeyButton = GamePlayWindow.GetControl(51)

    # Sub-dialog and confirmation buttons.
    FeedbackButton = GamePlayWindow.GetControl(5)
    AutoPauseButton = GamePlayWindow.GetControl(6)
    OkButton = GamePlayWindow.GetControl(7)
    CancelButton = GamePlayWindow.GetControl(20)

    # Static captions.
    TextAreaControl.SetText(18042)
    OkButton.SetText(11973)
    CancelButton.SetText(13727)
    HotKeyButton.SetText(816)
    FeedbackButton.SetText(17163)
    AutoPauseButton.SetText(17166)

    # Sliders write straight into the named game variables.
    DelayButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DelayPress)
    DelaySlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, DelayPress)
    DelaySlider.SetVarAssoc("Tooltips", 0)

    KeySpdButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, KeySpdPress)
    KeySpdSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, KeySpdPress)
    KeySpdSlider.SetVarAssoc("Keyboard Scroll Speed", 0)

    MouseSpdButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, MouseSpdPress)
    MouseSpdSlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MouseSpdPress)
    MouseSpdSlider.SetVarAssoc("Mouse Scroll Speed", 0)

    DifficultyButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DifficultyPress)
    DifficultySlider.SetEvent(IE_GUI_SLIDER_ON_CHANGE, DifficultyPress)
    DifficultySlider.SetVarAssoc("Difficulty Level", 0)

    # Checkboxes toggle the named game variables between 0 and 1.
    BloodButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, BloodPress)
    BloodButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, BloodPress)
    BloodButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
    BloodButtonB.SetVarAssoc("Gore", 1)

    DitherButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DitherPress)
    DitherButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, DitherPress)
    DitherButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
    DitherButtonB.SetVarAssoc("Always Dither", 1)

    InfravisionButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, InfravisionPress)
    InfravisionButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, InfravisionPress)
    InfravisionButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
    InfravisionButtonB.SetVarAssoc("Infravision", 1)

    WeatherButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, WeatherPress)
    WeatherButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, WeatherPress)
    WeatherButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
    WeatherButtonB.SetVarAssoc("Weather", 1)

    HealButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, HealPress)
    HealButtonB.SetEvent(IE_GUI_BUTTON_ON_PRESS, HealPress)
    HealButtonB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
    HealButtonB.SetVarAssoc("Heal Party on Rest", 1)

    HotKeyButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, HotKeyPress)
    FeedbackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, FeedbackPress)
    AutoPauseButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, AutoPausePress)

    OkButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OkPress)
    OkButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
    CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
    CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)

    GamePlayWindow.SetVisible(WINDOW_VISIBLE)
    return
def DelayPress():
    """Show the tooltip-delay help text."""
    TextAreaControl.SetText(18017)
    return

def KeySpdPress():
    """Show the keyboard scroll speed help text."""
    TextAreaControl.SetText(18019)
    return

def MouseSpdPress():
    """Show the mouse scroll speed help text."""
    TextAreaControl.SetText(18018)
    return

def DifficultyPress():
    """Show the difficulty help text."""
    TextAreaControl.SetText(18020)
    return

def BloodPress():
    """Show the gore toggle help text."""
    TextAreaControl.SetText(18023)
    return

def DitherPress():
    """Show the dithering toggle help text."""
    TextAreaControl.SetText(18021)
    return

def InfravisionPress():
    """Show the infravision toggle help text."""
    TextAreaControl.SetText(11797)
    return

def WeatherPress():
    """Show the weather toggle help text."""
    TextAreaControl.SetText(20619)
    return

def HealPress():
    """Show the heal-party-on-rest toggle help text."""
    TextAreaControl.SetText(2242)
    return

def HotKeyPress():
    """Placeholder: the hotkey help text is currently disabled."""
    #TextAreaControl.SetText(18016)
    return
def FeedbackPress():
    """Close this window and open the Feedback options screen (GUIOPT9)."""
    GamePlayWindow.SetVisible(WINDOW_INVISIBLE)
    if GamePlayWindow:
        GamePlayWindow.Unload()
    GemRB.SetNextScript("GUIOPT9")
    return

def AutoPausePress():
    """Close this window and open the Auto-pause options screen (GUIOPT10)."""
    GamePlayWindow.SetVisible(WINDOW_INVISIBLE)
    if GamePlayWindow:
        GamePlayWindow.Unload()
    GemRB.SetNextScript("GUIOPT10")
    return

def OkPress():
    """Accept the settings and return to the main options screen."""
    GamePlayWindow.SetVisible(WINDOW_INVISIBLE)
    if GamePlayWindow:
        GamePlayWindow.Unload()
    GemRB.SetNextScript("StartOpt")
    return

def CancelPress():
    """Return to the main options screen.

    NOTE(review): identical to OkPress(); slider/checkbox variables are
    written immediately via SetVarAssoc, so Cancel does not actually
    revert any changes.
    """
    GamePlayWindow.SetVisible(WINDOW_INVISIBLE)
    if GamePlayWindow:
        GamePlayWindow.Unload()
    GemRB.SetNextScript("StartOpt")
    return
| gpl-2.0 |
sfcl/ancon | index/forms/tr_to_firm.py | 2 | 4712 | # -*- coding:utf-8 -*-
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from index.models import FirmTonerRefill
from docs.models import SCDoc
class TransfeToFirm(forms.Form):
    """Form for handing cartridges over to a refill firm for servicing.

    The list of cartridge ids arrives as a comma-separated string in the
    hidden ``numbers`` field.
    """

    # Comma-separated cartridge ids, e.g. "4,5,6,7" (filled in by the UI).
    numbers = forms.CharField(widget=forms.HiddenInput(), required=True)
    # Servicing firm; the CSS class triggers an AJAX reload of documents.
    firm = forms.ModelChoiceField(queryset=FirmTonerRefill.objects.all(),
                                  error_messages={'required': _('Required field.')},
                                  empty_label='',
                                  required=True,
                                  widget=forms.Select(attrs={'class':'load_doc_ajax'})
                                  )
    # Optional service-contract document.
    doc = forms.ModelChoiceField(queryset=SCDoc.objects.filter(), required=False)

    def clean_numbers(self):
        """Check the string has the form "4,5,6,7".

        Returns the list of cartridge numbers.
        """
        if not self.cleaned_data.get('numbers', ''):
            raise ValidationError(_('Required field.'))
        ret_list = self.cleaned_data.get('numbers', '')
        ret_list = ret_list.split(',')
        # Convert the list of strings into a list of integer ids;
        # non-numeric entries become 0.
        tmp = list()
        for i in ret_list:
            try:
                i = int(i)
            except ValueError:
                i = 0
            tmp.append(i)
        ret_list = tmp
        return ret_list

    def clean_doc(self):
        """Return the selected document's primary key, or None."""
        if not self.cleaned_data.get('doc', ''):
            return None
        doc_id = self.cleaned_data.get('doc', '')
        return doc_id.pk

    def clean_firm(self):
        """Return the selected firm's primary key."""
        if not self.cleaned_data.get('firm', ''):
            raise ValidationError(_('Required field.'))
        # TODO: perform a more advanced check that the pk exists in the database
        firm = self.cleaned_data.get('firm')
        return firm.pk
class TransfeToFirmScanner(forms.Form):
    """Form for handing cartridges over to a firm for servicing.

    The list of cartridges is populated by a barcode scanner.
    """

    #scan_number = forms.CharField(max_length=256, widget=forms.TextInput(attrs={'readonly': True, 'class': 'barcode'}), required=True)
    # Read-only field the barcode scanner writes into.
    scan_number = forms.CharField(max_length=256, widget=forms.TextInput(attrs={'readonly': True, 'class': 'barcode'}), required=False)
    # Comma-separated cartridge ids accumulated from scans.
    numbers = forms.CharField(widget=forms.HiddenInput(), required=True)
    firm = forms.ModelChoiceField(queryset=FirmTonerRefill.objects.all(),
                                  error_messages={'required': _('Required field.')},
                                  empty_label='',
                                  required=True,
                                  widget=forms.Select(attrs={'class':'load_doc_ajax'})
                                  )
    doc = forms.ModelChoiceField(queryset=SCDoc.objects.filter(), required=False)
    #price = forms.CharField(required=False)

    def clean_numbers(self):
        """Check the string has the form "4,5,6,7".

        Returns the list of cartridge numbers.
        """
        if not self.cleaned_data.get('numbers', ''):
            raise ValidationError(_('Required field.'))
        ret_list = self.cleaned_data.get('numbers', '')
        ret_list = ret_list.split(',')
        # Convert the list of strings into a list of integer ids;
        # non-numeric entries become 0.
        tmp = list()
        for i in ret_list:
            try:
                i = int(i)
            except ValueError:
                i = 0
            tmp.append(i)
        ret_list = tmp
        return ret_list

    def clean_doc(self):
        """Return the selected document's primary key, or None."""
        if not self.cleaned_data.get('doc', ''):
            return None
        doc_id = self.cleaned_data.get('doc', '')
        return doc_id.pk

    def clean_firm(self):
        """Return the selected firm's primary key."""
        if not self.cleaned_data.get('firm', ''):
            raise ValidationError(_('Required field.'))
        # TODO: perform a more advanced check that the pk exists in the database
        try:
            firm = self.cleaned_data.get('firm')
        except:
            # NOTE(review): dict.get() does not raise here, so this bare
            # except is effectively dead code.
            raise ValidationError(_('Firm object error.'))
        return firm.pk
| gpl-2.0 |
Salat-Cx65/python-for-android | python3-alpha/python3-src/Lib/asyncore.py | 46 | 21009 | # -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. it's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import select
import socket
import sys
import time
import warnings
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \
errorcode
# Errno values that mean "the peer has gone away"; seeing one of these
# during send/recv should close the channel rather than raise.
_DISCONNECTED = frozenset((ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE,
                           EBADF))

# The module-global fd -> dispatcher map must survive a reload(); only
# create it when it does not already exist.
try:
    socket_map
except NameError:
    socket_map = {}
def _strerror(err):
    """Best-effort human-readable message for errno *err*."""
    try:
        return os.strerror(err)
    except (ValueError, OverflowError, NameError):
        pass
    # os.strerror() rejected the value: fall back to the symbolic errno
    # name, then to a generic message.
    if err in errorcode:
        return errorcode[err]
    return "Unknown error %s" % err
class ExitNow(Exception):
    """Raised by handlers to abort the asyncore loop immediately."""
    pass


# Exceptions that must never be swallowed by the generic error handler.
_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit)


def _dispatch(obj, handler):
    """Call *handler*, letting loop-control exceptions escape and routing
    every other exception to obj.handle_error()."""
    try:
        handler()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()


def read(obj):
    """Dispatch a read event to *obj*."""
    _dispatch(obj, obj.handle_read_event)


def write(obj):
    """Dispatch a write event to *obj*."""
    _dispatch(obj, obj.handle_write_event)


def _exception(obj):
    """Dispatch an out-of-band / exceptional-condition event to *obj*."""
    _dispatch(obj, obj.handle_expt_event)
def readwrite(obj, flags):
    """Dispatch poll() result *flags* for *obj* to its handle_*() methods."""
    try:
        if flags & select.POLLIN:
            obj.handle_read_event()
        if flags & select.POLLOUT:
            obj.handle_write_event()
        if flags & select.POLLPRI:
            obj.handle_expt_event()
        if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL):
            # Hangup / error conditions close the channel.
            obj.handle_close()
    except socket.error as e:
        # Disconnect-type errors mean the peer went away: close quietly;
        # anything else goes through the channel's error handler.
        if e.args[0] not in _DISCONNECTED:
            obj.handle_error()
        else:
            obj.handle_close()
    except _reraised_exceptions:
        raise
    except:
        obj.handle_error()
def poll(timeout=0.0, map=None):
    """Run one select()-based iteration over the channels in *map*."""
    if map is None:
        map = socket_map
    if map:
        r = []; w = []; e = []
        # Build the select() fd lists from each channel's predicates.
        for fd, obj in list(map.items()):
            is_r = obj.readable()
            is_w = obj.writable()
            if is_r:
                r.append(fd)
            # accepting sockets should not be writable
            if is_w and not obj.accepting:
                w.append(fd)
            if is_r or is_w:
                e.append(fd)

        # Nothing to wait on: just honour the timeout.
        if [] == r == w == e:
            time.sleep(timeout)
            return

        try:
            r, w, e = select.select(r, w, e, timeout)
        except select.error as err:
            # EINTR (interrupted by a signal) is harmless; retry next round.
            if err.args[0] != EINTR:
                raise
            else:
                return

        # A handler may remove channels from the map mid-round, hence the
        # map.get() / None checks below.
        for fd in r:
            obj = map.get(fd)
            if obj is None:
                continue
            read(obj)

        for fd in w:
            obj = map.get(fd)
            if obj is None:
                continue
            write(obj)

        for fd in e:
            obj = map.get(fd)
            if obj is None:
                continue
            _exception(obj)
def poll2(timeout=0.0, map=None):
    """Run one poll()-based iteration over the channels in *map*."""
    # Use the poll() support added to the select module in Python 2.0
    if map is None:
        map = socket_map
    if timeout is not None:
        # timeout is in milliseconds
        timeout = int(timeout*1000)
    pollster = select.poll()
    if map:
        # Register each channel with the event mask its predicates imply.
        for fd, obj in list(map.items()):
            flags = 0
            if obj.readable():
                flags |= select.POLLIN | select.POLLPRI
            # accepting sockets should not be writable
            if obj.writable() and not obj.accepting:
                flags |= select.POLLOUT
            if flags:
                # Only check for exceptions if object was either readable
                # or writable.
                flags |= select.POLLERR | select.POLLHUP | select.POLLNVAL
                pollster.register(fd, flags)
        try:
            r = pollster.poll(timeout)
        except select.error as err:
            # EINTR (interrupted by a signal) is harmless.
            if err.args[0] != EINTR:
                raise
            r = []
        for fd, flags in r:
            obj = map.get(fd)
            if obj is None:
                # Channel was removed by an earlier handler this round.
                continue
            readwrite(obj, flags)

poll3 = poll2                           # Alias for backward compatibility
def loop(timeout=30.0, use_poll=False, map=None, count=None):
    """Run the asyncore event loop over *map* (default: the global socket_map).

    timeout  -- per-iteration select/poll timeout in seconds
    use_poll -- prefer poll() over select() when the platform supports it
    count    -- number of iterations to run; None means "until map is empty"
    """
    if map is None:
        map = socket_map

    # Pick the polling backend once, outside the loop.
    poll_fun = poll2 if (use_poll and hasattr(select, 'poll')) else poll

    if count is None:
        while map:
            poll_fun(timeout, map)
    else:
        while map and count > 0:
            poll_fun(timeout, map)
            count -= 1
class dispatcher:
    """Wrapper around a non-blocking socket that plugs into the event loop.

    Instances register themselves by fileno in a socket map; the poll
    functions then translate readiness events into handle_*() calls.
    """

    debug = False
    connected = False                           # transport established
    accepting = False                           # listening, waiting for accept()
    closing = False
    addr = None                                 # peer address, once known
    ignore_log_types = frozenset(['warning'])

    def __init__(self, sock=None, map=None):
        # Use the caller-supplied map if given, else the module-wide one.
        if map is None:
            self._map = socket_map
        else:
            self._map = map

        self._fileno = None

        if sock:
            # Set to nonblocking just to make sure for cases where we
            # get a socket from a blocking source.
            sock.setblocking(0)
            self.set_socket(sock, map)
            self.connected = True
            # The constructor no longer requires that the socket
            # passed be connected.
            try:
                self.addr = sock.getpeername()
            except socket.error as err:
                if err.args[0] == ENOTCONN:
                    # To handle the case where we got an unconnected
                    # socket.
                    self.connected = False
                else:
                    # The socket is broken in some unknown way, alert
                    # the user and remove it from the map (to prevent
                    # polling of broken sockets).
                    self.del_channel(map)
                    raise
        else:
            self.socket = None

    def __repr__(self):
        """Return e.g. '<module.dispatcher connected 1.2.3.4:80 at 0x...>'."""
        status = [self.__class__.__module__ + "." + self.__class__.__name__]
        if self.accepting and self.addr:
            status.append('listening')
        elif self.connected:
            status.append('connected')
        if self.addr is not None:
            try:
                status.append('%s:%d' % self.addr)
            except TypeError:
                # addr is not a (host, port) pair (e.g. a Unix path).
                status.append(repr(self.addr))
        return '<%s at %#x>' % (' '.join(status), id(self))

    __str__ = __repr__
    def add_channel(self, map=None):
        """Register this dispatcher in the socket map under its fileno."""
        #self.log_info('adding channel %s' % self)
        if map is None:
            map = self._map
        map[self._fileno] = self

    def del_channel(self, map=None):
        """Remove this dispatcher from the socket map, if present."""
        fd = self._fileno
        if map is None:
            map = self._map
        if fd in map:
            #self.log_info('closing channel %d:%s' % (fd, self))
            del map[fd]
        self._fileno = None

    def create_socket(self, family, type):
        """Create a fresh non-blocking socket and adopt it."""
        self.family_and_type = family, type
        sock = socket.socket(family, type)
        sock.setblocking(0)
        self.set_socket(sock)

    def set_socket(self, sock, map=None):
        """Adopt *sock* as this channel's socket and register the channel."""
        self.socket = sock
##        self.__dict__['socket'] = sock
        self._fileno = sock.fileno()
        self.add_channel(map)

    def set_reuse_addr(self):
        """Set SO_REUSEADDR so a recently used server port can be rebound."""
        # try to re-use a server port if possible
        try:
            self.socket.setsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR,
                self.socket.getsockopt(socket.SOL_SOCKET,
                                       socket.SO_REUSEADDR) | 1
                )
        except socket.error:
            pass

    # ==================================================
    # predicates for select()
    # these are used as filters for the lists of sockets
    # to pass to select().
    # ==================================================
    def readable(self):
        """Should this channel be watched for read events?  Default: yes."""
        return True

    def writable(self):
        """Should this channel be watched for write events?  Default: yes."""
        return True

    # ==================================================
    # socket object methods.
    # ==================================================

    def listen(self, num):
        """Switch into accepting mode; caps the backlog at 5 on Windows."""
        self.accepting = True
        if os.name == 'nt' and num > 5:
            num = 5
        return self.socket.listen(num)

    def bind(self, addr):
        """Bind the underlying socket and remember the local address."""
        self.addr = addr
        return self.socket.bind(addr)

    def connect(self, address):
        """Start a non-blocking connect; completion is reported via events."""
        self.connected = False
        err = self.socket.connect_ex(address)
        # EINPROGRESS & friends simply mean "connect still underway".
        if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
        or err == EINVAL and os.name in ('nt', 'ce'):
            return
        if err in (0, EISCONN):
            self.addr = address
            self.handle_connect_event()
        else:
            raise socket.error(err, errorcode[err])

    def accept(self):
        """Accept one connection; return (conn, addr) or None if none ready."""
        # XXX can return either an address pair or None
        try:
            conn, addr = self.socket.accept()
        except TypeError:
            return None
        except socket.error as why:
            if why.args[0] in (EWOULDBLOCK, ECONNABORTED, EAGAIN):
                return None
            else:
                raise
        else:
            return conn, addr
    def send(self, data):
        """Non-blocking send; returns bytes sent (0 when it would block)."""
        try:
            result = self.socket.send(data)
            return result
        except socket.error as why:
            if why.args[0] == EWOULDBLOCK:
                return 0
            elif why.args[0] in _DISCONNECTED:
                # Peer went away: close the channel, report nothing sent.
                self.handle_close()
                return 0
            else:
                raise

    def recv(self, buffer_size):
        """Non-blocking receive; b'' means the peer closed the connection."""
        try:
            data = self.socket.recv(buffer_size)
            if not data:
                # a closed connection is indicated by signaling
                # a read condition, and having recv() return 0.
                self.handle_close()
                return b''
            else:
                return data
        except socket.error as why:
            # winsock sometimes throws ENOTCONN
            if why.args[0] in _DISCONNECTED:
                self.handle_close()
                return b''
            else:
                raise

    def close(self):
        """Deregister the channel and close the socket, ignoring late errors."""
        self.connected = False
        self.accepting = False
        self.del_channel()
        try:
            self.socket.close()
        except socket.error as why:
            if why.args[0] not in (ENOTCONN, EBADF):
                raise
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
    def __getattr__(self, attr):
        # Fallback attribute lookup: delegate unknown attributes to the
        # wrapped socket object ("cheap inheritance").  Relying on this
        # delegation is deprecated, hence the warning below.
        try:
            retattr = getattr(self.socket, attr)
        except AttributeError:
            raise AttributeError("%s instance has no attribute '%s'"
                                 %(self.__class__.__name__, attr))
        else:
            msg = "%(me)s.%(attr)s is deprecated; use %(me)s.socket.%(attr)s " \
                  "instead" % {'me' : self.__class__.__name__, 'attr' : attr}
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return retattr
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if type not in self.ignore_log_types:
print('%s: %s' % (type, message))
def handle_read_event(self):
if self.accepting:
# accepting sockets are never connected, they "spawn" new
# sockets that are connected
self.handle_accept()
elif not self.connected:
self.handle_connect_event()
self.handle_read()
else:
self.handle_read()
def handle_connect_event(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
raise socket.error(err, _strerror(err))
self.handle_connect()
self.connected = True
    def handle_write_event(self):
        # Dispatch a write notification: finish a pending connect first,
        # then let the subclass write.
        if self.accepting:
            # Accepting sockets shouldn't get a write event.
            # We will pretend it didn't happen.
            return
        if not self.connected:
            #check for errors
            err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0:
                raise socket.error(err, _strerror(err))
            # SO_ERROR reports no failure, so the connect completed
            self.handle_connect_event()
        self.handle_write()
def handle_expt_event(self):
# handle_expt_event() is called if there might be an error on the
# socket, or if there is OOB data
# check for the error condition first
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
# we can get here when select.select() says that there is an
# exceptional condition on the socket
# since there is an error, we'll go ahead and close the socket
# like we would in a subclassed handle_read() that received no
# data
self.handle_close()
else:
self.handle_expt()
    def handle_error(self):
        # Last-resort handler: an exception escaped one of the handle_*
        # callbacks.  Log a compact traceback and close the channel.
        nil, t, v, tbinfo = compact_traceback()
        # sometimes a user repr method will crash.
        try:
            self_repr = repr(self)
        except:
            self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
        self.log_info(
            'uncaptured python exception, closing channel %s (%s:%s %s)' % (
                self_repr,
                t,
                v,
                tbinfo
                ),
            'error'
            )
        self.handle_close()
def handle_expt(self):
self.log_info('unhandled incoming priority event', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
pair = self.accept()
if pair is not None:
self.handle_accepted(*pair)
def handle_accepted(self, sock, addr):
sock.close()
self.log_info('unhandled accepted event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
    """A dispatcher with simple buffered-output capability.

    Useful for simple clients; for more sophisticated usage use
    asynchat.async_chat.
    """

    def __init__(self, sock=None, map=None):
        dispatcher.__init__(self, sock, map)
        self.out_buffer = b''

    def initiate_send(self):
        # push at most 512 bytes per event-loop turn
        sent = dispatcher.send(self, self.out_buffer[:512])
        self.out_buffer = self.out_buffer[sent:]

    def handle_write(self):
        self.initiate_send()

    def writable(self):
        # stay interested in write events while connecting or while
        # buffered data remains
        return (not self.connected) or len(self.out_buffer)

    def send(self, data):
        if self.debug:
            self.log_info('sending %s' % repr(data))
        self.out_buffer = self.out_buffer + data
        self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
    """Summarize the active exception.

    Returns ``((file, function, line), type, value, info)`` where *info*
    is a compact one-line rendering of the whole traceback.  Must be
    called from inside an except block.
    """
    t, v, tb = sys.exc_info()
    if not tb:  # Must have a traceback
        raise AssertionError("traceback does not exist")
    tbinfo = []
    while tb is not None:
        frame_code = tb.tb_frame.f_code
        tbinfo.append((frame_code.co_filename,
                       frame_code.co_name,
                       str(tb.tb_lineno)))
        tb = tb.tb_next
    # just to be safe: drop the traceback reference to avoid cycles
    del tb
    file, function, line = tbinfo[-1]
    info = ' '.join('[%s|%s|%s]' % entry for entry in tbinfo)
    return (file, function, line), t, v, info
def close_all(map=None, ignore_all=False):
    """Close every channel in *map* (default: the global socket_map).

    EBADF errors are always swallowed; other OS errors propagate unless
    *ignore_all* is true.  Exit-style exceptions are always re-raised.
    The map is cleared afterwards.
    """
    if map is None:
        map = socket_map
    for channel in list(map.values()):
        try:
            channel.close()
        except OSError as err:
            if err.args[0] != EBADF and not ignore_all:
                raise
        except _reraised_exceptions:
            raise
        except:
            if not ignore_all:
                raise
    map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# Here we override just enough to make a file
# look like a socket for the purposes of asyncore.
# The passed fd is automatically os.dup()'d
def __init__(self, fd):
self.fd = os.dup(fd)
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
def getsockopt(self, level, optname, buflen=None):
if (level == socket.SOL_SOCKET and
optname == socket.SO_ERROR and
not buflen):
return 0
raise NotImplementedError("Only asyncore specific behaviour "
"implemented.")
read = recv
write = send
def close(self):
os.close(self.fd)
def fileno(self):
return self.fd
    class file_dispatcher(dispatcher):
        """Dispatcher over an arbitrary file descriptor (pipe, tty, ...)
        rather than a socket; POSIX only."""
        def __init__(self, fd, map=None):
            dispatcher.__init__(self, None, map)
            self.connected = True
            try:
                # accept file-like objects as well as raw descriptors
                fd = fd.fileno()
            except AttributeError:
                pass
            self.set_file(fd)
            # set it to non-blocking mode
            flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
            flags = flags | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
        def set_file(self, fd):
            # wrap (a dup of) the fd so it quacks like a socket, then
            # register this channel in the socket map
            self.socket = file_wrapper(fd)
            self._fileno = self.socket.fileno()
            self.add_channel()
| apache-2.0 |
avlach/univbris-ocf | vt_manager/src/python/vt_manager/communication/sfa/util/callids.py | 2 | 2280 | #!/usr/bin/python
import threading
import time
#from vt_manager.communication.sfa.util.sfalogging import logger
"""
Callids: a simple mechanism to remember the call ids served so far
memory-only for now - thread-safe
implemented as a (singleton) hash 'callid'->timestamp
"""
debug=False
class _call_ids_impl (dict):
_instance = None
# 5 minutes sounds amply enough
purge_timeout=5*60
# when trying to get a lock
retries=10
# in ms
wait_ms=100
def __init__(self):
self._lock=threading.Lock()
# the only primitive
# return True if the callid is unknown, False otherwise
def already_handled (self,call_id):
# if not provided in the call...
if not call_id: return False
has_lock=False
for attempt in range(_call_ids_impl.retries):
if debug: logger.debug("Waiting for lock (%d)"%attempt)
if self._lock.acquire(False):
has_lock=True
if debug: logger.debug("got lock (%d)"%attempt)
break
time.sleep(float(_call_ids_impl.wait_ms)/1000)
# in the unlikely event where we can't get the lock
if not has_lock:
logger.warning("_call_ids_impl.should_handle_call_id: could not acquire lock")
return False
# we're good to go
if self.has_key(call_id):
self._purge()
self._lock.release()
return True
self[call_id]=time.time()
self._purge()
self._lock.release()
if debug: logger.debug("released lock")
return False
def _purge(self):
now=time.time()
o_keys=[]
for (k,v) in self.iteritems():
if (now-v) >= _call_ids_impl.purge_timeout: o_keys.append(k)
for k in o_keys:
if debug: logger.debug("Purging call_id %r (%s)"%(k,time.strftime("%H:%M:%S",time.localtime(self[k]))))
del self[k]
if debug:
logger.debug("AFTER PURGE")
for (k,v) in self.iteritems(): logger.debug("%s -> %s"%(k,time.strftime("%H:%M:%S",time.localtime(v))))
def Callids ():
    """Return the process-wide singleton _call_ids_impl instance.

    The previous check used truthiness (``if not _call_ids_impl._instance``),
    but the singleton is a dict subclass, so an *empty* instance is falsy
    and the singleton was silently re-created (losing its lock identity)
    on every call while empty.  Compare against None instead.
    """
    if _call_ids_impl._instance is None:
        _call_ids_impl._instance = _call_ids_impl()
    return _call_ids_impl._instance
| bsd-3-clause |
openweave/openweave-core | src/test-apps/happy/tests/standalone/wdmNext/test_weave_wdm_next_mutual_subscribe_48.py | 1 | 3501 | #!/usr/bin/env python3
#
# Copyright (c) 2016-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Calls Weave WDM mutual subscribe between nodes.
# I05: Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels
# M29: Stress Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels
#
from __future__ import absolute_import
from __future__ import print_function
import unittest
import set_test_path
from weave_wdm_next_test_base import weave_wdm_next_test_base
import WeaveUtilities
class test_weave_wdm_next_mutual_subscribe_48(weave_wdm_next_test_base):
def test_weave_wdm_next_mutual_subscribe_48(self):
wdm_next_args = {}
wdm_next_args['wdm_option'] = "mutual_subscribe"
wdm_next_args['total_client_count'] = 4
wdm_next_args['final_client_status'] = 0
wdm_next_args['timer_client_period'] = 16000
wdm_next_args['test_client_iterations'] = 5
wdm_next_args['test_client_delay'] = 35000
wdm_next_args['enable_client_flip'] = 0
wdm_next_args['total_server_count'] = 4
wdm_next_args['final_server_status'] = 4
wdm_next_args['timer_server_period'] = 15000
wdm_next_args['enable_server_flip'] = 1
wdm_next_args['server_event_generator'] = 'Security'
wdm_next_args['server_inter_event_period'] = 2000
wdm_next_args['client_log_check'] = [('Handler\[0\] \[(ALIVE|CONFM)\] bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] EndSubscription Ref\(\d+\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['server_log_check'] = [('bound mutual subscription is going away', wdm_next_args['test_client_iterations']),
('Client\[0\] \[(ALIVE|CONFM)\] CancelRequestHandler', wdm_next_args['test_client_iterations']),
('Client\[0\] moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations']),
('Handler\[0\] Moving to \[ FREE\] Ref\(0\)', wdm_next_args['test_client_iterations'])]
wdm_next_args['test_tag'] = self.__class__.__name__[19:].upper()
wdm_next_args['test_case_name'] = ['M29: Stress Mutual Subscribe: Responder Continuous Events. Mutate data in responder. Client in initiator cancels']
print('test file: ' + self.__class__.__name__)
print("weave-wdm-next test I05 and M29")
super(test_weave_wdm_next_mutual_subscribe_48, self).weave_wdm_next_test_base(wdm_next_args)
if __name__ == "__main__":
WeaveUtilities.run_unittest()
| apache-2.0 |
Lh4cKg/sl4a | python/src/Lib/ctypes/test/test_macholib.py | 53 | 1601 | import os
import sys
import unittest
# Bob Ippolito:
"""
Ok.. the code to find the filename for __getattr__ should look
something like:
import os
from macholib.dyld import dyld_find
def find_lib(name):
possible = ['lib'+name+'.dylib', name+'.dylib',
name+'.framework/'+name]
for dylib in possible:
try:
return os.path.realpath(dyld_find(dylib))
except ValueError:
pass
raise ValueError, "%s not found" % (name,)
It'll have output like this:
>>> find_lib('pthread')
'/usr/lib/libSystem.B.dylib'
>>> find_lib('z')
'/usr/lib/libz.1.dylib'
>>> find_lib('IOKit')
'/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit'
-bob
"""
from ctypes.macholib.dyld import dyld_find
def find_lib(name):
    """Locate the dylib or framework for *name* via dyld and return its
    real path; raise ValueError if none of the conventional names resolve."""
    candidates = ('lib' + name + '.dylib',
                  name + '.dylib',
                  name + '.framework/' + name)
    for candidate in candidates:
        try:
            return os.path.realpath(dyld_find(candidate))
        except ValueError:
            continue
    raise ValueError("%s not found" % (name,))
class MachOTest(unittest.TestCase):
    """Smoke-test dyld-based library lookup; only meaningful on macOS."""
    if sys.platform == "darwin":
        def test_find(self):
            # The failUnless*/failUnlessEqual aliases are deprecated and
            # were removed in Python 3.12; use the modern assert* names
            # (available since Python 2.7, so this stays backward compatible).
            self.assertEqual(find_lib('pthread'),
                             '/usr/lib/libSystem.B.dylib')
            result = find_lib('z')
            self.assertTrue(result.startswith('/usr/lib/libz.1'))
            self.assertTrue(result.endswith('.dylib'))
            self.assertEqual(find_lib('IOKit'),
                             '/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit')
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
israeltobias/DownMedia | youtube-dl/youtube_dl/extractor/videomega.py | 47 | 1987 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
decode_packed_codes,
sanitized_Request,
)
class VideoMegaIE(InfoExtractor):
    """youtube-dl extractor for videomega.tv view/iframe/cdn embed pages."""
    _VALID_URL = r'(?:videomega:|https?://(?:www\.)?videomega\.tv/(?:(?:view|iframe|cdn)\.php)?\?ref=)(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA',
        'md5': 'cc1920a58add3f05c6a93285b84fb3aa',
        'info_dict': {
            'id': 'AOSQBJYKIDDIKYJBQSOA',
            'ext': 'mp4',
            'title': '1254207',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }, {
        'url': 'http://videomega.tv/cdn.php?ref=AOSQBJYKIDDIKYJBQSOA&width=1070&height=600',
        'only_matching': True,
    }, {
        'url': 'http://videomega.tv/view.php?ref=090051111052065112106089103052052103089106112065052111051090',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # Always fetch the cdn.php embed page; it carries the packed
        # player code containing the real video URL.
        video_id = self._match_id(url)
        iframe_url = 'http://videomega.tv/cdn.php?ref=%s' % video_id
        req = sanitized_Request(iframe_url)
        # the site checks the Referer header and an ad-related cookie
        req.add_header('Referer', url)
        req.add_header('Cookie', 'noadvtday=0')
        webpage = self._download_webpage(req, video_id)
        title = self._html_search_regex(
            r'<title>(.+?)</title>', webpage, 'title')
        # strip the site's branding prefix/suffix from the page title
        title = re.sub(
            r'(?:^[Vv]ideo[Mm]ega\.tv\s-\s*|\s*-\svideomega\.tv$)', '', title)
        thumbnail = self._search_regex(
            r'<video[^>]+?poster="([^"]+)"', webpage, 'thumbnail', fatal=False)
        # the player source is packed/obfuscated; decode before searching
        # for the "src" assignment
        real_codes = decode_packed_codes(webpage)
        video_url = self._search_regex(
            r'"src"\s*,\s*"([^"]+)"', real_codes, 'video URL')
        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'http_headers': {
                # playback requires the same Referer as the embed fetch
                'Referer': iframe_url,
            },
        }
| gpl-3.0 |
Deepakkothandan/ansible | lib/ansible/modules/network/iosxr/iosxr_facts.py | 27 | 12721 | #!/usr/bin/python
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_facts
version_added: "2.2"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Collect facts from remote devices running IOS XR
description:
- Collects a base set of device facts from a remote device that
is running IOS XR. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
extends_documentation_fragment: iosxr
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
all, hardware, config, and interfaces. Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(M(!)) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- iosxr_facts:
gather_subset: all
# Collect only the config and default facts
- iosxr_facts:
gather_subset:
- config
# Do not collect hardware facts
- iosxr_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: string
ansible_net_image:
description: The image file the device is running
returned: always
type: string
# hardware
ansible_net_filesystems:
description: All file system names available on the device
returned: when hardware is configured
type: list
ansible_net_memfree_mb:
description: The available free memory on the remote device in Mb
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in Mb
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of LLDP neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args, run_commands
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import zip
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement commands() to declare the CLI commands they need
    and populate() to parse the output; facts accumulate in self.facts.
    """

    def __init__(self):
        self.facts = dict()
        self.commands()

    def commands(self):
        raise NotImplementedError
class Default(FactsBase):
    """Collect the always-gathered facts: version, image and hostname."""

    def commands(self):
        return(['show version brief'])

    def populate(self, results):
        output = results['show version brief']
        self.facts['version'] = self.parse_version(output)
        self.facts['image'] = self.parse_image(output)
        self.facts['hostname'] = self.parse_hostname(output)

    def parse_version(self, data):
        match = re.search(r'Version (\S+)$', data, re.M)
        return match.group(1) if match else None

    def parse_hostname(self, data):
        match = re.search(r'^(.+) uptime', data, re.M)
        return match.group(1) if match else None

    def parse_image(self, data):
        match = re.search(r'image file is "(.+)"', data)
        return match.group(1) if match else None
class Hardware(FactsBase):
    """Collect filesystem names and memory totals."""

    def commands(self):
        return(['dir /all', 'show memory summary'])

    def populate(self, results):
        self.facts['filesystems'] = self.parse_filesystems(
            results['dir /all'])
        match = re.search(r'Physical Memory: (\d+)M total \((\d+)',
                          results['show memory summary'])
        if match:
            # values stay as strings, matching the original behaviour
            self.facts['memtotal_mb'] = match.group(1)
            self.facts['memfree_mb'] = match.group(2)

    def parse_filesystems(self, data):
        return re.findall(r'^Directory of (\S+)', data, re.M)
class Config(FactsBase):
    """Collect the running configuration verbatim."""

    def commands(self):
        return(['show running-config'])

    def populate(self, results):
        self.facts['config'] = results['show running-config']
class Interfaces(FactsBase):
    """Collect per-interface details, all IPv4/IPv6 addresses and the
    LLDP neighbor table."""

    def commands(self):
        return(['show interfaces', 'show ipv6 interface',
                'show lldp', 'show lldp neighbors detail'])

    def populate(self, results):
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()
        interfaces = self.parse_interfaces(results['show interfaces'])
        self.facts['interfaces'] = self.populate_interfaces(interfaces)
        data = results['show ipv6 interface']
        if len(data) > 0:
            data = self.parse_interfaces(data)
            self.populate_ipv6_interfaces(data)
        # LLDP facts are only present when the feature is enabled
        if 'LLDP is not enabled' not in results['show lldp']:
            neighbors = results['show lldp neighbors detail']
            self.facts['neighbors'] = self.parse_neighbors(neighbors)

    def populate_interfaces(self, interfaces):
        facts = dict()
        for key, value in iteritems(interfaces):
            intf = dict()
            intf['description'] = self.parse_description(value)
            intf['macaddress'] = self.parse_macaddress(value)
            # parse the IPv4 address once and reuse the result (it was
            # previously parsed twice per interface)
            ipv4 = self.parse_ipv4(value)
            intf['ipv4'] = ipv4
            if ipv4:
                self.add_ip_address(ipv4['address'], 'ipv4')
            intf['mtu'] = self.parse_mtu(value)
            intf['bandwidth'] = self.parse_bandwidth(value)
            intf['duplex'] = self.parse_duplex(value)
            intf['lineprotocol'] = self.parse_lineprotocol(value)
            intf['operstatus'] = self.parse_operstatus(value)
            intf['type'] = self.parse_type(value)
            facts[key] = intf
        return facts

    def populate_ipv6_interfaces(self, data):
        for key, value in iteritems(data):
            # skip the leading informational lines of the command output
            if key in ['No', 'RPF'] or key.startswith('IP'):
                continue
            self.facts['interfaces'][key]['ipv6'] = list()
            addresses = re.findall(r'\s+(.+), subnet', value, re.M)
            subnets = re.findall(r', subnet is (.+)$', value, re.M)
            for addr, subnet in zip(addresses, subnets):
                ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
                self.add_ip_address(addr.strip(), 'ipv6')
                self.facts['interfaces'][key]['ipv6'].append(ipv6)

    def add_ip_address(self, address, family):
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_neighbors(self, neighbors):
        facts = dict()
        # neighbor entries are separated by a dashed rule in the output
        nbors = neighbors.split('------------------------------------------------')
        for entry in nbors[1:]:
            if entry == '':
                continue
            intf = self.parse_lldp_intf(entry)
            if intf not in facts:
                facts[intf] = list()
            fact = dict()
            fact['host'] = self.parse_lldp_host(entry)
            fact['port'] = self.parse_lldp_port(entry)
            facts[intf].append(fact)
        return facts

    def parse_interfaces(self, data):
        # split output into per-interface chunks: a line starting in
        # column 0 begins a new section, indented lines are appended to
        # the current one
        parsed = dict()
        key = ''
        for line in data.split('\n'):
            if len(line) == 0:
                continue
            elif line[0] == ' ':
                parsed[key] += '\n%s' % line
            else:
                match = re.match(r'^(\S+)', line)
                if match:
                    key = match.group(1)
                    parsed[key] = line
        return parsed

    def parse_description(self, data):
        match = re.search(r'Description: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_macaddress(self, data):
        match = re.search(r'address is (\S+)', data)
        if match:
            return match.group(1)

    def parse_ipv4(self, data):
        match = re.search(r'Internet address is (\S+)/(\d+)', data)
        if match:
            addr = match.group(1)
            masklen = int(match.group(2))
            return dict(address=addr, masklen=masklen)

    def parse_mtu(self, data):
        match = re.search(r'MTU (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_bandwidth(self, data):
        match = re.search(r'BW (\d+)', data)
        if match:
            return int(match.group(1))

    def parse_duplex(self, data):
        match = re.search(r'(\w+) Duplex', data, re.M)
        if match:
            return match.group(1)

    def parse_type(self, data):
        match = re.search(r'Hardware is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lineprotocol(self, data):
        match = re.search(r'line protocol is (.+)\s+?$', data, re.M)
        if match:
            return match.group(1)

    def parse_operstatus(self, data):
        match = re.search(r'^(?:.+) is (.+),', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_intf(self, data):
        match = re.search(r'^Local Interface: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_host(self, data):
        match = re.search(r'System Name: (.+)$', data, re.M)
        if match:
            return match.group(1)

    def parse_lldp_port(self, data):
        match = re.search(r'Port id: (.+)$', data, re.M)
        if match:
            return match.group(1)
# Map each gather_subset keyword to the collector class implementing it.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)
# The set of subset names accepted by the gather_subset module option.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Ansible module entry point: resolve the requested fact subsets,
    run the matching collectors and return facts under ansible_net_*."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )
    spec.update(iosxr_argument_spec)
    module = AnsibleModule(argument_spec=spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    gather_subset = module.params['gather_subset']
    # resolve requests: plain names select, '!name' excludes, and
    # 'all' / '!all' expand to every known subset
    runable_subsets = set()
    exclude_subsets = set()
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False
        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')
        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)
    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)
    runable_subsets.difference_update(exclude_subsets)
    # the default facts are always collected, even when excluded
    runable_subsets.add('default')
    facts = dict()
    facts['gather_subset'] = list(runable_subsets)
    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key]())
    try:
        for inst in instances:
            commands = inst.commands()
            responses = run_commands(module, commands)
            results = dict(zip(commands, responses))
            inst.populate(results)
            facts.update(inst.facts)
    except Exception:
        # NOTE(review): this swallows any collection error and exits with
        # the raw output; 'results' may even be unbound if the first
        # run_commands call fails -- consider fail_json here instead
        module.exit_json(out=module.from_json(results))
    # prefix every fact key for the ansible_net_* namespace
    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value
    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
main()
| gpl-3.0 |
bl4ckic3/binnavi | src/main/java/com/google/security/zynamics/binnavi/scripts/mono/sample/sample.py | 70 | 6982 | """
Copyright 2014 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This sample plugin shows how to use the monotone framework from Python
# to find out what registers are modified by a function.
#
# If you want to write your own monotone framework plugin you can use
# this file as a skeleton for your own plugin. Just copy and paste the
# code of this plugin to a new file and add your own code there.
import sys
from sets import Set
from javax.swing import JMenuItem as JMenuItem
from javax.swing import AbstractAction as AbstractAction
from com.google.security.zynamics.binnavi.API.helpers import MessageBox as MessageBox
from com.google.security.zynamics.binnavi.API.plugins import IGraphMenuPlugin as IGraphMenuPlugin
from com.google.security.zynamics.binnavi.API.reil.mono import ILattice
from com.google.security.zynamics.binnavi.API.reil.mono import ILatticeElement
from com.google.security.zynamics.binnavi.API.reil.mono import MonotoneSolver
from com.google.security.zynamics.binnavi.API.reil.mono import ITransformationProvider
from com.google.security.zynamics.binnavi.API.reil.mono import DownWalker
from com.google.security.zynamics.binnavi.API.reil.mono import DefaultStateVector
from com.google.security.zynamics.binnavi.API.reil.mono import InstructionGraph
# Determines whether an instruction writes a native register
def writes_native_register(instruction):
    """Return True when the REIL instruction writes a native register.

    Native means: the third operand is alphanumeric, not a pure number
    (a literal), and not a temporary register (names starting with 't');
    jcc/ldm/stm use the third operand as a target rather than a written
    register, so they are excluded as well.
    """
    value = instruction.thirdOperand.value
    return (value.isalnum()
            and not value.isdigit()
            and value[0] != 't'
            and instruction.mnemonic not in ("jcc", "ldm", "stm"))
# This class is used for the elements of the lattice. Each lattice element
# is used to keep track of the known state for a REIL instruction during
# analysis. Since this plugin keeps track of written registers, the kept
# state says what registers are written after this instruction is
# executed.
class SkeletonLatticeElement(ILatticeElement):
    """Lattice element: the set of registers known to be written once the
    associated REIL instruction has executed."""
    def __init__(self):
        self.written_registers = Set()
    def equals(self, rhs):
        # lets MonoREIL detect that the fixed point has been reached
        return self.written_registers == rhs.written_registers
    def lessThan(self, rhs):
        # lets MonoREIL verify the monotonicity requirement
        return self.written_registers < rhs.written_registers
# This class defines the lattice used by the monotone framework. Its only
# purpose is to defined a function that is used to combine a list of states
# into one state.
class SkeletonLattice(ILattice):
    """Lattice whose combine() unions the written-register sets of all
    incoming states into a single state."""
    def combine(self, states):
        merged = SkeletonLatticeElement()
        for state in states:
            merged.written_registers = merged.written_registers.union(state.element.written_registers)
        return merged
# This class provides the transformations each instruction has on a state. For
# each instruction of the instruction graph, the current state of the instruction
# and the combined state of the influencing nodes is passed to the function.
# The function returns the state of the instruction while considering the input
# states.
class SkeletonTransformationProvider(ITransformationProvider):
    """Per-instruction transfer function: the outgoing state is the union
    of the node's current state and the combined influencing state."""
    def transform(self, node, currentState, influencingState):
        result = SkeletonLatticeElement()
        result.written_registers = result.written_registers.union(currentState.written_registers)
        result.written_registers = result.written_registers.union(influencingState.written_registers)
        return result
# This function creates the initial state of the state vector passed to the
# monotone framework. In the beginning the state of each instruction is defined
# as the register it writes.
def generateStartVector(graph):
    """Build the initial state vector: every instruction starts out
    knowing only the register it itself writes (if any)."""
    startVector = DefaultStateVector()
    for node in graph:
        element = SkeletonLatticeElement()
        instruction = node.instruction
        if writes_native_register(instruction):
            element.written_registers.add(instruction.thirdOperand.value)
        startVector.setState(node, element)
    return startVector
class MessageAction(AbstractAction):
    """Swing action that runs the register-usage analysis on the graph in
    *frame* and reports the modified registers in a message box."""
    def __init__(self, pi, frame):
        AbstractAction.__init__(self, "Monotone Framework Sample")
        self.pi = pi
        self.frame = frame
    def actionPerformed(self, e):
        # The monotone framework only works on REIL graphs so we have to translate
        # the current view to REIL first.
        reilGraph = self.frame.view2D.view.reilCode
        # Generally the monotone framework works on graphs where each node represents
        # a REIL instruction. For this reason there is a helper function that creates
        # this instruction graph from a REIL graph.
        graph = InstructionGraph.create(reilGraph.graph)
        # Define the lattice used by the monotone framework.
        lattice = SkeletonLattice()
        # Generate the initial state vector.
        startVector = generateStartVector(graph)
        # Define the transformations used by the monotone framework.
        transformationProvider = SkeletonTransformationProvider()
        # Register tracking starts at the beginning of a function and moves
        # downwards, so we use the default DownWalker class to move through
        # the graph.
        walker = DownWalker()
        # Use the monotone framework to find what registers are defined by the current function.
        solver = MonotoneSolver(graph, lattice, startVector, transformationProvider, walker)
        results = solver.solve()
        # Process and display the results: union the per-instruction
        # states, sort for stable output, then show a dialog.
        used_register_set = Set()
        for node in graph:
            used_register_set = used_register_set.union(results.getState(node).written_registers)
        register_list = list(used_register_set)
        register_list.sort()
        joinedString = ", ".join(register_list)
        MessageBox.showInformation(self.frame.window.frame, "This function modifies the registers %s." % joinedString)
class MonotoneSkeletonPlugin(IGraphMenuPlugin):
    """BinNavi graph-menu plugin entry point for the register-usage
    monotone-framework sample."""
    def getName(self):
        return "Monotone Framework Sample (Register Usage)"
    def getGuid(self):
        return 564378237613635
    def getDescription(self):
        return "Skeleton for monotone framework plugins (shows what registers are written by a function)"
    def init(self, pi):
        # keep the plugin interface around for later menu construction
        self.pi = pi
    def closed(self, pi):
        pass
    def unload(self):
        pass
    def extendPluginMenu(self, frame):
        return [ JMenuItem(MessageAction(self.pi, frame)) ]
# Instantiate the plugin and register it with BinNavi so the menu entry
# becomes available in graph windows.
skeleton = MonotoneSkeletonPlugin()
navi.getPluginRegistry().addPlugin(skeleton)
| apache-2.0 |
richardtrip/noteII | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Decode the packed list of little-endian u32 values on stdin into the
# space-separated "index=value" pairs expected by the adsl_config sysfs
# attribute.  Indexes are printed in hex, values in decimal.
for i, word in enumerate(iter(lambda: sys.stdin.read(4), '')):
    if len(word) != 4:
        # A trailing partial word means the input file is corrupt.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(word)))
        sys.exit(1)
    if i:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", word)[0]))
sys.stdout.write("\n")
| gpl-2.0 |
stanlyxiang/incubator-hawq | tools/bin/pythonSrc/pexpect-4.2/pexpect/run.py | 49 | 6632 | import sys
import types
from .exceptions import EOF, TIMEOUT
from .pty_spawn import spawn
def run(command, timeout=30, withexitstatus=False, events=None,
        extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
    '''
    This function runs the given command; waits for it to finish; then
    returns all output as a string. STDERR is included in output. If the full
    path to the command is not given then the path is searched.

    Note that lines are terminated by CR/LF (\\r\\n) combination even on
    UNIX-like systems because this is the standard for pseudottys. If you set
    'withexitstatus' to true, then run will return a tuple of (command_output,
    exitstatus). If 'withexitstatus' is false then this returns just
    command_output.

    The run() function can often be used instead of creating a spawn instance.
    For example, the following code uses spawn::

        from pexpect import *
        child = spawn('scp foo user@example.com:.')
        child.expect('(?i)password')
        child.sendline(mypassword)

    The previous code can be replaced with the following::

        from pexpect import *
        run('scp foo user@example.com:.', events={'(?i)password': mypassword})

    **Examples**

    Start the apache daemon on the local machine::

        from pexpect import *
        run("/usr/local/apache/bin/apachectl start")

    Check in a file using SVN::

        from pexpect import *
        run("svn ci -m 'automatic commit' my_file.py")

    Run a command and capture exit status::

        from pexpect import *
        (command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)

    The following will run SSH and execute 'ls -l' on the remote machine. The
    password 'secret' will be sent if the '(?i)password' pattern is ever seen::

        run("ssh username@machine.example.com 'ls -l'",
            events={'(?i)password':'secret\\n'})

    This will start mencoder to rip a video from DVD. This will also display
    progress ticks every 5 seconds as it runs. For example::

        from pexpect import *
        def print_ticks(d):
            print d['event_count'],
        run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
            events={TIMEOUT:print_ticks}, timeout=5)

    The 'events' argument should be either a dictionary or a tuple list that
    contains patterns and responses. Whenever one of the patterns is seen
    in the command output, run() will send the associated response string.
    So, run() in the above example can be also written as:

        run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
            events=[(TIMEOUT,print_ticks)], timeout=5)

    Use a tuple list for events if the command output requires a delicate
    control over what pattern should be matched, since the tuple list is passed
    to pexpect() as its pattern list, with the order of patterns preserved.

    Note that you should put newlines in your string if Enter is necessary.

    Like the example above, the responses may also contain a callback, either
    a function or method. It should accept a dictionary value as an argument.
    The dictionary contains all the locals from the run() function, so you can
    access the child spawn object or any other variable defined in run()
    (event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process. Otherwise run() continues
    until the next event. A callback may also return a string which will be
    sent to the child. 'extra_args' is not used directly by run(). It provides
    a way to pass data to a callback function through run() through the locals
    dictionary passed to a callback.

    Like :class:`spawn`, passing *encoding* will make it work with unicode
    instead of bytes. You can pass *codec_errors* to control how errors in
    encoding and decoding are handled.
    '''
    # timeout == -1 selects spawn's own default timeout instead of a
    # caller-supplied one; otherwise the given timeout is forwarded.
    if timeout == -1:
        child = spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
                      **kwargs)
    else:
        child = spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
                      cwd=cwd, env=env, **kwargs)
    # Normalize 'events' into parallel pattern/response lists.  A list of
    # (pattern, response) tuples preserves matching order; a dict does not.
    if isinstance(events, list):
        patterns= [x for x,y in events]
        responses = [y for x,y in events]
    elif isinstance(events, dict):
        patterns = list(events.keys())
        responses = list(events.values())
    else:
        # This assumes EOF or TIMEOUT will eventually cause run to terminate.
        patterns = None
        responses = None
    child_result_list = []
    event_count = 0
    # Main event loop: collect output until EOF/TIMEOUT, dispatching the
    # configured response whenever a pattern matches.
    while True:
        try:
            index = child.expect(patterns)
            if isinstance(child.after, child.allowed_string_types):
                child_result_list.append(child.before + child.after)
            else:
                # child.after may have been a TIMEOUT or EOF,
                # which we don't want appended to the list.
                child_result_list.append(child.before)
            if isinstance(responses[index], child.allowed_string_types):
                # Plain string response: send it to the child verbatim.
                child.send(responses[index])
            elif (isinstance(responses[index], types.FunctionType) or
                  isinstance(responses[index], types.MethodType)):
                # Callback response: receives this frame's locals(); a string
                # result is sent to the child, any other truthy result stops
                # the loop.
                callback_result = responses[index](locals())
                sys.stdout.flush()
                if isinstance(callback_result, child.allowed_string_types):
                    child.send(callback_result)
                elif callback_result:
                    break
            else:
                raise TypeError("parameter `event' at index {index} must be "
                                "a string, method, or function: {value!r}"
                                .format(index=index, value=responses[index]))
            event_count = event_count + 1
        except TIMEOUT:
            child_result_list.append(child.before)
            break
        except EOF:
            child_result_list.append(child.before)
            break
    # Join using the spawn's native string type (str or bytes, depending on
    # whether an encoding was supplied).
    child_result = child.string_type().join(child_result_list)
    if withexitstatus:
        child.close()
        return (child_result, child.exitstatus)
    else:
        return child_result
def runu(command, timeout=30, withexitstatus=False, events=None,
         extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
    """Deprecated: pass encoding to run() instead.
    """
    # Default to UTF-8 text mode unless the caller chose an encoding.
    if 'encoding' not in kwargs:
        kwargs['encoding'] = 'utf-8'
    return run(
        command,
        timeout=timeout,
        withexitstatus=withexitstatus,
        events=events,
        extra_args=extra_args,
        logfile=logfile,
        cwd=cwd,
        env=env,
        **kwargs)
| apache-2.0 |
anryko/ansible | lib/ansible/modules/cloud/misc/rhevm.py | 21 | 51254 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible metadata describing the maturity and support level of this module.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: rhevm
short_description: RHEV/oVirt automation
description:
- This module only supports oVirt/RHEV version 3.
- A newer module M(ovirt_vm) supports oVirt/RHV version 4.
- Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform.
version_added: "2.2"
requirements:
- ovirtsdk
author:
- Timothy Vandenbrande (@TimothyVandenbrande)
options:
user:
description:
- The user to authenticate with.
type: str
default: admin@internal
password:
description:
- The password for user authentication.
type: str
server:
description:
- The name/IP of your RHEV-m/oVirt instance.
type: str
default: 127.0.0.1
port:
description:
- The port on which the API is reachable.
type: int
default: 443
insecure_api:
description:
- A boolean switch to make a secure or insecure connection to the server.
type: bool
default: no
name:
description:
- The name of the VM.
type: str
cluster:
description:
- The RHEV/oVirt cluster in which you want your VM to start.
type: str
datacenter:
description:
- The RHEV/oVirt datacenter in which you want your VM to start.
type: str
default: Default
state:
description:
- This serves to create/remove/update or powermanage your VM.
type: str
choices: [ absent, cd, down, info, ping, present, restarted, up ]
default: present
image:
description:
- The template to use for the VM.
type: str
type:
description:
- To define if the VM is a server or desktop.
type: str
choices: [ desktop, host, server ]
default: server
vmhost:
description:
- The host you wish your VM to run on.
type: str
vmcpu:
description:
- The number of CPUs you want in your VM.
type: int
default: 2
cpu_share:
description:
- This parameter is used to configure the CPU share.
type: int
default: 0
vmmem:
description:
- The amount of memory you want your VM to use (in GB).
type: int
default: 1
osver:
description:
- The operating system option in RHEV/oVirt.
type: str
default: rhel_6x64
mempol:
description:
- The minimum amount of memory you wish to reserve for this system.
type: int
default: 1
vm_ha:
description:
- To make your VM High Available.
type: bool
default: yes
disks:
description:
- This option uses complex arguments and is a list of disks with the options name, size and domain.
type: list
ifaces:
description:
- This option uses complex arguments and is a list of interfaces with the options name and vlan.
type: list
aliases: [ interfaces, nics ]
boot_order:
description:
- This option uses complex arguments and is a list of items that specify the bootorder.
type: list
default: [ hd, network ]
del_prot:
description:
- This option sets the delete protection checkbox.
type: bool
default: yes
cd_drive:
description:
- The CD you wish to have mounted on the VM when I(state = 'CD').
type: str
timeout:
description:
- The timeout you wish to define for power actions.
- When I(state = 'up').
- When I(state = 'down').
- When I(state = 'restarted').
type: int
'''
RETURN = r'''
vm:
description: Returns all of the VMs variables and execution.
returned: always
type: dict
sample: '{
"boot_order": [
"hd",
"network"
],
"changed": true,
"changes": [
"Delete Protection"
],
"cluster": "C1",
"cpu_share": "0",
"created": false,
"datacenter": "Default",
"del_prot": true,
"disks": [
{
"domain": "ssd-san",
"name": "OS",
"size": 40
}
],
"eth0": "00:00:5E:00:53:00",
"eth1": "00:00:5E:00:53:01",
"eth2": "00:00:5E:00:53:02",
"exists": true,
"failed": false,
"ifaces": [
{
"name": "eth0",
"vlan": "Management"
},
{
"name": "eth1",
"vlan": "Internal"
},
{
"name": "eth2",
"vlan": "External"
}
],
"image": false,
"mempol": "0",
"msg": [
"VM exists",
"cpu_share was already set to 0",
"VM high availability was already set to True",
"The boot order has already been set",
"VM delete protection has been set to True",
"Disk web2_Disk0_OS already exists",
"The VM starting host was already set to host416"
],
"name": "web2",
"type": "server",
"uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
"vm_ha": true,
"vmcpu": "4",
"vmhost": "host416",
"vmmem": "16"
}'
'''
EXAMPLES = r'''
- name: Basic get info from VM
rhevm:
server: rhevm01
user: '{{ rhev.admin.name }}'
password: '{{ rhev.admin.pass }}'
name: demo
state: info
- name: Basic create example from image
rhevm:
server: rhevm01
user: '{{ rhev.admin.name }}'
password: '{{ rhev.admin.pass }}'
name: demo
cluster: centos
image: centos7_x64
state: present
- name: Power management
rhevm:
server: rhevm01
user: '{{ rhev.admin.name }}'
password: '{{ rhev.admin.pass }}'
cluster: RH
name: uptime_server
image: centos7_x64
state: down
- name: Multi disk, multi nic create example
rhevm:
server: rhevm01
user: '{{ rhev.admin.name }}'
password: '{{ rhev.admin.pass }}'
cluster: RH
name: server007
type: server
vmcpu: 4
vmmem: 2
ifaces:
- name: eth0
vlan: vlan2202
- name: eth1
vlan: vlan36
- name: eth2
vlan: vlan38
- name: eth3
vlan: vlan2202
disks:
- name: root
size: 10
domain: ssd-san
- name: swap
size: 10
domain: 15kiscsi-san
- name: opt
size: 10
domain: 15kiscsi-san
- name: var
size: 10
domain: 10kiscsi-san
- name: home
size: 10
domain: sata-san
boot_order:
- network
- hd
state: present
- name: Add a CD to the disk cd_drive
rhevm:
user: '{{ rhev.admin.name }}'
password: '{{ rhev.admin.pass }}'
name: server007
cd_drive: rhev-tools-setup.iso
state: cd
- name: New host deployment + host network configuration
rhevm:
password: '{{ rhevm.admin.pass }}'
name: ovirt_node007
type: host
cluster: rhevm01
ifaces:
- name: em1
- name: em2
- name: p3p1
ip: 172.31.224.200
netmask: 255.255.254.0
- name: p3p2
ip: 172.31.225.200
netmask: 255.255.254.0
- name: bond0
bond:
- em1
- em2
network: rhevm
ip: 172.31.222.200
netmask: 255.255.255.0
management: yes
- name: bond0.36
network: vlan36
ip: 10.2.36.200
netmask: 255.255.254.0
gateway: 10.2.36.254
- name: bond0.2202
network: vlan2202
- name: bond0.38
network: vlan38
state: present
'''
import time
try:
from ovirtsdk.api import API
from ovirtsdk.xml import params
HAS_SDK = True
except ImportError:
HAS_SDK = False
from ansible.module_utils.basic import AnsibleModule
# Exit codes reported back to Ansible.
RHEV_FAILED = 1
RHEV_SUCCESS = 0
RHEV_UNAVAILABLE = 2
# Accepted values for the 'type' option.
RHEV_TYPE_OPTS = ['desktop', 'host', 'server']
# NOTE(review): DOCUMENTATION advertises the state 'restarted' while this
# list contains 'restart' -- confirm which spelling the module actually
# accepts before relying on either.
STATE_OPTS = ['absent', 'cd', 'down', 'info', 'ping', 'present', 'restart', 'up']
# Module-level accumulators mutated by the setMsg/setChanged/setFailed
# helpers (defined elsewhere in this file) and reported at exit.
msg = []
changed = False
failed = False
class RHEVConn(object):
    """Thin wrapper around the oVirt/RHEV 3.x SDK connection.

    All methods report progress/errors through the module-level
    setMsg/setChanged/setFailed helpers and return True/False rather than
    raising to the caller.
    """
    def __init__(self, module):
        # Open an authenticated API session immediately; re-raised as a
        # plain Exception when the server is unreachable.
        self.module = module
        user = module.params.get('user')
        password = module.params.get('password')
        server = module.params.get('server')
        port = module.params.get('port')
        insecure_api = module.params.get('insecure_api')
        url = "https://%s:%s" % (server, port)
        try:
            # NOTE(review): the SDK's `insecure` flag is passed as the
            # *string* 'True'/'False' here -- confirm the SDK coerces this
            # the way the boolean option intends.
            api = API(url=url, username=user, password=password, insecure=str(insecure_api))
            api.test()
            self.conn = api
        except Exception:
            raise Exception("Failed to connect to RHEV-M.")

    def __del__(self):
        # Best-effort logout when this wrapper is garbage collected.
        self.conn.disconnect()

    def createVMimage(self, name, cluster, template):
        # Create a VM by cloning all disks from an existing template.
        # NOTE: takes no 'disks' argument; callers must not pass one.
        try:
            vmparams = params.VM(
                name=name,
                cluster=self.conn.clusters.get(name=cluster),
                template=self.conn.templates.get(name=template),
                disks=params.Disks(clone=True)
            )
            self.conn.vms.add(vmparams)
            setMsg("VM is created")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to create VM")
            setMsg(str(e))
            setFailed()
            return False

    def createVM(self, name, cluster, os, actiontype):
        # Create an empty VM from the "Blank" template with the given OS
        # type and VM type (server/desktop).
        try:
            vmparams = params.VM(
                name=name,
                cluster=self.conn.clusters.get(name=cluster),
                os=params.OperatingSystem(type_=os),
                template=self.conn.templates.get(name="Blank"),
                type_=actiontype
            )
            self.conn.vms.add(vmparams)
            setMsg("VM is created")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to create VM")
            setMsg(str(e))
            setFailed()
            return False

    def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
        # Attach a new disk to the VM, then poll (100 tries, 2s apart) until
        # its image state becomes 'ok'.
        VM = self.get_VM(vmname)

        newdisk = params.Disk(
            name=diskname,
            size=1024 * 1024 * 1024 * int(disksize),  # GB -> bytes
            wipe_after_delete=True,
            sparse=diskallocationtype,
            interface=diskinterface,
            format=diskformat,
            bootable=diskboot,
            storage_domains=params.StorageDomains(
                storage_domain=[self.get_domain(diskdomain)]
            )
        )

        try:
            VM.disks.add(newdisk)
            VM.update()
            setMsg("Successfully added disk " + diskname)
            setChanged()
        except Exception as e:
            setFailed()
            setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
            setMsg(str(e))
            return False

        try:
            currentdisk = VM.disks.get(name=diskname)
            attempt = 1
            while currentdisk.status.state != 'ok':
                currentdisk = VM.disks.get(name=diskname)
                if attempt == 100:
                    setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
                    raise Exception()
                else:
                    attempt += 1
                    time.sleep(2)
            setMsg("The disk " + diskname + " is ready.")
        except Exception as e:
            setFailed()
            setMsg("Error getting the state of " + diskname + ".")
            setMsg(str(e))
            return False
        return True

    def createNIC(self, vmname, nicname, vlan, interface):
        # Attach a new NIC to the VM, then poll (100 tries, 2s apart) until
        # it reports active.
        VM = self.get_VM(vmname)
        CLUSTER = self.get_cluster_byid(VM.cluster.id)
        DC = self.get_DC_byid(CLUSTER.data_center.id)
        newnic = params.NIC(
            name=nicname,
            network=DC.networks.get(name=vlan),
            interface=interface
        )

        try:
            VM.nics.add(newnic)
            VM.update()
            setMsg("Successfully added iface " + nicname)
            setChanged()
        except Exception as e:
            setFailed()
            setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
            setMsg(str(e))
            return False

        try:
            currentnic = VM.nics.get(name=nicname)
            attempt = 1
            while currentnic.active is not True:
                currentnic = VM.nics.get(name=nicname)
                if attempt == 100:
                    setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
                    raise Exception()
                else:
                    attempt += 1
                    time.sleep(2)
            setMsg("The iface " + nicname + " is ready.")
        except Exception as e:
            setFailed()
            setMsg("Error getting the state of " + nicname + ".")
            setMsg(str(e))
            return False
        return True

    # --- simple lookup helpers, by name or by id ---

    def get_DC(self, dc_name):
        return self.conn.datacenters.get(name=dc_name)

    def get_DC_byid(self, dc_id):
        return self.conn.datacenters.get(id=dc_id)

    def get_VM(self, vm_name):
        return self.conn.vms.get(name=vm_name)

    def get_cluster_byid(self, cluster_id):
        return self.conn.clusters.get(id=cluster_id)

    def get_cluster(self, cluster_name):
        return self.conn.clusters.get(name=cluster_name)

    def get_domain_byid(self, dom_id):
        return self.conn.storagedomains.get(id=dom_id)

    def get_domain(self, domain_name):
        return self.conn.storagedomains.get(name=domain_name)

    def get_disk(self, disk):
        return self.conn.disks.get(disk)

    def get_network(self, dc_name, network_name):
        return self.get_DC(dc_name).networks.get(network_name)

    def get_network_byid(self, network_id):
        return self.conn.networks.get(id=network_id)

    def get_NIC(self, vm_name, nic_name):
        return self.get_VM(vm_name).nics.get(nic_name)

    def get_Host(self, host_name):
        return self.conn.hosts.get(name=host_name)

    def get_Host_byid(self, host_id):
        return self.conn.hosts.get(id=host_id)

    def set_Memory(self, name, memory):
        # 'memory' is in GB; the API expects bytes.
        VM = self.get_VM(name)
        VM.memory = int(int(memory) * 1024 * 1024 * 1024)
        try:
            VM.update()
            setMsg("The Memory has been updated.")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to update memory.")
            setMsg(str(e))
            setFailed()
            return False

    def set_Memory_Policy(self, name, memory_policy):
        # Guaranteed-memory floor, in GB (converted to bytes).
        VM = self.get_VM(name)
        VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
        try:
            VM.update()
            setMsg("The memory policy has been updated.")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to update memory policy.")
            setMsg(str(e))
            setFailed()
            return False

    def set_CPU(self, name, cpu):
        # Sets the number of cores (topology.cores), not sockets.
        VM = self.get_VM(name)
        VM.cpu.topology.cores = int(cpu)
        try:
            VM.update()
            setMsg("The number of CPUs has been updated.")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to update the number of CPUs.")
            setMsg(str(e))
            setFailed()
            return False

    def set_CPU_share(self, name, cpu_share):
        VM = self.get_VM(name)
        VM.cpu_shares = int(cpu_share)
        try:
            VM.update()
            setMsg("The CPU share has been updated.")
            setChanged()
            return True
        except Exception as e:
            setMsg("Failed to update the CPU share.")
            setMsg(str(e))
            setFailed()
            return False

    def set_Disk(self, diskname, disksize, diskinterface, diskboot):
        # Reconcile an existing disk's bootable flag, size (grow only) and
        # interface with the requested values.
        DISK = self.get_disk(diskname)
        setMsg("Checking disk " + diskname)
        if DISK.get_bootable() != diskboot:
            try:
                DISK.set_bootable(diskboot)
                setMsg("Updated the boot option on the disk.")
                setChanged()
            except Exception as e:
                setMsg("Failed to set the boot option on the disk.")
                setMsg(str(e))
                setFailed()
                return False
        else:
            setMsg("The boot option of the disk is correct")
        if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
            try:
                DISK.size = (1024 * 1024 * 1024 * int(disksize))
                setMsg("Updated the size of the disk.")
                setChanged()
            except Exception as e:
                setMsg("Failed to update the size of the disk.")
                setMsg(str(e))
                setFailed()
                return False
        elif int(DISK.size) > (1024 * 1024 * 1024 * int(disksize)):
            # Growing is supported above; shrinking is refused outright.
            setMsg("Shrinking disks is not supported")
            setFailed()
            return False
        else:
            setMsg("The size of the disk is correct")
        if str(DISK.interface) != str(diskinterface):
            try:
                DISK.interface = diskinterface
                setMsg("Updated the interface of the disk.")
                setChanged()
            except Exception as e:
                setMsg("Failed to update the interface of the disk.")
                setMsg(str(e))
                setFailed()
                return False
        else:
            setMsg("The interface of the disk is correct")
        return True

    def set_NIC(self, vmname, nicname, newname, vlan, interface):
        # Reconcile an existing NIC's name, network and interface type, then
        # push all accumulated changes with a single update().
        NIC = self.get_NIC(vmname, nicname)
        VM = self.get_VM(vmname)
        CLUSTER = self.get_cluster_byid(VM.cluster.id)
        DC = self.get_DC_byid(CLUSTER.data_center.id)
        NETWORK = self.get_network(str(DC.name), vlan)
        checkFail()
        if NIC.name != newname:
            NIC.name = newname
            setMsg('Updating iface name to ' + newname)
            setChanged()
        if str(NIC.network.id) != str(NETWORK.id):
            NIC.set_network(NETWORK)
            setMsg('Updating iface network to ' + vlan)
            setChanged()
        if NIC.interface != interface:
            NIC.interface = interface
            setMsg('Updating iface interface to ' + interface)
            setChanged()
        try:
            NIC.update()
            setMsg('iface has successfully been updated.')
        except Exception as e:
            setMsg("Failed to update the iface.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def set_DeleteProtection(self, vmname, del_prot):
        VM = self.get_VM(vmname)
        VM.delete_protected = del_prot
        try:
            VM.update()
            setChanged()
        except Exception as e:
            setMsg("Failed to update delete protection.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def set_BootOrder(self, vmname, boot_order):
        # Replace the VM's boot device list with the requested order.
        VM = self.get_VM(vmname)
        bootorder = []
        for device in boot_order:
            bootorder.append(params.Boot(dev=device))
        VM.os.boot = bootorder

        try:
            VM.update()
            setChanged()
        except Exception as e:
            setMsg("Failed to update the boot order.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def set_Host(self, host_name, cluster, ifaces):
        # Register a new hypervisor host and configure its network
        # interfaces (including optional bonds), then activate it.
        # No-op (returns True) when the host already exists.
        HOST = self.get_Host(host_name)
        CLUSTER = self.get_cluster(cluster)

        if HOST is None:
            setMsg("Host does not exist.")
            ifacelist = dict()
            networklist = []
            manageip = ''

            # First pass: build HostNIC/Bonding parameter objects from the
            # requested iface definitions, filling in defaults.
            try:
                for iface in ifaces:
                    try:
                        setMsg('creating host interface ' + iface['name'])
                        if 'management' in iface:
                            # The management iface's IP doubles as the
                            # address the host is registered under.
                            manageip = iface['ip']
                        if 'boot_protocol' not in iface:
                            if 'ip' in iface:
                                iface['boot_protocol'] = 'static'
                            else:
                                iface['boot_protocol'] = 'none'
                        if 'ip' not in iface:
                            iface['ip'] = ''
                        if 'netmask' not in iface:
                            iface['netmask'] = ''
                        if 'gateway' not in iface:
                            iface['gateway'] = ''

                        if 'network' in iface:
                            if 'bond' in iface:
                                # Bond slaves must have been declared earlier
                                # in the list; look them up by name.
                                bond = []
                                for slave in iface['bond']:
                                    bond.append(ifacelist[slave])
                                try:
                                    tmpiface = params.Bonding(
                                        slaves=params.Slaves(host_nic=bond),
                                        options=params.Options(
                                            option=[
                                                params.Option(name='miimon', value='100'),
                                                params.Option(name='mode', value='4')
                                            ]
                                        )
                                    )
                                except Exception as e:
                                    setMsg('Failed to create the bond for ' + iface['name'])
                                    setFailed()
                                    setMsg(str(e))
                                    return False
                                try:
                                    tmpnetwork = params.HostNIC(
                                        network=params.Network(name=iface['network']),
                                        name=iface['name'],
                                        boot_protocol=iface['boot_protocol'],
                                        ip=params.IP(
                                            address=iface['ip'],
                                            netmask=iface['netmask'],
                                            gateway=iface['gateway']
                                        ),
                                        override_configuration=True,
                                        bonding=tmpiface)
                                    networklist.append(tmpnetwork)
                                    setMsg('Applying network ' + iface['name'])
                                except Exception as e:
                                    setMsg('Failed to set' + iface['name'] + ' as network interface')
                                    setFailed()
                                    setMsg(str(e))
                                    return False
                            else:
                                tmpnetwork = params.HostNIC(
                                    network=params.Network(name=iface['network']),
                                    name=iface['name'],
                                    boot_protocol=iface['boot_protocol'],
                                    ip=params.IP(
                                        address=iface['ip'],
                                        netmask=iface['netmask'],
                                        gateway=iface['gateway']
                                    ))
                                networklist.append(tmpnetwork)
                                setMsg('Applying network ' + iface['name'])
                        else:
                            # Plain NIC with no logical network attached.
                            tmpiface = params.HostNIC(
                                name=iface['name'],
                                network=params.Network(),
                                boot_protocol=iface['boot_protocol'],
                                ip=params.IP(
                                    address=iface['ip'],
                                    netmask=iface['netmask'],
                                    gateway=iface['gateway']
                                ))
                        ifacelist[iface['name']] = tmpiface
                    except Exception as e:
                        setMsg('Failed to set ' + iface['name'])
                        setFailed()
                        setMsg(str(e))
                        return False
            except Exception as e:
                setMsg('Failed to set networks')
                setMsg(str(e))
                setFailed()
                return False

            if manageip == '':
                setMsg('No management network is defined')
                setFailed()
                return False

            # Second pass: register the host, wait for it to come up, put it
            # in maintenance, push the network config and commit it.
            try:
                HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
                if self.conn.hosts.add(HOST):
                    setChanged()
                    HOST = self.get_Host(host_name)
                    state = HOST.status.state
                    while (state != 'non_operational' and state != 'up'):
                        HOST = self.get_Host(host_name)
                        state = HOST.status.state
                        time.sleep(1)
                        if state == 'non_responsive':
                            setMsg('Failed to add host to RHEVM')
                            setFailed()
                            return False

                    setMsg('status host: up')
                    time.sleep(5)

                    HOST = self.get_Host(host_name)
                    state = HOST.status.state
                    setMsg('State before setting to maintenance: ' + str(state))
                    HOST.deactivate()
                    while state != 'maintenance':
                        HOST = self.get_Host(host_name)
                        state = HOST.status.state
                        time.sleep(1)
                    setMsg('status host: maintenance')

                    try:
                        HOST.nics.setupnetworks(params.Action(
                            force=True,
                            check_connectivity=False,
                            host_nics=params.HostNics(host_nic=networklist)
                        ))
                        setMsg('nics are set')
                    except Exception as e:
                        setMsg('Failed to apply networkconfig')
                        setFailed()
                        setMsg(str(e))
                        return False

                    try:
                        HOST.commitnetconfig()
                        setMsg('Network config is saved')
                    except Exception as e:
                        setMsg('Failed to save networkconfig')
                        setFailed()
                        setMsg(str(e))
                        return False
            except Exception as e:
                if 'The Host name is already in use' in str(e):
                    setMsg("Host already exists")
                else:
                    setMsg("Failed to add host")
                    setFailed()
                    setMsg(str(e))
                return False

            # Bring the freshly configured host back out of maintenance.
            HOST.activate()
            while state != 'up':
                HOST = self.get_Host(host_name)
                state = HOST.status.state
                time.sleep(1)
                if state == 'non_responsive':
                    setMsg('Failed to apply networkconfig.')
                    setFailed()
                    return False
            setMsg('status host: up')
        else:
            setMsg("Host exists.")

        return True

    def del_NIC(self, vmname, nicname):
        return self.get_NIC(vmname, nicname).delete()

    def remove_VM(self, vmname):
        VM = self.get_VM(vmname)
        try:
            VM.delete()
        except Exception as e:
            setMsg("Failed to remove VM.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def start_VM(self, vmname, timeout):
        VM = self.get_VM(vmname)
        try:
            VM.start()
        except Exception as e:
            setMsg("Failed to start VM.")
            setMsg(str(e))
            setFailed()
            return False
        return self.wait_VM(vmname, "up", timeout)

    def wait_VM(self, vmname, state, timeout):
        # Poll every 10s until the VM reaches `state`.  A timeout of False
        # waits forever; otherwise it is decremented in 10s steps.
        VM = self.get_VM(vmname)
        while VM.status.state != state:
            VM = self.get_VM(vmname)
            time.sleep(10)
            if timeout is not False:
                timeout -= 10
                if timeout <= 0:
                    setMsg("Timeout expired")
                    setFailed()
                    return False
        return True

    def stop_VM(self, vmname, timeout):
        VM = self.get_VM(vmname)
        try:
            VM.stop()
        except Exception as e:
            setMsg("Failed to stop VM.")
            setMsg(str(e))
            setFailed()
            return False
        return self.wait_VM(vmname, "down", timeout)

    def set_CD(self, vmname, cd_drive):
        # Attach an ISO; a running VM uses the fixed all-zero cdrom device
        # id with current=True so the change applies without a reboot.
        VM = self.get_VM(vmname)
        try:
            if str(VM.status.state) == 'down':
                cdrom = params.CdRom(file=cd_drive)
                VM.cdroms.add(cdrom)
                setMsg("Attached the image.")
                setChanged()
            else:
                cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
                cdrom.set_file(cd_drive)
                cdrom.update(current=True)
                setMsg("Attached the image.")
                setChanged()
        except Exception as e:
            setMsg("Failed to attach image.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def set_VM_Host(self, vmname, vmhost):
        # Pin the VM's placement policy (preferred start host) to vmhost.
        VM = self.get_VM(vmname)
        HOST = self.get_Host(vmhost)
        try:
            VM.placement_policy.host = HOST
            VM.update()
            setMsg("Set startup host to " + vmhost)
            setChanged()
        except Exception as e:
            setMsg("Failed to set startup host.")
            setMsg(str(e))
            setFailed()
            return False
        return True

    def migrate_VM(self, vmname, vmhost):
        # Live-migrate the VM when it is not already on vmhost.
        VM = self.get_VM(vmname)

        HOST = self.get_Host_byid(VM.host.id)
        if str(HOST.name) != vmhost:
            try:
                VM.migrate(
                    action=params.Action(
                        host=params.Host(
                            name=vmhost,
                        )
                    ),
                )
                setChanged()
                setMsg("VM migrated to " + vmhost)
            except Exception as e:
                setMsg("Failed to set startup host.")
                setMsg(str(e))
                setFailed()
                return False
        return True

    def remove_CD(self, vmname):
        # Detach the ISO via the fixed all-zero cdrom device id.
        VM = self.get_VM(vmname)
        try:
            VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
            setMsg("Removed the image.")
            setChanged()
        except Exception as e:
            setMsg("Failed to remove the image.")
            setMsg(str(e))
            setFailed()
            return False
        return True
class RHEV(object):
    def __init__(self, module):
        # Keep the AnsibleModule; a connection is opened lazily by
        # __get_conn() right before each operation.
        self.module = module
    def __get_conn(self):
        # (Re)connect to RHEV-M. A fresh RHEVConn is created per call; the
        # old one is disconnected by RHEVConn.__del__ when collected.
        self.conn = RHEVConn(self.module)
        return self.conn
    def test(self):
        # Connectivity probe: raises (from RHEVConn.__init__) on failure,
        # returns "OK" otherwise.
        self.__get_conn()
        return "OK"
    def getVM(self, name):
        """Return a dict describing VM *name*, or False if it does not exist."""
        self.__get_conn()
        VM = self.conn.get_VM(name)
        if VM:
            vminfo = dict()
            vminfo['uuid'] = VM.id
            vminfo['name'] = VM.name
            vminfo['status'] = VM.status.state
            vminfo['cpu_cores'] = VM.cpu.topology.cores
            vminfo['cpu_sockets'] = VM.cpu.topology.sockets
            vminfo['cpu_shares'] = VM.cpu_shares
            # Sizes are reported in GB (the API returns bytes).
            vminfo['memory'] = (int(VM.memory) // 1024 // 1024 // 1024)
            vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) // 1024 // 1024 // 1024)
            vminfo['os'] = VM.get_os().type_
            vminfo['del_prot'] = VM.delete_protected
            try:
                vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
            except Exception:
                # VM.host is not set (e.g. while the VM is down).
                vminfo['host'] = None
            vminfo['boot_order'] = []
            for boot_dev in VM.os.get_boot():
                vminfo['boot_order'].append(str(boot_dev.dev))
            vminfo['disks'] = []
            for DISK in VM.disks.list():
                disk = dict()
                disk['name'] = DISK.name
                disk['size'] = (int(DISK.size) // 1024 // 1024 // 1024)
                disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
                disk['interface'] = DISK.interface
                vminfo['disks'].append(disk)
            vminfo['ifaces'] = []
            for NIC in VM.nics.list():
                iface = dict()
                iface['name'] = str(NIC.name)
                iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
                iface['interface'] = NIC.interface
                iface['mac'] = NIC.mac.address
                vminfo['ifaces'].append(iface)
                # Each NIC's MAC is also exposed under its own top-level key
                # (see the RETURN sample: "eth0": "00:00:5E:00:53:00").
                vminfo[str(NIC.name)] = NIC.mac.address
            CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
            if CLUSTER:
                vminfo['cluster'] = CLUSTER.name
        else:
            vminfo = False
        return vminfo
def createVMimage(self, name, cluster, template, disks):
self.__get_conn()
return self.conn.createVMimage(name, cluster, template, disks)
    def createVM(self, name, cluster, os, actiontype):
        # Delegate: create an empty VM from the Blank template.
        self.__get_conn()
        return self.conn.createVM(name, cluster, os, actiontype)
    def setMemory(self, name, memory):
        # Delegate: set the VM's memory (in GB).
        self.__get_conn()
        return self.conn.set_Memory(name, memory)
    def setMemoryPolicy(self, name, memory_policy):
        # Delegate: set the guaranteed-memory floor (in GB).
        self.__get_conn()
        return self.conn.set_Memory_Policy(name, memory_policy)
    def setCPU(self, name, cpu):
        # Delegate: set the VM's CPU core count.
        self.__get_conn()
        return self.conn.set_CPU(name, cpu)
    def setCPUShare(self, name, cpu_share):
        # Delegate: set the VM's CPU shares.
        self.__get_conn()
        return self.conn.set_CPU_share(name, cpu_share)
    def setDisks(self, name, disks):
        """Create or reconcile the VM's disks from the requested disk list.

        Disk names are derived as "<vm>_Disk<N>_<name>".  If no disk is
        explicitly marked bootable, the first one is made bootable.
        """
        self.__get_conn()

        counter = 0
        bootselect = False
        # First pass: detect whether any disk was explicitly marked bootable.
        for disk in disks:
            if 'bootable' in disk:
                if disk['bootable'] is True:
                    bootselect = True

        # Second pass: create missing disks, reconcile existing ones.
        for disk in disks:
            diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
            disksize = disk.get('size', 1)
            diskdomain = disk.get('domain', None)
            if diskdomain is None:
                setMsg("`domain` is a required disk key.")
                setFailed()
                return False
            diskinterface = disk.get('interface', 'virtio')
            diskformat = disk.get('format', 'raw')
            diskallocationtype = disk.get('thin', False)
            diskboot = disk.get('bootable', False)

            # No explicit boot disk: default the first disk to bootable.
            if bootselect is False and counter == 0:
                diskboot = True

            DISK = self.conn.get_disk(diskname)

            if DISK is None:
                self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
            else:
                self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
            checkFail()
            counter += 1

        return True
def setNetworks(self, vmname, ifaces):
    """Reconcile the VM's NICs with the requested *ifaces* list, in order.

    Existing NICs are updated in place; surplus NICs are deleted; missing
    ones are created.  If the existing NIC order does not match the
    requested order, every NIC is deleted and the method recurses to
    rebuild from scratch.
    """
    self.__get_conn()
    VM = self.conn.get_VM(vmname)

    counter = 0
    length = len(ifaces)
    # First pass: walk existing NICs, updating or deleting them.
    for NIC in VM.nics.list():
        if counter < length:
            iface = ifaces[counter]
            name = iface.get('name', None)
            if name is None:
                setMsg("`name` is a required iface key.")
                setFailed()
            elif str(name) != str(NIC.name):
                # Order mismatch: drop everything and rebuild recursively.
                setMsg("ifaces are in the wrong order, rebuilding everything.")
                for NIC in VM.nics.list():
                    self.conn.del_NIC(vmname, NIC.name)
                self.setNetworks(vmname, ifaces)
                checkFail()
                return True
            vlan = iface.get('vlan', None)
            if vlan is None:
                setMsg("`vlan` is a required iface key.")
                setFailed()
            checkFail()
            interface = iface.get('interface', 'virtio')
            self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
        else:
            # More NICs than requested interfaces: delete the surplus.
            self.conn.del_NIC(vmname, NIC.name)
        counter += 1
    checkFail()

    # Second pass: create any interfaces that did not exist yet.
    while counter < length:
        iface = ifaces[counter]
        name = iface.get('name', None)
        if name is None:
            setMsg("`name` is a required iface key.")
            setFailed()
        vlan = iface.get('vlan', None)
        if vlan is None:
            setMsg("`vlan` is a required iface key.")
            setFailed()
        if failed is True:
            return False
        interface = iface.get('interface', 'virtio')
        self.conn.createNIC(vmname, name, vlan, interface)
        counter += 1

    checkFail()
    return True
def setDeleteProtection(self, vmname, del_prot):
    """Ensure the VM's delete-protection flag matches *del_prot*.

    Only issues an update when the current value differs; always returns
    True (checkFail aborts the run on error).
    """
    self.__get_conn()
    vm = self.conn.get_VM(vmname)
    if bool(vm.delete_protected) == bool(del_prot):
        setMsg("`delete protection` already has the right value.")
    else:
        self.conn.set_DeleteProtection(vmname, del_prot)
        checkFail()
        setMsg("`delete protection` has been updated.")
    return True
def setBootOrder(self, vmname, boot_order):
    """Ensure the VM's boot device order matches *boot_order*.

    The current order is read from the VM's OS settings and only rewritten
    when it differs from the requested list.
    """
    self.__get_conn()
    vm = self.conn.get_VM(vmname)
    current_order = [str(boot_dev.dev) for boot_dev in vm.os.get_boot()]
    if current_order == boot_order:
        setMsg('The boot order has already been set')
    else:
        self.conn.set_BootOrder(vmname, boot_order)
        setMsg('The boot order has been set')
    return True
def removeVM(self, vmname):
    """Power the VM down (waiting up to 300 s) and then delete it."""
    self.__get_conn()
    self.setPower(vmname, "down", 300)
    return self.conn.remove_VM(vmname)
def setPower(self, vmname, state, timeout):
    """Drive the VM to power *state*: 'up', 'down' or 'restarted'.

    Returns False (after recording a failure) when the VM does not exist.
    'restarted' is implemented as a checked stop followed by a start.
    """
    self.__get_conn()
    VM = self.conn.get_VM(vmname)
    if VM is None:
        setMsg("VM does not exist.")
        setFailed()
        return False

    if state == VM.status.state:
        setMsg("VM state was already " + state)
    else:
        if state == "up":
            setMsg("VM is going to start")
            self.conn.start_VM(vmname, timeout)
            setChanged()
        elif state == "down":
            setMsg("VM is going to stop")
            self.conn.stop_VM(vmname, timeout)
            setChanged()
        elif state == "restarted":
            # Recursive stop + start; checkFail aborts between the phases
            # if either transition recorded a failure.
            self.setPower(vmname, "down", timeout)
            checkFail()
            self.setPower(vmname, "up", timeout)
            checkFail()
        setMsg("the vm state is set to " + state)
    return True
def setCD(self, vmname, cd_drive):
    """Attach *cd_drive* as the VM's CD image, or eject when it is falsy."""
    self.__get_conn()
    if not cd_drive:
        return self.conn.remove_CD(vmname)
    return self.conn.set_CD(vmname, cd_drive)
def setVMHost(self, vmname, vmhost):
    """Set the startup/placement host of VM *vmname* to *vmhost*."""
    self.__get_conn()
    # NOTE: the original function body continued after this return with a
    # placement-policy comparison and live-migration call, all of it
    # unreachable dead code (it carried a `pylint: disable=unreachable`
    # marker).  The dead code has been removed; behaviour is unchanged.
    return self.conn.set_VM_Host(vmname, vmhost)
def setHost(self, hostname, cluster, ifaces):
    """Register/configure hypervisor host *hostname* in *cluster*."""
    self.__get_conn()
    return self.conn.set_Host(hostname, cluster, ifaces)
def checkFail():
    """Abort the module run if a failure was recorded.

    module.fail_json() exits the process, so this only returns (True) on
    the success path.
    """
    if failed:
        module.fail_json(msg=msg)
    else:
        return True
def setFailed():
    """Record a failure in the module-level state."""
    global failed
    failed = True
def setChanged():
    """Record that the module changed the remote system."""
    global changed
    changed = True
def setMsg(message):
    """Append *message* to the module-level result message list.

    The original declared ``global failed`` here, which was misleading:
    this function never touches ``failed``, and ``msg.append`` mutates the
    existing list so no global statement is needed at all.
    """
    msg.append(message)
def core(module):
    """Dispatch on the module's `state` parameter and perform the action.

    Returns a (return-code, payload) tuple: payload is the global msg list
    on failure, or a result dictionary on success.
    """
    r = RHEV(module)

    state = module.params.get('state', 'present')

    if state == 'ping':
        r.test()
        return RHEV_SUCCESS, {"ping": "pong"}
    elif state == 'info':
        name = module.params.get('name')
        if not name:
            setMsg("`name` is a required argument.")
            return RHEV_FAILED, msg
        vminfo = r.getVM(name)
        return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
    elif state == 'present':
        created = False
        name = module.params.get('name')
        if not name:
            setMsg("`name` is a required argument.")
            return RHEV_FAILED, msg
        actiontype = module.params.get('type')
        if actiontype == 'server' or actiontype == 'desktop':
            vminfo = r.getVM(name)
            if vminfo:
                setMsg('VM exists')
            else:
                # Create VM
                cluster = module.params.get('cluster')
                if cluster is None:
                    setMsg("cluster is a required argument.")
                    setFailed()
                template = module.params.get('image')
                if template:
                    # Creating from a template image requires a disk list.
                    disks = module.params.get('disks')
                    if disks is None:
                        setMsg("disks is a required argument.")
                        setFailed()
                    checkFail()
                    if r.createVMimage(name, cluster, template, disks) is False:
                        return RHEV_FAILED, vminfo
                else:
                    # Creating a blank VM requires an OS version.
                    os = module.params.get('osver')
                    if os is None:
                        setMsg("osver is a required argument.")
                        setFailed()
                    checkFail()
                    if r.createVM(name, cluster, os, actiontype) is False:
                        return RHEV_FAILED, vminfo
                created = True

            # Set MEMORY and MEMORY POLICY
            vminfo = r.getVM(name)
            memory = module.params.get('vmmem')
            if memory is not None:
                memory_policy = module.params.get('mempol')
                if memory_policy == 0:
                    memory_policy = memory
                mem_pol_nok = True
                if int(vminfo['mem_pol']) == memory_policy:
                    setMsg("Memory is correct")
                    mem_pol_nok = False
                mem_nok = True
                if int(vminfo['memory']) == memory:
                    setMsg("Memory is correct")
                    mem_nok = False
                if memory_policy > memory:
                    setMsg('memory_policy cannot have a higher value than memory.')
                    return RHEV_FAILED, msg
                if mem_nok and mem_pol_nok:
                    # Apply in an order that never lets the guaranteed
                    # memory (policy) exceed the configured memory.
                    if memory_policy > int(vminfo['memory']):
                        r.setMemory(vminfo['name'], memory)
                        r.setMemoryPolicy(vminfo['name'], memory_policy)
                    else:
                        r.setMemoryPolicy(vminfo['name'], memory_policy)
                        r.setMemory(vminfo['name'], memory)
                elif mem_nok:
                    r.setMemory(vminfo['name'], memory)
                elif mem_pol_nok:
                    r.setMemoryPolicy(vminfo['name'], memory_policy)
                checkFail()

            # Set CPU
            cpu = module.params.get('vmcpu')
            if int(vminfo['cpu_cores']) == cpu:
                setMsg("Number of CPUs is correct")
            else:
                if r.setCPU(vminfo['name'], cpu) is False:
                    return RHEV_FAILED, msg

            # Set CPU SHARE
            cpu_share = module.params.get('cpu_share')
            if cpu_share is not None:
                if int(vminfo['cpu_shares']) == cpu_share:
                    setMsg("CPU share is correct.")
                else:
                    if r.setCPUShare(vminfo['name'], cpu_share) is False:
                        return RHEV_FAILED, msg

            # Set DISKS
            disks = module.params.get('disks')
            if disks is not None:
                if r.setDisks(vminfo['name'], disks) is False:
                    return RHEV_FAILED, msg

            # Set NETWORKS
            ifaces = module.params.get('ifaces', None)
            if ifaces is not None:
                if r.setNetworks(vminfo['name'], ifaces) is False:
                    return RHEV_FAILED, msg

            # Set Delete Protection
            del_prot = module.params.get('del_prot')
            if r.setDeleteProtection(vminfo['name'], del_prot) is False:
                return RHEV_FAILED, msg

            # Set Boot Order
            boot_order = module.params.get('boot_order')
            if r.setBootOrder(vminfo['name'], boot_order) is False:
                return RHEV_FAILED, msg

            # Set VM Host
            vmhost = module.params.get('vmhost')
            if vmhost:
                if r.setVMHost(vminfo['name'], vmhost) is False:
                    return RHEV_FAILED, msg

            # Re-read the VM so the returned facts reflect all updates.
            vminfo = r.getVM(name)
            vminfo['created'] = created
            return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}

        if actiontype == 'host':
            cluster = module.params.get('cluster')
            if cluster is None:
                setMsg("cluster is a required argument.")
                setFailed()
            ifaces = module.params.get('ifaces')
            if ifaces is None:
                setMsg("ifaces is a required argument.")
                setFailed()
            if r.setHost(name, cluster, ifaces) is False:
                return RHEV_FAILED, msg
            return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
    elif state == 'absent':
        name = module.params.get('name')
        if not name:
            setMsg("`name` is a required argument.")
            return RHEV_FAILED, msg
        actiontype = module.params.get('type')
        if actiontype == 'server' or actiontype == 'desktop':
            vminfo = r.getVM(name)
            if vminfo:
                setMsg('VM exists')

                # Set Delete Protection (must be off before removal works)
                del_prot = module.params.get('del_prot')
                if r.setDeleteProtection(vminfo['name'], del_prot) is False:
                    return RHEV_FAILED, msg

                # Remove VM
                if r.removeVM(vminfo['name']) is False:
                    return RHEV_FAILED, msg
                setMsg('VM has been removed.')
                vminfo['state'] = 'DELETED'
            else:
                setMsg('VM was already removed.')
            return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
    elif state == 'up' or state == 'down' or state == 'restarted':
        name = module.params.get('name')
        if not name:
            setMsg("`name` is a required argument.")
            return RHEV_FAILED, msg
        timeout = module.params.get('timeout')
        if r.setPower(name, state, timeout) is False:
            return RHEV_FAILED, msg
        vminfo = r.getVM(name)
        return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
    elif state == 'cd':
        name = module.params.get('name')
        cd_drive = module.params.get('cd_drive')
        if r.setCD(name, cd_drive) is False:
            return RHEV_FAILED, msg
        return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
def main():
    """Ansible entry point: parse module arguments and dispatch to core()."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'cd', 'down', 'info', 'ping', 'present', 'restarted', 'up']),
            user=dict(type='str', default='admin@internal'),
            password=dict(type='str', required=True, no_log=True),
            server=dict(type='str', default='127.0.0.1'),
            port=dict(type='int', default=443),
            insecure_api=dict(type='bool', default=False),
            name=dict(type='str'),
            image=dict(type='str'),
            datacenter=dict(type='str', default="Default"),
            type=dict(type='str', default='server', choices=['desktop', 'host', 'server']),
            cluster=dict(type='str', default=''),
            vmhost=dict(type='str'),
            vmcpu=dict(type='int', default=2),
            vmmem=dict(type='int', default=1),
            disks=dict(type='list'),
            osver=dict(type='str', default="rhel_6x64"),
            ifaces=dict(type='list', aliases=['interfaces', 'nics']),
            timeout=dict(type='int'),
            mempol=dict(type='int', default=1),
            vm_ha=dict(type='bool', default=True),
            cpu_share=dict(type='int', default=0),
            boot_order=dict(type='list', default=['hd', 'network']),
            del_prot=dict(type='bool', default=True),
            cd_drive=dict(type='str'),
        ),
    )

    if not HAS_SDK:
        module.fail_json(msg="The 'ovirtsdk' module is not importable. Check the requirements.")

    rc = RHEV_SUCCESS
    try:
        rc, result = core(module)
    except Exception as e:
        # Surface any unexpected error through Ansible's failure channel.
        module.fail_json(msg=str(e))

    if rc != 0:  # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    module.exit_json(**result)
# Standard Ansible module entry point: only run when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
live-clones/dolfin-adjoint | timestepping/python/timestepping/pre_assembled_equations.py | 1 | 21818 | #!/usr/bin/env python2
# Copyright (C) 2011-2012 by Imperial College London
# Copyright (C) 2013 University of Oxford
# Copyright (C) 2014-2016 University of Edinburgh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import dolfin
import ufl
from .caches import *
from .equation_solvers import *
from .exceptions import *
from .fenics_overrides import *
from .fenics_utils import *
from .pre_assembled_forms import *
from .statics import *
# Public names exported by `from ... import *`.
__all__ = \
    [
        "PAEquationSolver",
        "pa_solve"
    ]
class PAEquationSolver(EquationSolver):
    """
    An EquationSolver applying additional pre-assembly and linear solver caching
    optimisations. This utilises pre-assembly of static terms. The arguments match
    those accepted by the DOLFIN solve function, with the following differences:

      Argument 1: May be a general equation. Linear systems are detected
        automatically.
      initial_guess: The initial guess for an iterative solver.
      adjoint_solver_parameters: A dictionary of linear solver parameters for an
        adjoint equation solve.
    """

    def __init__(self, *args, **kwargs):
        # Work on shallow copies so the caller's argument containers are not
        # mutated by the keyword extraction below.
        args, kwargs = copy.copy(args), copy.copy(kwargs)

        # Process arguments not to be passed to _extract_args
        if "initial_guess" in kwargs:
            if not kwargs["initial_guess"] is None and not isinstance(kwargs["initial_guess"], dolfin.Function):
                raise InvalidArgumentException("initial_guess must be a Function")
            initial_guess = kwargs["initial_guess"]
            del(kwargs["initial_guess"])
        else:
            initial_guess = None
        if "adjoint_solver_parameters" in kwargs:
            if not kwargs["adjoint_solver_parameters"] is None and not isinstance(kwargs["adjoint_solver_parameters"], dict):
                raise InvalidArgumentException("adjoint_solver_parameters must be a dictionary")
            adjoint_solver_parameters = kwargs["adjoint_solver_parameters"]
            del(kwargs["adjoint_solver_parameters"])
        else:
            adjoint_solver_parameters = None
        if "pre_assembly_parameters" in kwargs:
            pre_assembly_parameters = kwargs["pre_assembly_parameters"]
            del(kwargs["pre_assembly_parameters"])
        else:
            pre_assembly_parameters = {}

        # Process remaining arguments
        if "form_compiler_parameters" in kwargs:
            raise NotImplementedException("form_compiler_parameters argument not supported")
        eq, x, bcs, J, tol, goal, form_parameters, solver_parameters = dolfin.fem.solving._extract_args(*args, **kwargs)

        # Relax requirements on equation syntax
        eq_lhs_rank = form_rank(eq.lhs)
        if eq_lhs_rank == 1:
            form = eq.lhs
            if not is_zero_rhs(eq.rhs):
                form -= eq.rhs
            if x in ufl.algorithms.extract_coefficients(form):
                if J is None:
                    J = derivative(form, x)
                if x in ufl.algorithms.extract_coefficients(J):
                    # Non-linear solve
                    is_linear = False
                else:
                    # Linear solve, rank 2 LHS
                    cache_info("Detected that solve for %s is linear" % x.name())
                    form = dolfin.replace(form, {x:dolfin.TrialFunction(x.function_space())})
                    eq = dolfin.lhs(form) == dolfin.rhs(form)
                    eq_lhs_rank = form_rank(eq.lhs)
                    assert(eq_lhs_rank == 2)
                    is_linear = True
            else:
                # Linear solve, rank 1 LHS
                is_linear = True
        elif eq_lhs_rank == 2:
            form = eq.lhs
            if not is_zero_rhs(eq.rhs):
                form -= eq.rhs
            if not x in ufl.algorithms.extract_coefficients(form):
                # Linear solve, rank 2 LHS
                eq = dolfin.lhs(form) == dolfin.rhs(form)
                eq_lhs_rank = form_rank(eq.lhs)
                assert(eq_lhs_rank == 2)
                is_linear = True
            else:
                # ??
                raise InvalidArgumentException("Invalid equation")

        # Initial guess sanity checking
        if is_linear:
            if not "krylov_solver" in solver_parameters:
                solver_parameters["krylov_solver"] = {}

            # Helpers querying/setting the Krylov nonzero_initial_guess flag.
            def initial_guess_enabled():
                return solver_parameters["krylov_solver"].get("nonzero_initial_guess", False)
            def initial_guess_disabled():
                return not solver_parameters["krylov_solver"].get("nonzero_initial_guess", True)
            def enable_initial_guess():
                solver_parameters["krylov_solver"]["nonzero_initial_guess"] = True
                return

            if initial_guess is None:
                if initial_guess_enabled():
                    initial_guess = x
            elif eq_lhs_rank == 1:
                # Supplied an initial guess for a linear solve with a rank 1 LHS -
                # ignore it
                initial_guess = None
            elif "linear_solver" in solver_parameters and not solver_parameters["linear_solver"] in ["direct", "lu"] and not dolfin.has_lu_solver_method(solver_parameters["linear_solver"]):
                # Supplied an initial guess with a Krylov solver - check the
                # initial_guess solver parameter
                if initial_guess_disabled():
                    raise ParameterException("initial_guess cannot be set if nonzero_initial_guess solver parameter is False")
                enable_initial_guess()
            elif is_linear:
                # Supplied an initial guess for a linear solve with an LU solver -
                # ignore it
                initial_guess = None

        # Initialise
        EquationSolver.__init__(self, eq, x, bcs,
            solver_parameters = solver_parameters,
            adjoint_solver_parameters = adjoint_solver_parameters,
            pre_assembly_parameters = pre_assembly_parameters)
        self.__args = args
        self.__kwargs = kwargs
        self.__J = J
        self.__tol = tol
        self.__goal = goal
        self.__form_parameters = form_parameters
        self.__initial_guess = initial_guess

        # Assemble
        self.reassemble()

        return

    def reassemble(self, *args):
        """
        Reassemble the PAEquationSolver. If no arguments are supplied, reassemble
        both the LHS and RHS. Otherwise, only reassemble the LHS or RHS if they
        depend upon the supplied Constant s or Function s. Note that this does
        not clear the assembly or linear solver caches -- hence if a static
        Constant, Function, or DirichletBC is modified then one should clear the
        caches before calling reassemble on the PAEquationSolver.
        """

        x, eq, bcs, linear_solver_parameters, pre_assembly_parameters = self.x(), \
          self.eq(), self.bcs(), self.linear_solver_parameters(), \
          self.pre_assembly_parameters()
        x_deps = self.dependencies()
        a, L, linear_solver = None, None, None
        if self.is_linear():
            for dep in x_deps:
                if dep is x:
                    raise DependencyException("Invalid non-linear solve")

            def assemble_lhs():
                # Pre-assemble the (bilinear or linear) LHS, caching fully
                # static matrices together with a cached linear solver.
                eq_lhs_rank = form_rank(eq.lhs)
                if eq_lhs_rank == 2:
                    static_bcs = n_non_static_bcs(bcs) == 0
                    static_form = is_static_form(eq.lhs)
                    if not pre_assembly_parameters["equations"]["symmetric_boundary_conditions"] and len(bcs) > 0 and static_bcs and static_form:
                        a = assembly_cache.assemble(eq.lhs,
                            bcs = bcs, symmetric_bcs = False)
                        cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
                        cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
                        linear_solver = linear_solver_cache.linear_solver(eq.lhs,
                            linear_solver_parameters,
                            bcs = bcs, symmetric_bcs = False,
                            a = a)
                        linear_solver.set_operator(a)
                    elif len(bcs) == 0 and static_form:
                        a = assembly_cache.assemble(eq.lhs)
                        cache_info("Pre-assembled LHS terms in solve for %s : 1" % x.name())
                        cache_info("Non-pre-assembled LHS terms in solve for %s: 0" % x.name())
                        linear_solver = linear_solver_cache.linear_solver(eq.lhs,
                            linear_solver_parameters,
                            a = a)
                        linear_solver.set_operator(a)
                    else:
                        a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
                        cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
                        cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
                        linear_solver = linear_solver_cache.linear_solver(eq.lhs,
                            linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
                            static = a.is_static() and static_bcs,
                            bcs = bcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                else:
                    assert(eq_lhs_rank == 1)
                    a = PAForm(eq.lhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
                    cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
                    cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
                    linear_solver = None
                return a, linear_solver
            def assemble_rhs():
                L = PAForm(eq.rhs, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
                cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
                cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
                return L

            if len(args) == 0:
                a, linear_solver = assemble_lhs()
                L = assemble_rhs()
            else:
                # Selective reassembly: only rebuild the side(s) whose
                # coefficients include one of the supplied dependencies.
                a, linear_solver = self.__a, self.__linear_solver
                L = self.__L
                lhs_cs = ufl.algorithms.extract_coefficients(eq.lhs)
                rhs_cs = ufl.algorithms.extract_coefficients(eq.rhs)
                for dep in args:
                    if dep in lhs_cs:
                        a, linear_solver = assemble_lhs()
                        break
                for dep in args:
                    if dep in rhs_cs:
                        L = assemble_rhs()
                        break
        elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
            J, hbcs = self.J(), self.hbcs()

            def assemble_lhs():
                # The Newton LHS is the Jacobian with homogenised BCs.
                a = PAForm(J, pre_assembly_parameters = pre_assembly_parameters["bilinear_forms"])
                cache_info("Pre-assembled LHS terms in solve for %s : %i" % (x.name(), a.n_pre_assembled()))
                cache_info("Non-pre-assembled LHS terms in solve for %s: %i" % (x.name(), a.n_non_pre_assembled()))
                linear_solver = linear_solver_cache.linear_solver(J,
                    linear_solver_parameters, pre_assembly_parameters["bilinear_forms"],
                    static = False,
                    bcs = hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                return a, linear_solver
            def assemble_rhs():
                # The Newton RHS is the negated residual.
                L = -eq.lhs
                if not is_zero_rhs(eq.rhs):
                    L += eq.rhs
                L = PAForm(L, pre_assembly_parameters = pre_assembly_parameters["linear_forms"])
                cache_info("Pre-assembled RHS terms in solve for %s : %i" % (x.name(), L.n_pre_assembled()))
                cache_info("Non-pre-assembled RHS terms in solve for %s: %i" % (x.name(), L.n_non_pre_assembled()))
                return L

            if len(args) == 0:
                a, linear_solver = assemble_lhs()
                L = assemble_rhs()
            else:
                a, linear_solver = self.__a, self.__linear_solver
                L = self.__L
                lhs_cs = set(ufl.algorithms.extract_coefficients(J))
                rhs_cs = set(ufl.algorithms.extract_coefficients(eq.lhs))
                if not is_zero_rhs(eq.rhs):
                    rhs_cs.update(ufl.algorithms.extract_coefficients(eq.rhs))
                for dep in args:
                    if dep in lhs_cs:
                        a, linear_solver = assemble_lhs()
                        break
                for dep in args:
                    if dep in rhs_cs:
                        L = assemble_rhs()
                        break

        # Scratch vector reused as the Newton update / solution increment.
        self.__dx = x.vector().copy()
        self.__a, self.__L, self.__linear_solver = a, L, linear_solver

        return

    def dependencies(self, non_symbolic = False):
        """
        Return equation dependencies. If non_symbolic is true, also return any
        other dependencies which could alter the result of a solve, such as the
        initial guess.
        """

        if not non_symbolic:
            return EquationSolver.dependencies(self, non_symbolic = False)
        elif not self.__initial_guess is None:
            # Copy before extending so the base class's set is not mutated.
            deps = copy.copy(EquationSolver.dependencies(self, non_symbolic = True))
            deps.add(self.__initial_guess)
            return deps
        else:
            return EquationSolver.dependencies(self, non_symbolic = True)

    def linear_solver(self):
        """
        Return the linear solver.
        """

        return self.__linear_solver

    def solve(self):
        """
        Solve the equation
        """

        x, pre_assembly_parameters = self.x(), self.pre_assembly_parameters()
        if not self.__initial_guess is None and not self.__initial_guess is x:
            x.assign(self.__initial_guess)

        if self.is_linear():
            bcs, linear_solver = self.bcs(), self.linear_solver()

            if isinstance(self.__a, dolfin.GenericMatrix):
                # Fully cached static matrix: only the RHS needs assembling.
                L = assemble(self.__L, copy = len(bcs) > 0)
                enforce_bcs(L, bcs)

                linear_solver.solve(x.vector(), L)
            elif self.__a.rank() == 2:
                a = assemble(self.__a, copy = len(bcs) > 0)
                L = assemble(self.__L, copy = len(bcs) > 0)
                apply_bcs(a, bcs, L = L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])

                linear_solver.set_operator(a)
                linear_solver.solve(x.vector(), L)
            else:
                assert(self.__a.rank() == 1)
                assert(linear_solver is None)
                # Rank 1 LHS: the system is diagonal, solve by division.
                a = assemble(self.__a, copy = False)
                L = assemble(self.__L, copy = False)

                assert(L.local_range() == a.local_range())
                x.vector().set_local(L.array() / a.array())
                x.vector().apply("insert")
                enforce_bcs(x.vector(), bcs)
        elif self.solver_parameters().get("nonlinear_solver", "newton") == "newton":
            # Newton solver, intended to have near identical behaviour to the Newton
            # solver supplied with DOLFIN. See
            # http://fenicsproject.org/documentation/tutorial/nonlinear.html for
            # further details.

            default_parameters = dolfin.NewtonSolver.default_parameters()
            solver_parameters = self.solver_parameters()
            if "newton_solver" in solver_parameters:
                parameters = solver_parameters["newton_solver"]
            else:
                parameters = {}
            linear_solver = self.linear_solver()

            atol = default_parameters["absolute_tolerance"]
            rtol = default_parameters["relative_tolerance"]
            max_its = default_parameters["maximum_iterations"]
            omega = default_parameters["relaxation_parameter"]
            err = default_parameters["error_on_nonconvergence"]
            r_def = default_parameters["convergence_criterion"]
            for key in parameters.keys():
                if key == "absolute_tolerance":
                    atol = parameters[key]
                elif key == "convergence_criterion":
                    r_def = parameters[key]
                elif key == "error_on_nonconvergence":
                    err = parameters[key]
                elif key == "maximum_iterations":
                    max_its = parameters[key]
                elif key == "relative_tolerance":
                    rtol = parameters[key]
                elif key == "relaxation_parameter":
                    omega = parameters[key]
                elif key in ["linear_solver", "preconditioner", "lu_solver", "krylov_solver"]:
                    pass
                elif key in ["method", "report"]:
                    raise NotImplementedException("Unsupported Newton solver parameter: %s" % key)
                else:
                    raise ParameterException("Unexpected Newton solver parameter: %s" % key)

            eq, bcs, hbcs = self.eq(), self.bcs(), self.hbcs()
            a, L = self.__a, self.__L

            x_name = x.name()
            x = x.vector()
            enforce_bcs(x, bcs)

            dx = self.__dx
            if not isinstance(linear_solver, dolfin.GenericLUSolver):
                # Krylov solvers may use dx as an initial guess; start clean.
                dx.zero()

            if r_def == "residual":
                # Converge when the l2 norm of the residual drops below
                # max(atol, rtol * initial residual).
                l_L = assemble(L, copy = len(hbcs) > 0)
                enforce_bcs(l_L, hbcs)
                r_0 = l_L.norm("l2")
                it = 0
                if r_0 >= atol:
                    l_a = assemble(a, copy = len(hbcs) > 0)
                    apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                    linear_solver.set_operator(l_a)
                    linear_solver.solve(dx, l_L)
                    x.axpy(omega, dx)
                    it += 1
                    atol = max(atol, r_0 * rtol)
                    while it < max_its:
                        l_L = assemble(L, copy = len(hbcs) > 0)
                        enforce_bcs(l_L, hbcs)
                        r = l_L.norm("l2")
                        if r < atol:
                            break
                        l_a = assemble(a, copy = len(hbcs) > 0)
                        apply_bcs(l_a, hbcs, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                        linear_solver.set_operator(l_a)
                        linear_solver.solve(dx, l_L)
                        x.axpy(omega, dx)
                        it += 1
            elif r_def == "incremental":
                # Converge when the l2 norm of the Newton update drops below
                # max(atol, rtol * first update norm).
                l_a = assemble(a, copy = len(hbcs) > 0)
                l_L = assemble(L, copy = len(hbcs) > 0)
                apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                linear_solver.set_operator(l_a)
                linear_solver.solve(dx, l_L)
                x.axpy(omega, dx)
                it = 1
                r_0 = dx.norm("l2")
                if r_0 >= atol:
                    atol = max(atol, rtol * r_0)
                    while it < max_its:
                        l_a = assemble(a, copy = len(hbcs) > 0)
                        l_L = assemble(L, copy = len(hbcs) > 0)
                        apply_bcs(l_a, hbcs, L = l_L, symmetric_bcs = pre_assembly_parameters["equations"]["symmetric_boundary_conditions"])
                        linear_solver.set_operator(l_a)
                        linear_solver.solve(dx, l_L)
                        x.axpy(omega, dx)
                        it += 1
                        if dx.norm("l2") < atol:
                            break
            else:
                raise ParameterException("Invalid convergence criterion: %s" % r_def)
            if it == max_its:
                if err:
                    raise StateException("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
                else:
                    dolfin.warning("Newton solve for %s failed to converge after %i iterations" % (x_name, it))
#            dolfin.info("Newton solve for %s converged after %i iterations" % (x_name, it))
        else:
            # Any other non-linear solver: fall back to DOLFIN's variational
            # solver machinery.
            problem = dolfin.NonlinearVariationalProblem(self.eq().lhs - self.eq().rhs, x, bcs = self.bcs(), J = self.J())
            nl_solver = dolfin.NonlinearVariationalSolver(problem)
            nl_solver.parameters.update(self.solver_parameters())
            nl_solver.solve()

        return
def pa_solve(*args, **kwargs):
    """
    Instantiate a PAEquationSolver using the supplied arguments and call its solve
    method.
    """

    solver = PAEquationSolver(*args, **kwargs)
    solver.solve()
| lgpl-3.0 |
trnewman/VT-USRP-daughterboard-drivers_python | gnuradio-core/src/lib/filter/generate_gr_fir_sysconfig.py | 1 | 3066 | #!/bin/env python
# -*- python -*-
#
# Copyright 2003 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from generate_utils import *
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_h ():
    """Generate gr_fir_sysconfig.h declaring the abstract sysconfig class.

    One pure-virtual factory and one info accessor are emitted per entry in
    fir_signatures (imported from generate_utils).
    """
    out = open_and_log_name ('gr_fir_sysconfig.h', 'w')
    out.write (copyright)

    out.write (
'''
/*
 * WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
 * Any changes made to this file will be overwritten.
 */

#ifndef INCLUDED_GR_FIR_SYSCONFIG_H
#define INCLUDED_GR_FIR_SYSCONFIG_H

#include <gr_types.h>

''')

    # for sig in fir_signatures:
    #     out.write ('class gr_fir_' + sig + ';\n')

    out.write ('#include <gr_fir_util.h>\n')

    out.write (
'''
/*!
 * \\brief abstract base class for configuring the automatic selection of the
 * fastest gr_fir for your platform.
 *
 * This is used internally by gr_fir_util.
 */

class gr_fir_sysconfig {
public:
  virtual ~gr_fir_sysconfig ();

''')

    # One factory method per FIR signature.
    for sig in fir_signatures:
        out.write ((' virtual gr_fir_%s *create_gr_fir_%s (const std::vector<%s> &taps) = 0;\n' %
                    (sig, sig, tap_type (sig))))

    out.write ('\n')

    # One info accessor per FIR signature.
    for sig in fir_signatures:
        out.write ((' virtual void get_gr_fir_%s_info (std::vector<gr_fir_%s_info> *info) = 0;\n' %
                    (sig, sig)))

    out.write (
'''
};

/*
 * This returns the single instance of the appropriate derived class.
 * This function must be defined only once in the system, and should be defined
 * in the platform specific code.
 */

gr_fir_sysconfig *gr_fir_sysconfig_singleton ();

#endif /* INCLUDED_GR_FIR_SYSCONFIG_H */
''')

    out.close ()
# ----------------------------------------------------------------
def make_gr_fir_sysconfig_cc ():
    """Generate gr_fir_sysconfig.cc containing the trivial destructor."""
    out = open_and_log_name ('gr_fir_sysconfig.cc', 'w')
    out.write (copyright)

    out.write (
'''
/*
 * WARNING: This file is automatically generated by generate_gr_fir_sysconfig.py
 * Any changes made to this file will be overwritten.
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <gr_fir_sysconfig.h>

gr_fir_sysconfig::~gr_fir_sysconfig ()
{
}
''')

    out.close ()
# ----------------------------------------------------------------
def generate ():
    """Generate both the header and the implementation file."""
    make_gr_fir_sysconfig_h ()
    make_gr_fir_sysconfig_cc ()
# Regenerate the sources when this module is run as a script.
if __name__ == '__main__':
    generate ()
| gpl-3.0 |
class Vector:
    """A mutable 3-D vector with float components x, y and z.

    Elementwise addition and multiplication are supported against either
    another Vector or a scalar, in left, right and in-place forms.
    """

    def __init__(self, arg=(0, 0, 0)):
        """Initialise from any 3-sequence; components are stored as floats."""
        self.x = float(arg[0])
        self.y = float(arg[1])
        self.z = float(arg[2])

    def set(self, val):
        """Overwrite the components from a Vector or a 3-sequence.

        Returns self so calls can be chained.  Bug fix: sequence input is
        now normalised with float(), matching __init__ (previously ints
        were stored as-is).
        """
        if isinstance(val, self.__class__):
            self.x = val.x
            self.y = val.y
            self.z = val.z
        else:
            self.x = float(val[0])
            self.y = float(val[1])
            self.z = float(val[2])
        return self

    def toString(self):
        """Return a "(x, y, z)" string representation."""
        return "(" + str(self.x) + ", " + str(self.y) + ", " + str(self.z) + ")"

    def __repr__(self):
        # Reuse the public string form for debugging output.
        return self.toString()

    def _components_of(self, other):
        """Return other's components, broadcasting a scalar to all axes."""
        if isinstance(other, self.__class__):
            return other.x, other.y, other.z
        return other, other, other

    def __mul__(self, other):
        ox, oy, oz = self._components_of(other)
        return Vector((self.x * ox, self.y * oy, self.z * oz))

    # Elementwise multiplication/addition are commutative, so the reflected
    # operators can share the implementation.
    __rmul__ = __mul__

    def __imul__(self, other):
        ox, oy, oz = self._components_of(other)
        self.x *= ox
        self.y *= oy
        self.z *= oz
        return self

    def __add__(self, other):
        ox, oy, oz = self._components_of(other)
        return Vector((self.x + ox, self.y + oy, self.z + oz))

    __radd__ = __add__

    def __iadd__(self, other):
        ox, oy, oz = self._components_of(other)
        self.x += ox
        self.y += oy
        self.z += oz
        return self

    def toTuple(self):
        """Return the components as an (x, y, z) tuple."""
        return (self.x, self.y, self.z)
SymbiFlow/prjxray | minitests/litex/uart_ddr/arty/scripts/arty.py | 1 | 4274 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
# This file is Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# License: BSD
import argparse
from migen import *
from litex_boards.platforms import arty
from litex.build.xilinx import VivadoProgrammer
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.cores.clock import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.init import get_sdram_phy_py_header
from litedram.modules import MT41K128M16
from litedram.phy import s7ddrphy
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
    """Clock/reset generator.

    Derives the system clock, the 4x clocks needed by the DDR3 PHY, and the
    200 MHz reference clock for IDELAYCTRL from the board's 100 MHz oscillator.
    """

    def __init__(self, platform, sys_clk_freq):
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
        self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
        self.clock_domains.cd_clk200 = ClockDomain()  # IDELAYCTRL reference

        # # #

        self.submodules.pll = pll = S7PLL(speedgrade=-1)
        # Hold the PLL in reset while the board's CPU reset button is pressed.
        self.comb += pll.reset.eq(~platform.request("cpu_reset"))
        pll.register_clkin(platform.request("clk100"), 100e6)
        pll.create_clkout(self.cd_sys, sys_clk_freq)
        pll.create_clkout(self.cd_sys4x, 4 * sys_clk_freq)
        # DQS clock: same 4x frequency, shifted 90 degrees for data capture.
        pll.create_clkout(self.cd_sys4x_dqs, 4 * sys_clk_freq, phase=90)
        pll.create_clkout(self.cd_clk200, 200e6)

        self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCSDRAM):
    """Minimal Arty DDR3 SoC used for Project X-Ray tests.

    No CPU is instantiated; the SoC is driven externally over the UART bridge.
    """

    def __init__(self):
        platform = arty.Platform()
        sys_clk_freq = int(50e6)

        # SoCSDRAM ---------------------------------------------------------------------------------
        SoCSDRAM.__init__(
            self,
            platform,
            clk_freq=sys_clk_freq,
            ident="Minimal Arty DDR3 Design for tests with Project X-Ray",
            ident_version=True,
            cpu_type=None,
            l2_size=16,
            uart_name="bridge")

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)

        # DDR3 SDRAM -------------------------------------------------------------------------------
        if not self.integrated_main_ram_size:
            self.submodules.ddrphy = s7ddrphy.A7DDRPHY(
                platform.request("ddram"),
                memtype="DDR3",
                nphases=4,
                sys_clk_freq=sys_clk_freq)
            self.add_csr("ddrphy")
            sdram_module = MT41K128M16(sys_clk_freq, "1:4")
            self.register_sdram(
                self.ddrphy,
                geom_settings=sdram_module.geom_settings,
                timing_settings=sdram_module.timing_settings)

    def generate_sdram_phy_py_header(self):
        """Write the SDRAM PHY initialization sequence to sdram_init.py."""
        # Fixed: use a context manager instead of manual open()/close() so the
        # file is closed even if the write raises.
        with open("sdram_init.py", "w") as f:
            f.write(
                get_sdram_phy_py_header(
                    self.sdram.controller.settings.phy,
                    self.sdram.controller.settings.timing))
# Load ---------------------------------------------------------------------------------------------
def load():
    """Program the previously built bitstream onto the board via Vivado."""
    VivadoProgrammer().load_bitstream("build/gateware/top.bit")
# Build --------------------------------------------------------------------------------------------
def main():
    """Command-line entry point: optionally load a bitstream, then elaborate/build."""
    parser = argparse.ArgumentParser(
        description="Minimal Arty DDR3 Design for tests with Project X-Ray")
    parser.add_argument("--build", action="store_true", help="Build bitstream")
    parser.add_argument("--load", action="store_true", help="Load bitstream")
    args = parser.parse_args()

    if args.load:
        load()

    soc = BaseSoC()
    Builder(soc, output_dir="build", csr_csv="csr.csv").build(run=args.build)
    soc.generate_sdram_phy_py_header()


if __name__ == "__main__":
    main()
| isc |
chenyu105/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

# Optional single argument restricts output to one command name or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        # Not an integer: treat the argument as a command name instead.
        for_comm = sys.argv[1]

# syscalls[comm][pid][id][ret] -> failure count (autovivifying dict).
syscalls = autodict()
def trace_begin():
    """perf callback: invoked once before event processing starts."""
    print "Press control+C to stop and show the summary"
def trace_end():
    """perf callback: invoked once after all events; prints the summary."""
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        common_callchain, id, ret):
    """Tally each failed syscall (negative return value) per comm/pid/id/errno."""
    # Skip events outside the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid):
        return

    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # First occurrence: the autodict leaf is not an int yet.
            syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, ret):
    """Handler variant without a callchain; delegates to the raw handler."""
    # NOTE(review): locals() here does not include common_callchain, which the
    # raw handler requires -- confirm perf never dispatches this variant, or
    # that the binding layer tolerates the missing argument.
    raw_syscalls__sys_exit(**locals())
def print_error_totals():
    """Print failed-syscall counts grouped by comm/pid, errors sorted by count."""
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                # ret_keys is computed but unused (leftover from an earlier
                # iteration scheme); the sorted() below iterates items directly.
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort by count descending (Python 2 tuple-parameter lambda).
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
barracuda7/android_kernel_samsung_exynos5420 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
# wxpython is an optional dependency; fail early with an actionable message
# (Python 2 raise syntax -- this script targets the py2 perf bindings).
try:
    import wx
except ImportError:
    raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level window drawing scheduler traces in a scrollable, zoomable view.

    Each task/CPU occupies one horizontal row of rectangles; the x axis is
    time (microseconds mapped to pixels via the current zoom factor).
    """

    Y_OFFSET = 100           # vertical margin above the first row
    RECT_HEIGHT = 100        # height of one row's rectangles
    RECT_SPACE = 50          # vertical gap between rows
    EVENT_MARKING_WIDTH = 5  # height of the event-marker strip atop a rectangle

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5           # pixels per microsecond scale factor (see us_to_px)
        self.scroll_scale = 20    # scroll unit, in pixels
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind on the container too so events land regardless of focus target.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None  # summary StaticText widget, created lazily

        self.Show(True)

    def us_to_px(self, val):
        """Convert a duration in microseconds to pixels at the current zoom."""
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        """Convert a pixel distance back to microseconds at the current zoom."""
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        """Return the scroll origin as a (x, y) pixel tuple."""
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        """Return the scroll origin's x position in microseconds."""
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw one time-span rectangle in row *nr*, optionally with an
        event-marker strip of *top_color* along its top edge."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main rectangle so it sits below the marker strip.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        """Ask the tracer to (re)draw all rectangles in the visible interval."""
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        """Paint handler: redraw the rectangles currently in view."""
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to its row index, or -1 if it falls outside any row."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        """Replace the summary text shown below the trace area."""
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        """Click handler: forward the clicked (row, timestamp) to the tracer."""
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        """Recompute the virtual canvas width from the trace interval and zoom."""
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        """Apply the current zoom factor, keeping time *x* at the scroll origin."""
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        """Keyboard handler: +/- to zoom, arrow keys to scroll one unit."""
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
gvlproject/tools-iuc | tools/scikit-bio/scikit_bio_diversity_beta_diversity.py | 9 | 3774 | #!/usr/bin/env python
# Reports a beta diversity matrix for tabular input file
# using scikit-bio
# Daniel Blankenberg
import codecs
import optparse
import sys
from skbio import TreeNode
from skbio.diversity import beta_diversity
__VERSION__ = "0.0.1"

DELIMITER = '\t'  # input abundance table is tab-separated

# Metrics that require a phylogenetic tree and/or OTU identifiers.
NEEDS_TREE = [ 'unweighted_unifrac', 'weighted_unifrac' ]
NEEDS_OTU_NAMES = [ 'unweighted_unifrac', 'weighted_unifrac' ]
def __main__():
    """Read a tabular abundance file and write a beta-diversity distance matrix.

    Parses command-line options, extracts per-sample counts (and OTU ids when
    needed), then delegates to skbio.diversity.beta_diversity.
    """
    parser = optparse.OptionParser( usage="%prog [options]" )
    parser.add_option( '-v', '--version', dest='version', action='store_true', default=False, help='print version and exit' )
    parser.add_option( '-i', '--input', dest='input', action='store', type="string", default=None, help='Input abundance Filename' )
    parser.add_option( '', '--otu_column', dest='otu_column', action='store', type="int", default=None, help='OTU ID Column (1 based)' )
    parser.add_option( '', '--sample_columns', dest='sample_columns', action='store', type="string", default=None, help='Comma separated list of sample columns, unset to use all.' )
    parser.add_option( '', '--header', dest='header', action='store_true', default=False, help='Abundance file has a header line' )
    parser.add_option( '', '--distance_metric', dest='distance_metric', action='store', type="string", default=None, help='Distance metric to use' )
    parser.add_option( '', '--tree', dest='tree', action='store', type="string", default=None, help='Newick Tree Filename' )
    parser.add_option( '-o', '--output', dest='output', action='store', type="string", default=None, help='Output Filename' )
    (options, args) = parser.parse_args()

    if options.version:
        print >> sys.stderr, "scikit-bio betadiversity from tabular file", __VERSION__
        sys.exit()

    # Convert the 1-based OTU column to a 0-based index.
    if options.otu_column is not None:
        otu_column = options.otu_column - 1
    else:
        otu_column = None

    if options.sample_columns is None:
        # Default to every column except the OTU id column.
        with open( options.input, 'rb' ) as fh:
            line = fh.readline()
            columns = range( len( line.split( DELIMITER ) ) )
            if otu_column in columns:
                columns.remove( otu_column )
    else:
        columns = map( lambda x: int( x ) - 1, options.sample_columns.split( "," ) )
    max_col = max( columns + [otu_column] )

    counts = [ [] for x in columns ]
    sample_names = []
    otu_names = []
    with open( options.input, 'rb' ) as fh:
        if options.header:
            header = fh.readline().rstrip('\n\r').split( DELIMITER )
            sample_names = [ header[i] for i in columns ]
        else:
            sample_names = [ "SAMPLE_%i" % x for x in range( len( columns ) ) ]
        for i, line in enumerate( fh ):
            fields = line.rstrip('\n\r').split( DELIMITER )
            if len(fields) <= max_col:
                # Bug fix: this previously wrote to "sys.stederr" (a typo),
                # which raised AttributeError on the first malformed line
                # instead of warning and skipping it.
                print >> sys.stderr, "Bad data line: ", fields
                continue
            if otu_column is not None:
                otu_names.append( fields[ otu_column ] )
            else:
                otu_names.append( "OTU_%i" % i )
            for j, col in enumerate( columns ):
                counts[ j ].append( int( fields[ col ] ) )

    extra_kwds = {}
    if options.distance_metric in NEEDS_OTU_NAMES:
        extra_kwds['otu_ids'] = otu_names
    if options.distance_metric in NEEDS_TREE:
        assert options.tree, Exception( "You must provide a newick tree when using '%s'" % options.distance_metric )
        # NB: TreeNode apparently needs unicode files
        with codecs.open( options.tree, 'rb', 'utf-8' ) as fh:
            extra_kwds['tree'] = TreeNode.read( fh )

    bd_dm = beta_diversity( options.distance_metric, counts, ids=sample_names, **extra_kwds )
    bd_dm.write( options.output )


if __name__ == "__main__":
    __main__()
| mit |
unicri/edx-platform | openedx/core/djangoapps/user_api/accounts/image_helpers.py | 40 | 5281 | """
Helper functions for the accounts API.
"""
import hashlib
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.files.storage import get_storage_class
from staticfiles.storage import staticfiles_storage
from microsite_configuration import microsite
from student.models import UserProfile
from ..errors import UserNotFound
PROFILE_IMAGE_FILE_EXTENSION = 'jpg'  # All processed profile images are converted to JPEGs

# Friendly size name -> square edge length (pixels) of each stored rendition.
PROFILE_IMAGE_SIZES_MAP = {
    'full': 500,
    'large': 120,
    'medium': 50,
    'small': 30
}
# Pixel sizes only, used when iterating every rendition.
_PROFILE_IMAGE_SIZES = PROFILE_IMAGE_SIZES_MAP.values()
def get_profile_image_storage():
    """
    Configures and returns a django Storage instance that can be used
    to physically locate, read and write profile images.
    """
    # PROFILE_IMAGE_BACKEND holds {'class': dotted path, 'options': kwargs}.
    config = settings.PROFILE_IMAGE_BACKEND
    storage_class = get_storage_class(config['class'])
    return storage_class(**config['options'])
def _make_profile_image_name(username):
    """
    Returns the user-specific part of the image filename, based on a hash of
    the username.
    """
    # Salting with PROFILE_IMAGE_SECRET_KEY keeps filenames non-guessable
    # from the username alone.
    return hashlib.md5(settings.PROFILE_IMAGE_SECRET_KEY + username).hexdigest()
def _get_profile_image_filename(name, size, file_extension=PROFILE_IMAGE_FILE_EXTENSION):
    """Build the storage filename for one profile-image rendition."""
    return '{}_{}.{}'.format(name, size, file_extension)
def _get_profile_image_urls(name, storage, file_extension=PROFILE_IMAGE_FILE_EXTENSION, version=None):
    """Map each friendly size name ("full", "large", ...) to its image URL,
    appending a ?v= cache-busting query string when a version is given."""
    urls = {}
    for size_display_name, size in PROFILE_IMAGE_SIZES_MAP.items():
        url = storage.url(
            _get_profile_image_filename(name, size, file_extension=file_extension)
        )
        if version is not None:
            url = '{}?v={}'.format(url, version)
        urls[size_display_name] = url
    return urls
def get_profile_image_names(username):
    """Map each pixel size to the storage filename of that user's rendition."""
    base = _make_profile_image_name(username)
    return dict(
        (size, _get_profile_image_filename(base, size))
        for size in _PROFILE_IMAGE_SIZES
    )
def get_profile_image_urls_for_user(user):
    """
    Return a dict {size_display_name: url} of profile image urls for ``user``.

    Notes:
      - This only computes what the URLs would be; it does not check that the
        underlying images exist. Users without an uploaded image get the
        default placeholder set instead.
      - Depending on django.conf.settings.PROFILE_IMAGE_BACKEND the URLs may
        be relative; callers construct the full URL if needed.

    Arguments:
        user (django.contrib.auth.User): the user for whom we are getting urls.

    Returns:
        dictionary of {size_display_name: url} for each image.
    """
    if not user.profile.has_profile_image:
        return _get_default_profile_image_urls()

    # Cache-bust with the upload timestamp so fresh uploads show immediately.
    version = user.profile.profile_image_uploaded_at.strftime("%s")
    return _get_profile_image_urls(
        _make_profile_image_name(user.username),
        get_profile_image_storage(),
        version=version,
    )
def _get_default_profile_image_urls():
    """
    Returns a dict {size:url} for a complete set of default profile images,
    used as a placeholder when there are no user-submitted images.

    TODO The result of this function should be memoized, but not in tests.
    """
    # Microsites may override the default placeholder filename.
    return _get_profile_image_urls(
        microsite.get_value('PROFILE_IMAGE_DEFAULT_FILENAME', settings.PROFILE_IMAGE_DEFAULT_FILENAME),
        staticfiles_storage,
        file_extension=settings.PROFILE_IMAGE_DEFAULT_FILE_EXTENSION,
    )
def set_has_profile_image(username, is_uploaded, upload_dt=None):
    """
    System (not user-facing) API call used to store whether the user has
    uploaded a profile image, and if so, when.  Used by profile_image API.

    Arguments:
        username (django.contrib.auth.User.username): references the user who
            uploaded an image.

        is_uploaded (bool): whether or not the user has an uploaded profile
            image.

        upload_dt (datetime.datetime): If `is_uploaded` is True, this should
            contain the server-side date+time of the upload.  If `is_uploaded`
            is False, the parameter is optional and will be ignored.

    Raises:
        ValueError: is_uploaded was True, but no upload datetime was supplied.
        UserNotFound: no user with username `username` exists.
    """
    if is_uploaded and upload_dt is None:
        raise ValueError("No upload datetime was supplied.")
    elif not is_uploaded:
        # Removal: clear any stale timestamp regardless of what was passed.
        upload_dt = None

    try:
        profile = UserProfile.objects.get(user__username=username)
    except ObjectDoesNotExist:
        raise UserNotFound()

    profile.profile_image_uploaded_at = upload_dt
    profile.save()
| agpl-3.0 |
sodafree/backend | django/contrib/auth/management/__init__.py | 62 | 4659 | """
Creates permissions for all installed apps that need permissions.
"""
import getpass
import locale
import unicodedata
from django.contrib.auth import models as auth_app
from django.db.models import get_models, signals
from django.contrib.auth.models import User
def _get_permission_codename(action, opts):
    """Build the default codename, e.g. u'add_article' for the 'add' action."""
    return u'{0}_{1}'.format(action, opts.object_name.lower())
def _get_all_permissions(opts):
    "Returns (codename, name) for all permissions in the given opts."
    # Default add/change/delete permissions, followed by any custom ones
    # declared in the model's Meta.permissions.
    default_perms = [
        (_get_permission_codename(action, opts),
         u'Can %s %s' % (action, opts.verbose_name_raw))
        for action in ('add', 'change', 'delete')
    ]
    return default_perms + list(opts.permissions)
def create_permissions(app, created_models, verbosity, **kwargs):
    """post_syncdb receiver: create any missing Permission rows for app's models."""
    from django.contrib.contenttypes.models import ContentType

    app_models = get_models(app)

    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_models:
        ctype = ContentType.objects.get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta):
            searched_perms.append((ctype, perm))

    # Find all the Permissions that have a context_type for a model we're
    # looking for.  We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(auth_app.Permission.objects.filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))

    # Bulk-create only the permissions that do not already exist.
    objs = [
        auth_app.Permission(codename=codename, name=name, content_type=ctype)
        for ctype, (codename, name) in searched_perms
        if (ctype.pk, codename) not in all_perms
    ]
    auth_app.Permission.objects.bulk_create(objs)
    if verbosity >= 2:
        for obj in objs:
            print "Adding permission '%s'" % obj
def create_superuser(app, created_models, verbosity, db, **kwargs):
    """post_syncdb receiver: interactively offer to create the first superuser."""
    from django.core.management import call_command

    # Only prompt when the auth User table was just created and the run is
    # interactive (i.e. not a scripted syncdb).
    if auth_app.User in created_models and kwargs.get('interactive', True):
        msg = ("\nYou just installed Django's auth system, which means you "
            "don't have any superusers defined.\nWould you like to create one "
            "now? (yes/no): ")
        confirm = raw_input(msg)
        while 1:
            if confirm not in ('yes', 'no'):
                confirm = raw_input('Please enter either "yes" or "no": ')
                continue
            if confirm == 'yes':
                call_command("createsuperuser", interactive=True, database=db)
            break
def get_system_username():
    """
    Try to determine the current system user's username.

    :returns: The username as a unicode string, or an empty string if the
        username could not be determined.
    """
    try:
        # getuser() returns a byte string on Python 2; decode it with the
        # locale's preferred encoding.
        return getpass.getuser().decode(locale.getdefaultlocale()[1])
    except (ImportError, KeyError, UnicodeDecodeError):
        # KeyError will be raised by os.getpwuid() (called by getuser())
        # if there is no corresponding entry in the /etc/passwd file
        # (a very restricted chroot environment, for example).
        # UnicodeDecodeError - preventive treatment for non-latin Windows.
        return u''
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    from django.contrib.auth.management.commands.createsuperuser import (
        RE_VALID_USERNAME)
    default_username = get_system_username()
    try:
        # Strip accents and spaces, lowercase, and force ASCII so the result
        # is a plausible username.
        default_username = unicodedata.normalize('NFKD', default_username)\
            .encode('ascii', 'ignore').replace(' ', '').lower()
    except UnicodeDecodeError:
        return ''
    if not RE_VALID_USERNAME.match(default_username):
        return ''

    # Don't return the default username if it is already taken.
    if check_db and default_username:
        try:
            User.objects.get(username=default_username)
        except User.DoesNotExist:
            pass
        else:
            return ''
    return default_username
# Hook permission and superuser creation into syncdb; the dispatch_uids keep
# the receivers from being registered twice on repeated imports.
signals.post_syncdb.connect(create_permissions,
    dispatch_uid = "django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
    sender=auth_app, dispatch_uid = "django.contrib.auth.management.create_superuser")
| bsd-3-clause |
windmill/windmill | windmill/authoring/unit.py | 2 | 2026 | # Copyright (c) 2008-2009 Mikeal Rogers <mikeal.rogers@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
from windmill.dep import functest
reports = functest.reports
class UnitTestReporter(reports.FunctestReportInterface):
    """functest reporter that records the final test list so
    WindmillUnitTestCase can turn each result into a unittest assertion."""

    def summary(self, test_list, totals_dict, stdout_capture):
        # Only the raw test list is needed; totals and captured stdout are ignored.
        self.test_list = test_list
# Register a single module-level reporter; WindmillUnitTestCase reads
# unittestreporter.test_list after each run.
unittestreporter = UnitTestReporter()
reports.register_reporter(unittestreporter)
class WindmillUnitTestCase(unittest.TestCase):
    """unittest TestCase that boots windmill, runs a directory of windmill
    tests in a browser, and reports each windmill result as an assertion.

    Subclasses are expected to provide ``test_url``, ``browser`` and
    ``test_dir`` attributes (and optionally a ``windmill_settings`` dict).
    """

    def setUp(self):
        import windmill
        windmill.stdout, windmill.stdin = sys.stdout, sys.stdin
        from windmill.bin.admin_lib import configure_global_settings, setup
        configure_global_settings()
        windmill.settings['TEST_URL'] = self.test_url
        # Allow per-class overrides of arbitrary windmill settings.
        if hasattr(self,"windmill_settings"):
            for (setting,value) in self.windmill_settings.iteritems():
                windmill.settings[setting] = value
        self.windmill_shell_objects = setup()

    def testWindmill(self):
        """Launch the browser, run every windmill test, assert each passed."""
        self.windmill_shell_objects['start_'+self.browser]()
        self.windmill_shell_objects['do_test'](self.test_dir, threaded=False)
        for test in unittestreporter.test_list:
            # Surface each windmill test's name/doc in unittest failure output.
            self._testMethodDoc = getattr(test, "__doc__", None)
            self._testMethodName = test.__name__
            self.assertEquals(test.result, True)

    def tearDown(self):
        from windmill.bin.admin_lib import teardown
        teardown(self.windmill_shell_objects)
kapil1garg/eecs338-chris-jones | show_query.py | 1 | 3440 | import json
import elastic
from operator import itemgetter
from default_query import DefaultQuery
class ShowQuery(DefaultQuery):
    """
    Handles ES queries related to shows
    """
    def __init__(self):
        DefaultQuery.__init__(self)

    def generate_response_best_show(self, query, annotated_query):
        """Return the show-mentioning sentences from the article (matching all
        queried shows) whose overall sentiment polarity is highest."""
        # Step 1: find the document id with the maximum polarity among
        # articles whose full text matches every queried show.
        payload = {
            '_source': ['documentSentiment.polarity'],
            'query': {
                'bool': {
                    'must': [{
                        'match': {
                            'Full text:': p
                        }}
                        for p in annotated_query.shows]
                }
            }
        }
        r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
        polarities = [(i['_id'], i['_source']['documentSentiment']['polarity']) for i in r]
        id_max_polarity = max(polarities, key=itemgetter(1))[0]

        # Step 2: from that one document, pull the sentences mentioning the
        # show(s) via a nested inner-hits query.
        payload = {
            '_source': ['sentences.content', 'Full text:', 'ProQ:'],
            'query': {
                'bool': {
                    'must': [{
                        'ids': {
                            'values': [id_max_polarity]
                        }},
                        {'nested': {
                            'path': 'sentences',
                            'query': {
                                'bool': {
                                    'must': [{'match': {'sentences.content': p}} for p in annotated_query.shows]
                                }
                            },
                            'inner_hits': {}
                        }}]
                }
            }
        }
        r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
        r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
        return self.format_response(r[0])

    def generate_response_person_in_show(self, query, annotated_query):
        """Return sentences mentioning the queried people from articles whose
        full text also matches every queried show."""
        match_queries = [{
            'match': {
                'Full text:': show
            }
        }
            for show in annotated_query.shows
        ]
        match_queries.append({
            'nested': {
                'path': 'sentences',
                'query': {
                    'bool': {
                        'must': [{
                            'match': {
                                'sentences.content': p
                            }
                        }
                            for p in annotated_query.people
                        ]
                    }
                },
                'inner_hits': {}
            }
        })

        payload = {
            '_source': ['sentences.content', 'Full text:', 'ProQ:'],
            'query': {
                'bool': {
                    'must': match_queries
                }
            }
        }
        # Fixed: removed a leftover Python-2 debug statement ("print r") that
        # dumped the entire raw ES response to stdout on every request.
        r = json.loads(elastic.search(elastic.ES_URL, '/flattened-articles/_search', payload))['hits']['hits']
        r = [(i['inner_hits']['sentences']['hits'], i['_source']['ProQ:'], i['_source']['Full text:']) for i in r]
        return self.format_response(r[0])
| mit |
jakevdp/altair | altair/utils/server.py | 1 | 4035 | """
A Simple server used to show altair graphics from a prompt or script.
This is adapted from the mpld3 package; see
https://github.com/mpld3/mpld3/blob/master/mpld3/_server.py
"""
import sys
import threading
import webbrowser
import socket
import itertools
import random
from ._py3k_compat import server, IO
JUPYTER_WARNING = """
Note: if you're in the Jupyter notebook, Chart.serve() is not the best
way to view plots. Consider using Chart.display().
You must interrupt the kernel to cancel this command.
"""
# Mock server used for testing
class MockRequest(object):
    """Stand-in request object used when testing with MockServer."""

    def makefile(self, *args, **kwargs):
        # Always pretend the client sent a GET for the root path.
        return IO(b"GET /")

    def sendall(self, response):
        # Swallow the response bytes; tests never inspect them.
        pass
class MockServer(object):
    """Server double: handles one synthetic request at construction time."""

    def __init__(self, ip_port, Handler):
        # Drive the handler once with a fake request, as a real server would.
        Handler(MockRequest(), ip_port[0], self)

    def serve_forever(self):
        pass  # nothing to serve; the single request was handled in __init__

    def server_close(self):
        pass  # no real socket to close
def generate_handler(html, files=None):
    """Build a request-handler class that serves *html* at '/' and any extra
    *files* (a dict of path -> (content_type, content)) at their paths."""
    extra = {} if files is None else files

    class MyHandler(server.BaseHTTPRequestHandler):
        def do_GET(self):
            """Respond to a GET request."""
            if self.path == '/':
                self._respond("text/html", html)
            elif self.path in extra:
                content_type, content = extra[self.path]
                self._respond(content_type, content)
            else:
                self.send_error(404)

        def _respond(self, content_type, content):
            # Shared 200-response path for both the page and extra files.
            self.send_response(200)
            self.send_header("Content-type", content_type)
            self.end_headers()
            self.wfile.write(content.encode())

    return MyHandler
def find_open_port(ip, port, n=50):
    """Find an open port near the specified port.

    Tries ``port`` .. ``port + n - 1`` in order, then falls back to a single
    random port within ``port +/- 2n``.  Raises ValueError if none are free.
    """
    # Fixed: the random fallback was a bare int passed to itertools.chain(),
    # which raised TypeError (ints aren't iterable) instead of being tried;
    # it must be wrapped in an iterable such as a one-element tuple.
    candidates = itertools.chain(
        (port + i for i in range(n)),
        (port + random.randint(-2 * n, 2 * n),))
    for candidate in candidates:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = s.connect_ex((ip, candidate))
        s.close()
        if result != 0:
            # connect failed => nothing is listening there, so it is free.
            return candidate
    raise ValueError("no open ports found")
def serve(html, ip='127.0.0.1', port=8888, n_retries=50, files=None,
          jupyter_warning=True, open_browser=True, http_server=None):
    """Start a server serving the given HTML, and (optionally) open a browser

    Parameters
    ----------
    html : string
        HTML to serve
    ip : string (default = '127.0.0.1')
        ip address at which the HTML will be served.
    port : int (default = 8888)
        the port at which to serve the HTML
    n_retries : int (default = 50)
        the number of nearby ports to search if the specified port is in use.
    files : dictionary (optional)
        dictionary of extra content to serve
    jupyter_warning : bool (optional)
        if True (default), then print a warning if this is used within Jupyter
    open_browser : bool (optional)
        if True (default), then open a web browser to the given HTML
    http_server : class (optional)
        optionally specify an HTTPServer class to use for showing the
        figure. The default is Python's basic HTTPServer.
    """
    port = find_open_port(ip, port, n_retries)
    Handler = generate_handler(html, files)

    if http_server is None:
        srvr = server.HTTPServer((ip, port), Handler)
    else:
        srvr = http_server((ip, port), Handler)

    if jupyter_warning:
        # Fixed: narrowed a bare "except:" to NameError -- the only exception
        # this probe for the IPython-injected global can legitimately raise.
        try:
            __IPYTHON__  # noqa
        except NameError:
            pass
        else:
            print(JUPYTER_WARNING)

    # Start the server
    print("Serving to http://{}:{}/ [Ctrl-C to exit]".format(ip, port))
    sys.stdout.flush()

    if open_browser:
        # Use a thread to open a web browser pointing to the server
        def _open_browser():
            webbrowser.open('http://{}:{}'.format(ip, port))
        threading.Thread(target=_open_browser).start()

    try:
        srvr.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        print("\nstopping Server...")

    srvr.server_close()
| bsd-3-clause |
Jens-G/thrift | lib/py/src/transport/TZlibTransport.py | 47 | 8778 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""TZlibTransport provides a compressed transport and transport factory
class, using the python standard library zlib module to implement
data compression.
"""
from __future__ import division
import zlib
from .TTransport import TTransportBase, CReadableTransport
from ..compat import BufferIO
class TZlibTransportFactory(object):
    """Factory that wraps transports in TZlibTransport, caching the last result.

    A single (transport -> zlib transport) pair is memoized at class scope, so
    a TServer asking this factory for both the input and output transport of
    one client receives the _same_ TZlibTransport object.  The cache is one
    entry deep and therefore only suitable for non-threaded scenarios; sharing
    one object also avoids allocating a second set of read/write buffers and
    keeps the getCompSavings()/getCompRatio() statistics easy to interpret.
    """

    # class scoped cache of last transport given and zlibtransport returned
    _last_trans = None
    _last_z = None

    def getTransport(self, trans, compresslevel=9):
        """Return a TZlibTransport wrapping the C{trans} TTransport instance.

        @param compresslevel: The zlib compression level, ranging
        from 0 (no compression) to 9 (best compression). Defaults to 9.
        @type compresslevel: int

        Repeated calls with the same C{trans} return the cached wrapper.
        """
        if trans != self._last_trans:
            wrapped = TZlibTransport(trans, compresslevel)
            self._last_trans = trans
            self._last_z = wrapped
        return self._last_z
class TZlibTransport(TTransportBase, CReadableTransport):
    """Class that wraps a transport with zlib, compressing writes
    and decompressing reads, using the python standard
    library zlib module.
    """
    # Read buffer size for the python fastbinary C extension,
    # the TBinaryProtocolAccelerated class.
    DEFAULT_BUFFSIZE = 4096

    def __init__(self, trans, compresslevel=9):
        """Create a new TZlibTransport, wrapping C{trans}, another
        TTransport derived object.

        @param trans: A thrift transport object, i.e. a TSocket() object.
        @type trans: TTransport
        @param compresslevel: The zlib compression level, ranging
          from 0 (no compression) to 9 (best compression). Default is 9.
        @type compresslevel: int
        """
        self.__trans = trans
        self.compresslevel = compresslevel
        self.__rbuf = BufferIO()
        self.__wbuf = BufferIO()
        self._init_zlib()
        self._init_stats()

    def _reinit_buffers(self):
        """Internal method to initialize/reset the internal StringIO objects
        for read and write buffers.
        """
        self.__rbuf = BufferIO()
        self.__wbuf = BufferIO()

    def _init_stats(self):
        """Internal method to reset the internal statistics counters
        for compression ratios and bandwidth savings.
        """
        self.bytes_in = 0
        self.bytes_out = 0
        self.bytes_in_comp = 0
        self.bytes_out_comp = 0

    def _init_zlib(self):
        """Internal method for setting up the zlib compression and
        decompression objects.
        """
        self._zcomp_read = zlib.decompressobj()
        self._zcomp_write = zlib.compressobj(self.compresslevel)

    def getCompRatio(self):
        """Get the current measured compression ratios (in,out) from
        this transport.

        Returns a tuple of:
        (inbound_compression_ratio, outbound_compression_ratio)

        The compression ratios are computed as:
            compressed / uncompressed

        E.g., data that compresses by 10x will have a ratio of: 0.10
        and data that compresses to half of its original size will
        have a ratio of 0.5

        None is returned if no bytes have yet been processed in
        a particular direction.
        """
        r_percent, w_percent = (None, None)
        if self.bytes_in > 0:
            r_percent = self.bytes_in_comp / self.bytes_in
        if self.bytes_out > 0:
            w_percent = self.bytes_out_comp / self.bytes_out
        return (r_percent, w_percent)

    def getCompSavings(self):
        """Get the current count of saved bytes due to data
        compression.

        Returns a tuple of:
        (inbound_saved_bytes, outbound_saved_bytes)

        Note: if compression is actually expanding your
        data (only likely with very tiny thrift objects), then
        the values returned will be negative.
        """
        r_saved = self.bytes_in - self.bytes_in_comp
        w_saved = self.bytes_out - self.bytes_out_comp
        return (r_saved, w_saved)

    def isOpen(self):
        """Return the underlying transport's open status"""
        return self.__trans.isOpen()

    def open(self):
        """Open the underlying transport"""
        self._init_stats()
        return self.__trans.open()

    def listen(self):
        """Invoke the underlying transport's listen() method"""
        self.__trans.listen()

    def accept(self):
        """Accept connections on the underlying transport"""
        return self.__trans.accept()

    def close(self):
        """Close the underlying transport."""
        self._reinit_buffers()
        self._init_zlib()
        return self.__trans.close()

    def read(self, sz):
        """Read up to sz bytes from the decompressed bytes buffer, and
        read from the underlying transport if the decompression
        buffer is empty.
        """
        ret = self.__rbuf.read(sz)
        if len(ret) > 0:
            return ret
        # keep reading from transport until something comes back
        # NOTE(review): if the underlying transport signals EOF by
        # returning empty reads rather than raising, this loop would spin
        # forever -- it relies on the transport raising on EOF; confirm.
        while True:
            if self.readComp(sz):
                break
        ret = self.__rbuf.read(sz)
        return ret

    def readComp(self, sz):
        """Read compressed data from the underlying transport, then
        decompress it and append it to the internal StringIO read buffer
        """
        zbuf = self.__trans.read(sz)
        # Prepend any compressed input zlib could not consume last time.
        zbuf = self._zcomp_read.unconsumed_tail + zbuf
        buf = self._zcomp_read.decompress(zbuf)
        self.bytes_in += len(zbuf)
        self.bytes_in_comp += len(buf)
        old = self.__rbuf.read()
        self.__rbuf = BufferIO(old + buf)
        if len(old) + len(buf) == 0:
            return False
        return True

    def write(self, buf):
        """Write some bytes, putting them into the internal write
        buffer for eventual compression.
        """
        self.__wbuf.write(buf)

    def flush(self):
        """Flush any queued up data in the write buffer and ensure the
        compression buffer is flushed out to the underlying transport
        """
        wout = self.__wbuf.getvalue()
        if len(wout) > 0:
            zbuf = self._zcomp_write.compress(wout)
            self.bytes_out += len(wout)
            self.bytes_out_comp += len(zbuf)
        else:
            # Must be a bytes literal: zlib's flush() below returns bytes,
            # and concatenating str + bytes raises TypeError on Python 3.
            # (b'' == '' on Python 2, so this is safe on both.)
            zbuf = b''
        ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
        self.bytes_out_comp += len(ztail)
        if (len(zbuf) + len(ztail)) > 0:
            self.__wbuf = BufferIO()
            self.__trans.write(zbuf + ztail)
        self.__trans.flush()

    @property
    def cstringio_buf(self):
        """Implement the CReadableTransport interface"""
        return self.__rbuf

    def cstringio_refill(self, partialread, reqlen):
        """Implement the CReadableTransport interface for refill"""
        retstring = partialread
        if reqlen < self.DEFAULT_BUFFSIZE:
            retstring += self.read(self.DEFAULT_BUFFSIZE)
        while len(retstring) < reqlen:
            retstring += self.read(reqlen - len(retstring))
        self.__rbuf = BufferIO(retstring)
        return self.__rbuf
| apache-2.0 |
CWDoherty/Baseball | Scripts/hashtags.py | 1 | 1999 | '''
Copyright (c) 2015 Chris Doherty, Oliver Nabavian
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import mysql.connector, re

# Connection parameters for the local MySQL baseball database.
config = {
    'user': 'root',
    'password': 'isles40',
    'host': '127.0.0.1',
    'database': 'baseballdb'
}

cnx = mysql.connector.connect(**config)
cursor = cnx.cursor(buffered=True)

# Fetch every tweet; keep only the ones that contain a '#' character.
tweets = ("SELECT message, user_id, tweet_id FROM Tweet")
cursor.execute(tweets)
tweet_list = [row for row in cursor if '#' in row[0]]

# Match a hashtag together with any non-space characters stuck to it.
# (Raw string avoids the invalid '\S' string escape warning.)
find_tags = re.compile(r"\S*#(?:\S+)")

# Collect [tags, user_id, tweet_id] for each tweet with at least one tag.
all_tag = []
for message, user_id, tweet_id in tweet_list:
    tags = re.findall(find_tags, message)
    if tags:
        all_tag.append([tags, user_id, tweet_id])

insert = ("INSERT INTO Hashtag(tag, user_id, tweet_id) VALUES (%s, %s, %s)")

# Flatten to one (tag, user_id, tweet_id) parameter row per hashtag.
query = []
for tags, user_id, tweet_id in all_tag:
    for tag in tags:
        query.append([tag, user_id, tweet_id])
print(query)

for params in query:
    try:
        cursor.execute(insert, params)
        cnx.commit()
    except mysql.connector.Error:
        # Duplicate entries will not make it into the database.
        # Only database errors are swallowed; anything else propagates.
        continue

cursor.close()
cnx.close()
| mit |
hectoruelo/scrapy | scrapy/loader/processors.py | 145 | 2850 | """
This module provides some commonly used processors for Item Loaders.
See documentation in docs/topics/loaders.rst
"""
from scrapy.utils.misc import arg_to_iter
from scrapy.utils.datatypes import MergeDict
from .common import wrap_loader_context
class MapCompose(object):
    """Processor that applies each of ``functions`` in turn to every
    element of the (possibly iterable) input value.

    After each function runs, its result is re-expanded with
    ``arg_to_iter`` and the flattened collection is fed to the next
    function, so a single input element may produce zero or many
    output elements.
    """
    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        # Normalize the input into an iterable of values.
        values = arg_to_iter(value)
        if loader_context:
            # Per-call context entries take precedence over the defaults
            # supplied at construction time.
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        # Bind the loader context into each function before applying it.
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            next_values = []
            for v in values:
                next_values += arg_to_iter(func(v))
            values = next_values
        return values
class Compose(object):
    """Processor that pipes a single value through ``functions`` from
    left to right.

    By default the chain stops early (returning ``None``) as soon as an
    intermediate result is ``None``; pass ``stop_on_none=False`` among
    the keyword arguments to keep going instead.
    """
    def __init__(self, *functions, **default_loader_context):
        self.functions = functions
        # NOTE: stop_on_none is read from (but not removed from) the
        # context kwargs, so it is also carried along as loader context.
        self.stop_on_none = default_loader_context.get('stop_on_none', True)
        self.default_loader_context = default_loader_context

    def __call__(self, value, loader_context=None):
        if loader_context:
            # Per-call context entries override the construction defaults.
            context = MergeDict(loader_context, self.default_loader_context)
        else:
            context = self.default_loader_context
        wrapped_funcs = [wrap_loader_context(f, context) for f in self.functions]
        for func in wrapped_funcs:
            if value is None and self.stop_on_none:
                break
            value = func(value)
        return value
class TakeFirst(object):
    """Return the first value that is neither ``None`` nor the empty
    string; implicitly return ``None`` when no such value exists.

    Note that other falsy values (``0``, ``False``, empty containers)
    are considered valid and will be returned.
    """
    def __call__(self, values):
        return next(
            (value for value in values if value is not None and value != ''),
            None,
        )
class Identity(object):
    """Processor that returns the input values unchanged."""
    def __call__(self, values):
        return values
class SelectJmes(object):
    """
    Query the input string for the jmespath (given at instantiation),
    and return the answer
    Requires : jmespath(https://github.com/jmespath/jmespath)
    Note: SelectJmes accepts only one input element at a time.
    """
    def __init__(self, json_path):
        self.json_path = json_path
        # Imported lazily so the third-party jmespath package is only
        # required when this processor is actually instantiated.
        import jmespath
        # Pre-compile the expression once; __call__ may run many times.
        self.compiled_path = jmespath.compile(self.json_path)

    def __call__(self, value):
        """Query value for the jmespath query and return answer
        :param value: a data structure (dict, list) to extract from
        :return: Element extracted according to jmespath query
        """
        return self.compiled_path.search(value)
class Join(object):
    """Concatenate all input values into one string, separated by
    ``separator`` (a single space by default).
    """
    def __init__(self, separator=u' '):
        self.separator = separator

    def __call__(self, values):
        sep = self.separator
        return sep.join(values)
| bsd-3-clause |
pombredanne/pants | src/python/pants/java/jar/jar_dependency.py | 4 | 7368 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import urlparse
from pants.base.build_environment import get_buildroot
from pants.base.payload_field import stable_json_sha1
from pants.base.validation import assert_list
from pants.java.jar.exclude import Exclude
from pants.java.jar.jar_dependency_utils import M2Coordinate
from pants.util.memo import memoized_method, memoized_property
from pants.util.objects import datatype
class JarDependencyParseContextWrapper(object):
    """A pre-built Maven repository dependency.

    This is the BUILD-file facing factory: it is bound to a parse
    context so relative file URLs can later be resolved against the
    directory of the BUILD file that declared the jar.

    Examples:
      # The typical use case.
      jar('com.puppycrawl.tools', 'checkstyle', '1.2')

      # Test external dependency locally.
      jar('org.foobar', 'foobar', '1.2-SNAPSHOT',
          url='file:///Users/pantsdev/workspace/project/jars/checkstyle/checkstyle.jar')

      # Test external dependency locally using relative path (with respect to the path
      # of the belonging BUILD file)
      jar('org.foobar', 'foobar', '1.2-SNAPSHOT',
          url='file:../checkstyle/checkstyle.jar')
    """
    def __init__(self, parse_context):
        """
        :param parse_context: The BUILD file parse context.
        """
        self._parse_context = parse_context

    def __call__(self, org, name, rev=None, force=False, ext=None, url=None, apidocs=None,
                 classifier=None, mutable=None, intransitive=False, excludes=None):
        """
        :param string org: The Maven ``groupId`` of this dependency.
        :param string name: The Maven ``artifactId`` of this dependency.
        :param string rev: The Maven ``version`` of this dependency.
          If unspecified the latest available version is used.
        :param boolean force: Force this specific artifact revision even if other transitive
          dependencies specify a different revision. This requires specifying the ``rev`` parameter.
        :param string ext: Extension of the artifact if different from the artifact type.
          This is sometimes needed for artifacts packaged with Maven bundle type but stored as jars.
        :param string url: URL of this artifact, if different from the Maven repo standard location
          (specifying this parameter is unusual). Path of file URL can be either absolute or relative
          to the belonging BUILD file.
        :param string apidocs: URL of existing javadocs, which if specified, pants-generated javadocs
          will properly hyperlink {\ @link}s.
        :param string classifier: Classifier specifying the artifact variant to use.
        :param boolean mutable: Inhibit caching of this mutable artifact. A common use is for
          Maven -SNAPSHOT style artifacts in an active development/integration cycle.
        :param boolean intransitive: Declares this Dependency intransitive, indicating only the jar for
          the dependency itself should be downloaded and placed on the classpath
        :param list excludes: Transitive dependencies of this jar to exclude.
        :type excludes: list of :class:`pants.backend.jvm.targets.exclude.Exclude`
        """
        # Forward the BUILD file's directory as base_path so that
        # JarDependency.get_url can resolve relative file:// URLs.
        return JarDependency(org, name, rev, force, ext, url, apidocs, classifier, mutable, intransitive,
                             excludes, self._parse_context.rel_path)
class JarDependency(datatype('JarDependency', [
    'org', 'base_name', 'rev', 'force', 'ext', 'url', 'apidocs',
    'classifier', 'mutable', 'intransitive', 'excludes', 'base_path'])):
    """A pre-built Maven repository dependency.

    This is the developer facing api, compared to the context wrapper class
    `JarDependencyParseContextWrapper`, which exposes api through build file to users.

    The only additional parameter `base_path` here is so that we can retrieve the file URL
    in its absolute (for ivy) or relative (for fingerprinting) form. The context wrapper class
    determines the `base_path` from where `jar` is defined at.

    If a relative file url is provided, its absolute form will be
    (`buildroot` + `base_path` + relative url).

    :API: public
    """
    @staticmethod
    def _prepare_excludes(excludes):
        # Validate the excludes and freeze them into a tuple so instances of
        # this (namedtuple-based) datatype remain immutable and hashable.
        return tuple(assert_list(excludes,
                                 expected_type=Exclude,
                                 can_be_none=True,
                                 key_arg='excludes',
                                 allowable=(tuple, list,)))

    def __new__(cls, org, name, rev=None, force=False, ext=None, url=None, apidocs=None,
                classifier=None, mutable=None, intransitive=False, excludes=None, base_path=None):
        """
        :param string base_path: base path that's relative to the build root.
        """
        excludes = JarDependency._prepare_excludes(excludes)
        # Normalize base_path to a buildroot-relative path ('.' by default).
        base_path = base_path or '.'
        if os.path.isabs(base_path):
            base_path = os.path.relpath(base_path, get_buildroot())
        return super(JarDependency, cls).__new__(
            cls, org=org, base_name=name, rev=rev, force=force, ext=ext, url=url, apidocs=apidocs,
            classifier=classifier, mutable=mutable, intransitive=intransitive, excludes=excludes,
            base_path=base_path)

    @property
    def name(self):
        # Stored as `base_name` in the datatype; exposed under the public name.
        return self.base_name

    @memoized_method
    def get_url(self, relative=False):
        """Return the artifact URL, converting file: URLs between their
        absolute form (used by ivy) and their base_path-relative form
        (used for stable fingerprinting), as selected by ``relative``.
        Non-file URLs are returned unchanged.
        """
        if self.url:
            parsed_url = urlparse.urlparse(self.url)
            if parsed_url.scheme == 'file':
                if relative and os.path.isabs(parsed_url.path):
                    relative_path = os.path.relpath(parsed_url.path,
                                                    os.path.join(get_buildroot(), self.base_path))
                    return 'file:{path}'.format(path=os.path.normpath(relative_path))
                if not relative and not os.path.isabs(parsed_url.path):
                    abs_path = os.path.join(get_buildroot(), self.base_path, parsed_url.path)
                    return 'file://{path}'.format(path=os.path.normpath(abs_path))
        return self.url

    @property
    def transitive(self):
        return not self.intransitive

    def copy(self, **replacements):
        """Returns a clone of this JarDependency with the given replacements kwargs overlaid."""
        cls = type(self)
        kwargs = self._asdict()
        for key, val in replacements.items():
            if key == 'excludes':
                # Re-validate/freeze replacement excludes just like __new__.
                val = JarDependency._prepare_excludes(val)
            kwargs[key] = val
        org = kwargs.pop('org')
        base_name = kwargs.pop('base_name')
        return cls(org, base_name, **kwargs)

    def __str__(self):
        return 'JarDependency({})'.format(self.coordinate)

    @memoized_property
    def coordinate(self):
        """Returns the maven coordinate of this jar.

        :rtype: :class:`pants.java.jar.M2Coordinate`
        """
        return M2Coordinate(org=self.org, name=self.name, rev=self.rev, classifier=self.classifier,
                            ext=self.ext)

    def cache_key(self):
        # Use the relative URL form so the key is stable regardless of where
        # the repo is checked out on disk.
        excludes = [(e.org, e.name) for e in self.excludes]
        return stable_json_sha1(dict(org=self.org,
                                     name=self.name,
                                     rev=self.rev,
                                     force=self.force,
                                     ext=self.ext,
                                     url=self.get_url(relative=True),
                                     classifier=self.classifier,
                                     transitive=self.transitive,
                                     mutable=self.mutable,
                                     excludes=excludes,))
| apache-2.0 |
mzueger/linux-colibri-t30 | tools/perf/util/setup.py | 560 | 1379 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect the extension build output into the directories supplied by
    # the perf Makefile through the PYTHON_EXTBUILD_LIB/_TMP environment
    # variables (read into the module-level build_lib/build_tmp below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install from the same directory the extension was built into
    # (PYTHON_EXTBUILD_LIB, read into the module-level build_lib).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compiler flags for the C extension; extend with the caller's CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build output locations dictated by the perf Makefile via the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# The 'perf' extension module, compiled from the perf tool's util sources.
perf = Extension('perf',
                 sources = ['util/python.c', 'util/ctype.c', 'util/evlist.c',
                            'util/evsel.c', 'util/cpumap.c', 'util/thread_map.c',
                            'util/util.c', 'util/xyarray.c', 'util/cgroup.c'],
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
roderickvd/nzbToMedia | libs/beets/util/bluelet.py | 17 | 19951 | """Extremely simple pure-Python implementation of coroutine-style
asynchronous socket I/O. Inspired by, but inferior to, Eventlet.
Bluelet can also be thought of as a less-terrible replacement for
asyncore.
Bluelet: easy concurrency without all the messy parallelism.
"""
import socket
import select
import sys
import types
import errno
import traceback
import time
import collections
# A little bit of "six" (Python 2/3 compatibility): cope with PEP 3109 syntax
# changes.
# True when running under Python 3; selects which reraise shim to define.
PY3 = sys.version_info[0] == 3
if PY3:
    def _reraise(typ, exc, tb):
        """Re-raise ``exc`` with traceback ``tb`` (Python 3 spelling)."""
        raise exc.with_traceback(tb)
else:
    # The Python 2 three-argument raise statement is a syntax error under
    # Python 3, so it has to be hidden inside an exec'd string.
    exec("""
def _reraise(typ, exc, tb):
    raise typ, exc, tb
""")
# Basic events used for thread scheduling.
class Event(object):
    """Just a base class identifying Bluelet events. An event is an
    object yielded from a Bluelet thread coroutine to suspend operation
    and communicate with the scheduler.

    Subclasses carry the data the scheduler needs to act on the request
    (a value to return, a coroutine to spawn, a descriptor to wait on).
    """
    pass
class WaitableEvent(Event):
    """A waitable event is one encapsulating an action that can be
    waited for using a select() call. That is, it's an event with an
    associated file descriptor.
    """
    def waitables(self):
        """Return "waitable" objects to pass to select(). Should return
        three iterables for input readiness, output readiness, and
        exceptional conditions (i.e., the three lists passed to
        select()).
        """
        # Default: no file descriptors at all; subclasses override.
        return (), (), ()

    def fire(self):
        """Called when an associated file descriptor becomes ready
        (i.e., is returned from a select() call).
        """
        # The return value (if any) is sent back into the coroutine.
        pass
class ValueEvent(Event):
    """An event that does nothing but return a fixed value."""
    def __init__(self, value):
        # value: sent back into the coroutine on the scheduler's next pass.
        self.value = value
class ExceptionEvent(Event):
    """Raise an exception at the yield point. Used internally."""
    def __init__(self, exc_info):
        # exc_info: a (type, value, traceback) triple as produced by
        # sys.exc_info(); thrown into the coroutine by the scheduler.
        self.exc_info = exc_info
class SpawnEvent(Event):
    """Add a new coroutine thread to the scheduler."""
    def __init__(self, coro):
        # spawned: the generator to schedule alongside the current thread.
        self.spawned = coro
class JoinEvent(Event):
    """Suspend the thread until the specified child thread has
    completed.
    """
    def __init__(self, child):
        # child: the previously-spawned coroutine to wait on.
        self.child = child
class KillEvent(Event):
    """Unschedule a child thread."""
    def __init__(self, child):
        # child: the coroutine to remove from the scheduler.
        self.child = child
class DelegationEvent(Event):
    """Suspend execution of the current thread, start a new thread and,
    once the child thread finished, return control to the parent
    thread.
    """
    def __init__(self, coro):
        # spawned: the child coroutine to run on this thread's behalf.
        self.spawned = coro
class ReturnEvent(Event):
    """Return a value the current thread's delegator at the point of
    delegation. Ends the current (delegate) thread.
    """
    def __init__(self, value):
        # value: handed to the delegating parent when this thread ends.
        self.value = value
class SleepEvent(WaitableEvent):
    """Suspend the thread until ``duration`` seconds have elapsed."""
    def __init__(self, duration):
        # Absolute wall-clock time at which the thread should resume.
        self.wakeup_time = time.time() + duration

    def time_left(self):
        """Seconds remaining until the wakeup time (never negative)."""
        remaining = self.wakeup_time - time.time()
        return remaining if remaining > 0.0 else 0.0
class ReadEvent(WaitableEvent):
    """Reads from a file-like object."""
    def __init__(self, fd, bufsize):
        self.fd = fd
        self.bufsize = bufsize

    def waitables(self):
        # Wait for the fd to become readable.
        return (self.fd,), (), ()

    def fire(self):
        # Perform the (now non-blocking) read and return the data.
        return self.fd.read(self.bufsize)
class WriteEvent(WaitableEvent):
    """Writes to a file-like object."""
    def __init__(self, fd, data):
        self.fd = fd
        self.data = data

    def waitables(self):
        # BUG FIX: this method was previously named ``waitable`` (singular),
        # so it never overrode WaitableEvent.waitables() -- the one the
        # scheduler's select loop actually calls.  Write events therefore
        # exposed no file descriptor and could never fire.  Register the fd
        # in the write-readiness list.
        return (), (self.fd,), ()

    def fire(self):
        # Perform the (now non-blocking) write.
        self.fd.write(self.data)
# Core logic for executing and scheduling threads.
def _event_select(events):
    """Perform a select() over all the Events provided, returning the
    ones ready to be fired. Only WaitableEvents (including SleepEvents)
    matter here; all other events are ignored (and thus postponed).
    """
    # Gather waitables and wakeup times.  The map is keyed by
    # (mode, waitable) so the same descriptor can be registered for
    # reading, writing, and exceptional conditions simultaneously.
    waitable_to_event = {}
    rlist, wlist, xlist = [], [], []
    earliest_wakeup = None
    for event in events:
        if isinstance(event, SleepEvent):
            # NOTE: truthiness test, so a wakeup_time of exactly 0.0 would
            # be treated as "unset"; harmless for real clock values.
            if not earliest_wakeup:
                earliest_wakeup = event.wakeup_time
            else:
                earliest_wakeup = min(earliest_wakeup, event.wakeup_time)
        elif isinstance(event, WaitableEvent):
            r, w, x = event.waitables()
            rlist += r
            wlist += w
            xlist += x
            for waitable in r:
                waitable_to_event[('r', waitable)] = event
            for waitable in w:
                waitable_to_event[('w', waitable)] = event
            for waitable in x:
                waitable_to_event[('x', waitable)] = event
    # If we have a any sleeping threads, determine how long to sleep.
    if earliest_wakeup:
        timeout = max(earliest_wakeup - time.time(), 0.0)
    else:
        timeout = None
    # Perform select() if we have any waitables.
    if rlist or wlist or xlist:
        rready, wready, xready = select.select(rlist, wlist, xlist, timeout)
    else:
        # No descriptors to watch: just sleep until the earliest wakeup.
        rready, wready, xready = (), (), ()
        if timeout:
            time.sleep(timeout)
    # Gather ready events corresponding to the ready waitables.
    ready_events = set()
    for ready in rready:
        ready_events.add(waitable_to_event[('r', ready)])
    for ready in wready:
        ready_events.add(waitable_to_event[('w', ready)])
    for ready in xready:
        ready_events.add(waitable_to_event[('x', ready)])
    # Gather any finished sleeps.
    for event in events:
        if isinstance(event, SleepEvent) and event.time_left() == 0.0:
            ready_events.add(event)
    return ready_events
class ThreadException(Exception):
    """Wraps an exception raised inside a scheduled coroutine, carrying
    the coroutine and its ``sys.exc_info()`` triple so the scheduler can
    propagate it to the right place.
    """
    def __init__(self, coro, exc_info):
        self.coro = coro
        self.exc_info = exc_info

    def reraise(self):
        # Re-raise the stored exception with its original traceback.
        _reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
class Delegated(Event):
    """Placeholder indicating that a thread has delegated execution to a
    different thread.
    """
    def __init__(self, child):
        # child: the coroutine now running on this thread's behalf.
        self.child = child
def run(root_coro):
    """Schedules a coroutine, running it to completion. This
    encapsulates the Bluelet scheduler, which the root coroutine can
    add to by spawning new coroutines.
    """
    # The "threads" dictionary keeps track of all the currently-
    # executing and suspended coroutines. It maps coroutines to their
    # currently "blocking" event. The event value may be SUSPENDED if
    # the coroutine is waiting on some other condition: namely, a
    # delegated coroutine or a joined coroutine. In this case, the
    # coroutine should *also* appear as a value in one of the below
    # dictionaries `delegators` or `joiners`.
    threads = {root_coro: ValueEvent(None)}

    # Maps child coroutines to delegating parents.
    delegators = {}

    # Maps child coroutines to joining (exit-waiting) parents.
    joiners = collections.defaultdict(list)

    def complete_thread(coro, return_value):
        """Remove a coroutine from the scheduling pool, awaking
        delegators and joiners as necessary and returning the specified
        value to any delegating parent.
        """
        del threads[coro]

        # Resume delegator.
        if coro in delegators:
            threads[delegators[coro]] = ValueEvent(return_value)
            del delegators[coro]

        # Resume joiners.
        if coro in joiners:
            for parent in joiners[coro]:
                threads[parent] = ValueEvent(None)
            del joiners[coro]

    def advance_thread(coro, value, is_exc=False):
        """After an event is fired, run a given coroutine associated with
        it in the threads dict until it yields again. If the coroutine
        exits, then the thread is removed from the pool. If the coroutine
        raises an exception, it is reraised in a ThreadException. If
        is_exc is True, then the value must be an exc_info tuple and the
        exception is thrown into the coroutine.
        """
        try:
            if is_exc:
                next_event = coro.throw(*value)
            else:
                next_event = coro.send(value)
        except StopIteration:
            # Thread is done.
            complete_thread(coro, None)
        except:
            # Thread raised some other exception.
            del threads[coro]
            raise ThreadException(coro, sys.exc_info())
        else:
            if isinstance(next_event, types.GeneratorType):
                # Automatically invoke sub-coroutines. (Shorthand for
                # explicit bluelet.call().)
                next_event = DelegationEvent(next_event)
            threads[coro] = next_event

    def kill_thread(coro):
        """Unschedule this thread and its (recursive) delegates.
        """
        # Collect all coroutines in the delegation stack.
        coros = [coro]
        while isinstance(threads[coro], Delegated):
            coro = threads[coro].child
            coros.append(coro)

        # Complete each coroutine from the top to the bottom of the
        # stack.
        for coro in reversed(coros):
            complete_thread(coro, None)

    # Continue advancing threads until root thread exits.
    exit_te = None
    while threads:
        try:
            # Look for events that can be run immediately. Continue
            # running immediate events until nothing is ready.
            while True:
                have_ready = False
                # Iterate over a snapshot: advancing a thread mutates
                # the `threads` dict.
                for coro, event in list(threads.items()):
                    if isinstance(event, SpawnEvent):
                        threads[event.spawned] = ValueEvent(None)  # Spawn.
                        advance_thread(coro, None)
                        have_ready = True
                    elif isinstance(event, ValueEvent):
                        advance_thread(coro, event.value)
                        have_ready = True
                    elif isinstance(event, ExceptionEvent):
                        advance_thread(coro, event.exc_info, True)
                        have_ready = True
                    elif isinstance(event, DelegationEvent):
                        threads[coro] = Delegated(event.spawned)  # Suspend.
                        threads[event.spawned] = ValueEvent(None)  # Spawn.
                        delegators[event.spawned] = coro
                        have_ready = True
                    elif isinstance(event, ReturnEvent):
                        # Thread is done.
                        complete_thread(coro, event.value)
                        have_ready = True
                    elif isinstance(event, JoinEvent):
                        threads[coro] = SUSPENDED  # Suspend.
                        joiners[event.child].append(coro)
                        have_ready = True
                    elif isinstance(event, KillEvent):
                        threads[coro] = ValueEvent(None)
                        kill_thread(event.child)
                        have_ready = True

                # Only start the select when nothing else is ready.
                if not have_ready:
                    break

            # Wait and fire.  Map each blocking event back to its thread
            # so fired events can be matched up again.
            event2coro = dict((v, k) for k, v in threads.items())
            for event in _event_select(threads.values()):
                # Run the IO operation, but catch socket errors.
                try:
                    value = event.fire()
                except socket.error as exc:
                    if isinstance(exc.args, tuple) and \
                            exc.args[0] == errno.EPIPE:
                        # Broken pipe. Remote host disconnected.
                        pass
                    else:
                        traceback.print_exc()
                    # Abort the coroutine.
                    threads[event2coro[event]] = ReturnEvent(None)
                else:
                    advance_thread(event2coro[event], value)

        except ThreadException as te:
            # Exception raised from inside a thread.
            event = ExceptionEvent(te.exc_info)
            if te.coro in delegators:
                # The thread is a delegate. Raise exception in its
                # delegator.
                threads[delegators[te.coro]] = event
                del delegators[te.coro]
            else:
                # The thread is root-level. Raise in client code.
                exit_te = te
                break

        except:
            # For instance, KeyboardInterrupt during select(). Raise
            # into root thread and terminate others.
            threads = {root_coro: ExceptionEvent(sys.exc_info())}

    # If any threads still remain, kill them.
    for coro in threads:
        coro.close()

    # If we're exiting with an exception, raise it in the client.
    if exit_te:
        exit_te.reraise()
# Sockets and their associated events.
class SocketClosedError(Exception):
    """Raised when an operation is attempted on an already-closed
    Listener or Connection.
    """
    pass
class Listener(object):
    """A socket wrapper object for listening sockets.
    """
    def __init__(self, host, port):
        """Create a listening socket on the given hostname and port.
        """
        self._closed = False
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick rebinding of the address after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((host, port))
        self.sock.listen(5)

    def accept(self):
        """An event that waits for a connection on the listening socket.
        When a connection is made, the event returns a Connection
        object.
        """
        if self._closed:
            raise SocketClosedError()
        return AcceptEvent(self)

    def close(self):
        """Immediately close the listening socket. (Not an event.)
        """
        self._closed = True
        self.sock.close()
class Connection(object):
    """A socket wrapper object for connected sockets.
    """
    def __init__(self, sock, addr):
        self.sock = sock
        self.addr = addr
        # Bytes already read from the socket but not yet consumed by the
        # caller (e.g. data past a readline() terminator).
        self._buf = b''
        self._closed = False

    def close(self):
        """Close the connection."""
        self._closed = True
        self.sock.close()

    def recv(self, size):
        """Read at most size bytes of data from the socket."""
        if self._closed:
            raise SocketClosedError()

        if self._buf:
            # We already have data read previously.
            out = self._buf[:size]
            self._buf = self._buf[size:]
            return ValueEvent(out)
        else:
            return ReceiveEvent(self, size)

    def send(self, data):
        """Sends data on the socket, returning the number of bytes
        successfully sent.
        """
        if self._closed:
            raise SocketClosedError()
        return SendEvent(self, data)

    def sendall(self, data):
        """Send all of data on the socket."""
        if self._closed:
            raise SocketClosedError()
        return SendEvent(self, data, True)

    def readline(self, terminator=b"\n", bufsize=1024):
        """Reads a line (delimited by terminator) from the socket."""
        if self._closed:
            raise SocketClosedError()

        while True:
            if terminator in self._buf:
                # A full line is buffered: return it (with terminator)
                # and keep the remainder for subsequent reads.
                line, self._buf = self._buf.split(terminator, 1)
                line += terminator
                yield ReturnEvent(line)
                break
            data = yield ReceiveEvent(self, bufsize)
            if data:
                self._buf += data
            else:
                # Peer closed the connection: return whatever is left,
                # even without a terminator.
                line = self._buf
                self._buf = b''
                yield ReturnEvent(line)
                break
class AcceptEvent(WaitableEvent):
    """An event for Listener objects (listening sockets) that suspends
    execution until the socket gets a connection.
    """
    def __init__(self, listener):
        self.listener = listener

    def waitables(self):
        # Wait for the listening socket to become readable (= a client
        # is waiting to be accepted).
        return (self.listener.sock,), (), ()

    def fire(self):
        sock, addr = self.listener.sock.accept()
        return Connection(sock, addr)
class ReceiveEvent(WaitableEvent):
    """An event for Connection objects (connected sockets) for
    asynchronously reading data.
    """
    def __init__(self, conn, bufsize):
        self.conn = conn
        self.bufsize = bufsize

    def waitables(self):
        # Wait for the connected socket to become readable.
        return (self.conn.sock,), (), ()

    def fire(self):
        return self.conn.sock.recv(self.bufsize)
class SendEvent(WaitableEvent):
    """An event for Connection objects (connected sockets) for
    asynchronously writing data.
    """
    def __init__(self, conn, data, sendall=False):
        self.conn = conn
        self.data = data
        self.sendall = sendall

    def waitables(self):
        # Wait for the connected socket to become writable.
        return (), (self.conn.sock,), ()

    def fire(self):
        # send() returns the number of bytes written; sendall() returns
        # None after writing everything.
        send_op = self.conn.sock.sendall if self.sendall else self.conn.sock.send
        return send_op(self.data)
# Public interface for threads; each returns an event object that
# can immediately be "yield"ed.
def null():
    """Event: yield to the scheduler without doing anything special.

    The coroutine is resumed (with a None value) on the scheduler's
    next pass.
    """
    return ValueEvent(None)
def spawn(coro):
    """Event: add another coroutine to the scheduler. Both the parent
    and child coroutines run concurrently.

    Raises ValueError if ``coro`` is not a generator (coroutine).
    """
    if isinstance(coro, types.GeneratorType):
        return SpawnEvent(coro)
    raise ValueError('%s is not a coroutine' % str(coro))
def call(coro):
    """Event: delegate to another coroutine. The current coroutine
    is resumed once the sub-coroutine finishes. If the sub-coroutine
    returns a value using end(), then this event returns that value.

    Raises ValueError if ``coro`` is not a generator (coroutine).
    """
    if isinstance(coro, types.GeneratorType):
        return DelegationEvent(coro)
    raise ValueError('%s is not a coroutine' % str(coro))
def end(value=None):
    """Event: ends the coroutine and returns a value to its
    delegator.

    ``value`` defaults to None when the delegate has nothing to return.
    """
    return ReturnEvent(value)
def read(fd, bufsize=None):
    """Event: read from a file descriptor asynchronously.

    With bufsize=None, delegates to a helper coroutine that reads the
    fd to exhaustion in 1024-byte chunks and returns everything.
    """
    if bufsize is None:
        # Read all.
        def reader():
            buf = []
            while True:
                data = yield read(fd, 1024)
                if not data:
                    break
                buf.append(data)
            # NOTE(review): joins with a str separator, which assumes a
            # text-mode file object; bytes chunks would raise TypeError.
            # Confirm against callers.
            yield ReturnEvent(''.join(buf))
        return DelegationEvent(reader())
    else:
        return ReadEvent(fd, bufsize)
def write(fd, data):
    """Event: write ``data`` to a file descriptor asynchronously."""
    event = WriteEvent(fd, data)
    return event
def connect(host, port):
    """Event: open a TCP connection to ``(host, port)`` and return a
    Connection object wrapping the connected socket.
    """
    address = (host, port)
    sock = socket.create_connection(address)
    return ValueEvent(Connection(sock, address))
def sleep(duration):
    """Event: suspend this coroutine for ``duration`` seconds."""
    event = SleepEvent(duration)
    return event
def join(coro):
    """Event: block until a previously spawn()ed coroutine finishes."""
    event = JoinEvent(coro)
    return event
def kill(coro):
    """Event: halt the execution of another spawn()ed coroutine."""
    event = KillEvent(coro)
    return event
# Convenience function for running socket servers.
def server(host, port, func):
"""A coroutine that runs a network server. Host and port specify the
listening address. func should be a coroutine that takes a single
parameter, a Connection object. The coroutine is invoked for every
incoming connection on the listening socket.
"""
def handler(conn):
try:
yield func(conn)
finally:
conn.close()
listener = Listener(host, port)
try:
while True:
conn = yield listener.accept()
yield spawn(handler(conn))
except KeyboardInterrupt:
pass
finally:
listener.close()
| gpl-3.0 |
andyrimmer/Platypus | scripts/computeIndelRatio.py | 2 | 1493 | """
This script produces summary plots and tables of the Platypus SNP and indel
calls, and comparison plots of Platypus calls with the validated 1000 genomes
calls.
"""
from __future__ import division
from math import sqrt,pow,log,exp,pi,log10
import sys
import gzip
###################################################################################################
def summariseVariantCalls():
    """Read VCF records from stdin and print the SNP count and the
    transition/transversion (Ts/Tv) ratio.

    Transitions are A<->G and C<->T substitutions; every other pair is
    counted as a transversion.  Header lines (starting with '#') and
    records that fail to parse are skipped.
    """
    nSNPs = 0
    nTransitionSnps = 0
    nTransversionSnps = 0

    for line in sys.stdin:
        try:
            if line[0] == "#":
                continue

            cols = line.split("\t")
            int(cols[1])  # positional sanity check; a bad POS skips the record
            ref = cols[3]
            alt = cols[4]

            nSNPs += 1
            # Sorting makes the transition test orientation-independent.
            alleles = sorted([ref, alt])
            if alleles == ["A", "G"] or alleles == ["C", "T"]:
                nTransitionSnps += 1
            else:
                nTransversionSnps += 1
        except Exception:
            # Best-effort parsing: silently skip malformed records.
            continue

    # Guard against ZeroDivisionError when no transversions were seen.
    if nTransversionSnps > 0:
        tstv = nTransitionSnps / nTransversionSnps
    else:
        tstv = float("nan")

    # Parenthesised print works identically on Python 2 and 3.
    print("nSNP = %s. \t TsTv = %s" % (nSNPs, tstv))
###################################################################################################
# Script entry point: summarise the VCF stream supplied on stdin.
if __name__ == "__main__":
    summariseVariantCalls()
###################################################################################################
| gpl-3.0 |
thaumos/ansible | lib/ansible/modules/network/fortimanager/fmgr_fwobj_address.py | 38 | 24230 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_fwobj_address
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Allows the management of firewall objects in FortiManager
description:
- Allows for the management of IPv4, IPv6, and multicast address objects within FortiManager.
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
allow_routing:
description:
- Enable/disable use of this address in the static route configuration.
choices: ['enable', 'disable']
default: 'disable'
associated_interface:
description:
- Associated interface name.
cache_ttl:
description:
- Minimal TTL of individual IP addresses in FQDN cache. Only applies when type = wildcard-fqdn.
color:
description:
- Color of the object in FortiManager GUI.
- Takes integers 1-32
default: 22
comment:
description:
- Comment for the object in FortiManager.
country:
description:
- Country name. Required if type = geographic.
end_ip:
description:
- End IP. Only used when ipv4 = iprange.
group_members:
description:
- Address group member. If this is defined w/out group_name, the operation will fail.
group_name:
description:
- Address group name. If this is defined in playbook task, all other options are ignored.
ipv4:
description:
- Type of IPv4 Object.
- Must not be specified with either multicast or IPv6 parameters.
choices: ['ipmask', 'iprange', 'fqdn', 'wildcard', 'geography', 'wildcard-fqdn', 'group']
ipv4addr:
description:
- IP and network mask. If only defining one IP use this parameter. (i.e. 10.7.220.30/255.255.255.255)
- Can also define subnets (i.e. 10.7.220.0/255.255.255.0)
- Also accepts CIDR (i.e. 10.7.220.0/24)
- If Netmask is omitted after IP address, /32 is assumed.
- When multicast is set to Broadcast Subnet the ipv4addr parameter is used to specify the subnet.
ipv6:
description:
- Puts module into IPv6 mode.
- Must not be specified with either ipv4 or multicast parameters.
choices: ['ip', 'iprange', 'group']
ipv6addr:
description:
- IPv6 address in full. (i.e. 2001:0db8:85a3:0000:0000:8a2e:0370:7334)
fqdn:
description:
- Fully qualified domain name.
mode:
description:
- Sets one of three modes for managing the object.
choices: ['add', 'set', 'delete']
default: add
multicast:
description:
- Manages Multicast Address Objects.
- Sets either a Multicast IP Range or a Broadcast Subnet.
- Must not be specified with either ipv4 or ipv6 parameters.
- When set to Broadcast Subnet the ipv4addr parameter is used to specify the subnet.
- Can create IPv4 Multicast Objects (multicastrange and broadcastmask options -- uses start/end-ip and ipv4addr).
choices: ['multicastrange', 'broadcastmask', 'ip6']
name:
description:
- Friendly Name Address object name in FortiManager.
obj_id:
description:
- Object ID for NSX.
start_ip:
description:
- Start IP. Only used when ipv4 = iprange.
visibility:
description:
- Enable/disable address visibility.
choices: ['enable', 'disable']
default: 'enable'
wildcard:
description:
- IP address and wildcard netmask. Required if ipv4 = wildcard.
wildcard_fqdn:
description:
- Wildcard FQDN. Required if ipv4 = wildcard-fqdn.
'''
EXAMPLES = '''
- name: ADD IPv4 IP ADDRESS OBJECT
fmgr_fwobj_address:
ipv4: "ipmask"
ipv4addr: "10.7.220.30/32"
name: "ansible_v4Obj"
comment: "Created by Ansible"
color: "6"
- name: ADD IPv4 IP ADDRESS OBJECT MORE OPTIONS
fmgr_fwobj_address:
ipv4: "ipmask"
ipv4addr: "10.7.220.34/32"
name: "ansible_v4Obj_MORE"
comment: "Created by Ansible"
color: "6"
allow_routing: "enable"
cache_ttl: "180"
associated_interface: "port1"
obj_id: "123"
- name: ADD IPv4 IP ADDRESS SUBNET OBJECT
fmgr_fwobj_address:
ipv4: "ipmask"
ipv4addr: "10.7.220.0/255.255.255.128"
name: "ansible_subnet"
comment: "Created by Ansible"
mode: "set"
- name: ADD IPv4 IP ADDRESS RANGE OBJECT
fmgr_fwobj_address:
ipv4: "iprange"
start_ip: "10.7.220.1"
end_ip: "10.7.220.125"
name: "ansible_range"
comment: "Created by Ansible"
- name: ADD IPv4 IP ADDRESS WILDCARD OBJECT
fmgr_fwobj_address:
ipv4: "wildcard"
wildcard: "10.7.220.30/255.255.255.255"
name: "ansible_wildcard"
comment: "Created by Ansible"
- name: ADD IPv4 IP ADDRESS WILDCARD FQDN OBJECT
fmgr_fwobj_address:
ipv4: "wildcard-fqdn"
wildcard_fqdn: "*.myds.com"
name: "Synology myds DDNS service"
comment: "Created by Ansible"
- name: ADD IPv4 IP ADDRESS FQDN OBJECT
fmgr_fwobj_address:
ipv4: "fqdn"
fqdn: "ansible.com"
name: "ansible_fqdn"
comment: "Created by Ansible"
- name: ADD IPv4 IP ADDRESS GEO OBJECT
fmgr_fwobj_address:
ipv4: "geography"
country: "usa"
name: "ansible_geo"
comment: "Created by Ansible"
- name: ADD IPv6 ADDRESS
fmgr_fwobj_address:
ipv6: "ip"
ipv6addr: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
name: "ansible_v6Obj"
comment: "Created by Ansible"
- name: ADD IPv6 ADDRESS RANGE
fmgr_fwobj_address:
ipv6: "iprange"
start_ip: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
end_ip: "2001:0db8:85a3:0000:0000:8a2e:0370:7446"
name: "ansible_v6range"
comment: "Created by Ansible"
- name: ADD IPv4 IP ADDRESS GROUP
fmgr_fwobj_address:
ipv4: "group"
group_name: "ansibleIPv4Group"
group_members: "ansible_fqdn, ansible_wildcard, ansible_range"
- name: ADD IPv6 IP ADDRESS GROUP
fmgr_fwobj_address:
ipv6: "group"
group_name: "ansibleIPv6Group"
group_members: "ansible_v6Obj, ansible_v6range"
- name: ADD MULTICAST RANGE
fmgr_fwobj_address:
multicast: "multicastrange"
start_ip: "224.0.0.251"
end_ip: "224.0.0.251"
name: "ansible_multicastrange"
comment: "Created by Ansible"
- name: ADD BROADCAST SUBNET
fmgr_fwobj_address:
multicast: "broadcastmask"
ipv4addr: "10.7.220.0/24"
name: "ansible_broadcastSubnet"
comment: "Created by Ansible"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
import re
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def fmgr_fwobj_ipv4(fmgr, paramgram):
    """
    Add, set, or delete an IPv4 address object or address group.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    # EVAL THE MODE PARAMETER FOR SET OR ADD
    if paramgram["mode"] in ['set', 'add']:
        # CREATE THE DATAGRAM DICTIONARY
        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
        # SOME PARAMETERS SHOWN IN THIS DICTIONARY WE DON'T EVEN ASK THE USER FOR IN PLAYBOOKS BUT ARE REQUIRED
        datagram = {
            "comment": paramgram["comment"],
            "associated-interface": paramgram["associated-interface"],
            "cache-ttl": paramgram["cache-ttl"],
            "name": paramgram["name"],
            "allow-routing": paramgram["allow-routing"],
            "color": paramgram["color"],
            "meta fields": {},
            "dynamic_mapping": [],
            # BUGFIX: this was previously copied from allow-routing, which
            # silently hid objects whenever routing was disabled.
            "visibility": paramgram["visibility"],
            "type": paramgram["ipv4"],
        }

        # SET THE CORRECT URL BASED ON THE TYPE (WE'RE DOING GROUPS IN THIS METHOD, TOO)
        if datagram["type"] == "group":
            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp'.format(adom=paramgram["adom"])
        else:
            url = '/pm/config/adom/{adom}/obj/firewall/address'.format(adom=paramgram["adom"])

        #########################
        # IF type = 'ipmask'
        #########################
        if datagram["type"] == "ipmask":
            # SPLIT "ip/mask" INTO ITS TWO COMPONENTS
            subnet = paramgram["ipv4addr"].split("/")
            # IF THE MASK IS NOT A DOTTED QUAD (xxx.xxx.xxx.xxx), TREAT IT AS
            # A CIDR PREFIX LENGTH AND CONVERT IT.  Dots are escaped so that
            # only genuine dotted quads bypass the conversion.
            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', subnet[1]):
                subnet[1] = fmgr._tools.cidr_to_netmask(subnet[1])
            datagram["subnet"] = subnet

        #########################
        # IF type = 'iprange'
        #########################
        if datagram["type"] == "iprange":
            datagram["start-ip"] = paramgram["start-ip"]
            datagram["end-ip"] = paramgram["end-ip"]
            datagram["subnet"] = ["0.0.0.0", "0.0.0.0"]

        #########################
        # IF type = 'geography'
        #########################
        if datagram["type"] == "geography":
            datagram["country"] = paramgram["country"]

        #########################
        # IF type = 'wildcard'
        #########################
        if datagram["type"] == "wildcard":
            # SAME SPLIT/NORMALISE TREATMENT AS 'ipmask' ABOVE
            wildcard = paramgram["wildcard"].split("/")
            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', wildcard[1]):
                wildcard[1] = fmgr._tools.cidr_to_netmask(wildcard[1])
            datagram["wildcard"] = wildcard

        #########################
        # IF type = 'wildcard-fqdn'
        #########################
        if datagram["type"] == "wildcard-fqdn":
            datagram["wildcard-fqdn"] = paramgram["wildcard-fqdn"]

        #########################
        # IF type = 'fqdn'
        #########################
        if datagram["type"] == "fqdn":
            datagram["fqdn"] = paramgram["fqdn"]

        #########################
        # IF type = 'group'
        #########################
        if datagram["type"] == "group":
            # GROUPS GET A FRESH, SMALLER DATAGRAM
            datagram = {
                "comment": paramgram["comment"],
                "name": paramgram["group_name"],
                "color": paramgram["color"],
                "meta fields": {},
                "dynamic_mapping": [],
                "visibility": paramgram["visibility"]
            }
            # "a, b ,c" -> ["a", "b", "c"]
            group_members = paramgram["group_members"].replace(" ", "")
            datagram["member"] = group_members.split(",")

    # EVAL THE MODE PARAMETER FOR DELETE
    if paramgram["mode"] == "delete":
        # IF A GROUP, SET THE CORRECT NAME AND URL FOR THE GROUP ENDPOINT
        if paramgram["ipv4"] == "group":
            datagram = {}
            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp/{name}'.format(adom=paramgram["adom"],
                                                                              name=paramgram["group_name"])
        # OTHERWISE WE'RE JUST GOING TO USE THE ADDRESS ENDPOINT
        else:
            datagram = {}
            url = '/pm/config/adom/{adom}/obj/firewall/address/{name}'.format(adom=paramgram["adom"],
                                                                              name=paramgram["name"])

    response = fmgr.process_request(url, datagram, paramgram["mode"])
    return response
def fmgr_fwobj_ipv6(fmgr, paramgram):
    """
    Add, set, or delete an IPv6 address object or address group.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    # EVAL THE MODE PARAMETER FOR SET OR ADD
    if paramgram["mode"] in ['set', 'add']:
        # CREATE THE DATAGRAM DICTIONARY
        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
        datagram = {
            "comment": paramgram["comment"],
            "name": paramgram["name"],
            "color": paramgram["color"],
            "dynamic_mapping": [],
            "visibility": paramgram["visibility"],
            "type": paramgram["ipv6"]
        }

        # SET THE CORRECT URL BASED ON THE TYPE (WE'RE DOING GROUPS IN THIS METHOD, TOO)
        if datagram["type"] == "group":
            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp6'.format(adom=paramgram["adom"])
        else:
            url = '/pm/config/adom/{adom}/obj/firewall/address6'.format(adom=paramgram["adom"])

        #########################
        # IF type = 'ip'
        #########################
        if datagram["type"] == "ip":
            # THE JSON API CALLS A SINGLE IPv6 ADDRESS AN "ipprefix"
            datagram["type"] = "ipprefix"
            datagram["ip6"] = paramgram["ipv6addr"]

        #########################
        # IF type = 'iprange'
        #########################
        if datagram["type"] == "iprange":
            datagram["start-ip"] = paramgram["start-ip"]
            datagram["end-ip"] = paramgram["end-ip"]

        #########################
        # IF type = 'group'
        #########################
        if datagram["type"] == "group":
            # GROUPS GET A FRESH, SMALLER DATAGRAM (the dead "datagram = None"
            # pre-assignment has been removed)
            datagram = {
                "comment": paramgram["comment"],
                "name": paramgram["group_name"],
                "color": paramgram["color"],
                "visibility": paramgram["visibility"]
            }
            # "a, b ,c" -> ["a", "b", "c"]
            group_members = paramgram["group_members"].replace(" ", "")
            datagram["member"] = group_members.split(",")

    # EVAL THE MODE PARAMETER FOR DELETE
    if paramgram["mode"] == "delete":
        # IF A GROUP, SET THE CORRECT NAME AND URL FOR THE GROUP ENDPOINT
        if paramgram["ipv6"] == "group":
            datagram = {}
            url = '/pm/config/adom/{adom}/obj/firewall/addrgrp6/{name}'.format(adom=paramgram["adom"],
                                                                               name=paramgram["group_name"])
        # OTHERWISE WE'RE JUST GOING TO USE THE ADDRESS ENDPOINT
        else:
            datagram = {}
            url = '/pm/config/adom/{adom}/obj/firewall/address6/{name}'.format(adom=paramgram["adom"],
                                                                               name=paramgram["name"])

    response = fmgr.process_request(url, datagram, paramgram["mode"])
    return response
def fmgr_fwobj_multicast(fmgr, paramgram):
    """
    Add, set, or delete a multicast address object (multicast IP range or
    broadcast subnet).

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    # EVAL THE MODE PARAMETER FOR SET OR ADD
    if paramgram["mode"] in ['set', 'add']:
        # CREATE THE DATAGRAM DICTIONARY
        # ENSURE THE DATAGRAM KEYS MATCH THE JSON API GUIDE ATTRIBUTES, NOT WHAT IS IN ANSIBLE
        datagram = {
            "associated-interface": paramgram["associated-interface"],
            "comment": paramgram["comment"],
            "name": paramgram["name"],
            "color": paramgram["color"],
            "type": paramgram["multicast"],
            "visibility": paramgram["visibility"],
        }

        # SET THE CORRECT URL
        url = '/pm/config/adom/{adom}/obj/firewall/multicast-address'.format(adom=paramgram["adom"])

        #########################
        # IF type = 'multicastrange'
        #########################
        if paramgram["multicast"] == "multicastrange":
            datagram["start-ip"] = paramgram["start-ip"]
            datagram["end-ip"] = paramgram["end-ip"]
            datagram["subnet"] = ["0.0.0.0", "0.0.0.0"]

        #########################
        # IF type = 'broadcastmask'
        #########################
        if paramgram["multicast"] == "broadcastmask":
            # SPLIT "ip/mask"; IF THE MASK IS NOT A DOTTED QUAD, TREAT IT AS A
            # CIDR PREFIX LENGTH AND CONVERT IT.  Dots are escaped so only
            # genuine xxx.xxx.xxx.xxx strings bypass the conversion.
            subnet = paramgram["ipv4addr"].split("/")
            if not re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', subnet[1]):
                subnet[1] = fmgr._tools.cidr_to_netmask(subnet[1])
            datagram["subnet"] = subnet

    # EVAL THE MODE PARAMETER FOR DELETE
    if paramgram["mode"] == "delete":
        datagram = {
            "name": paramgram["name"]
        }
        # SET THE CORRECT URL FOR DELETE
        url = '/pm/config/adom/{adom}/obj/firewall/multicast-address/{name}'.format(adom=paramgram["adom"],
                                                                                    name=paramgram["name"])

    response = fmgr.process_request(url, datagram, paramgram["mode"])
    return response
def main():
    """Module entry point: build the argument spec, normalise the options
    into the JSON-API-keyed paramgram dict, open the FortiManager socket
    connection, and dispatch to the ipv4/ipv6/multicast handler."""
    argument_spec = dict(
        adom=dict(required=False, type="str", default="root"),
        mode=dict(choices=["add", "set", "delete"], type="str", default="add"),
        allow_routing=dict(required=False, type="str", choices=['enable', 'disable'], default="disable"),
        associated_interface=dict(required=False, type="str"),
        cache_ttl=dict(required=False, type="str"),
        color=dict(required=False, type="str", default=22),
        comment=dict(required=False, type="str"),
        country=dict(required=False, type="str"),
        fqdn=dict(required=False, type="str"),
        name=dict(required=False, type="str"),
        start_ip=dict(required=False, type="str"),
        end_ip=dict(required=False, type="str"),
        ipv4=dict(required=False, type="str", choices=['ipmask', 'iprange', 'fqdn', 'wildcard',
                                                       'geography', 'wildcard-fqdn', 'group']),
        visibility=dict(required=False, type="str", choices=['enable', 'disable'], default="enable"),
        wildcard=dict(required=False, type="str"),
        wildcard_fqdn=dict(required=False, type="str"),
        ipv6=dict(required=False, type="str", choices=['ip', 'iprange', 'group']),
        group_members=dict(required=False, type="str"),
        group_name=dict(required=False, type="str"),
        ipv4addr=dict(required=False, type="str"),
        ipv6addr=dict(required=False, type="str"),
        multicast=dict(required=False, type="str", choices=['multicastrange', 'broadcastmask', 'ip6']),
        obj_id=dict(required=False, type="str"),
    )
    # The three address families are mutually exclusive by design.
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
                           mutually_exclusive=[
                               ['ipv4', 'ipv6'],
                               ['ipv4', 'multicast'],
                               ['ipv6', 'multicast']
                           ])
    # Re-key the Ansible options into the hyphenated names the JSON API uses.
    paramgram = {
        "adom": module.params["adom"],
        "allow-routing": module.params["allow_routing"],
        "associated-interface": module.params["associated_interface"],
        "cache-ttl": module.params["cache_ttl"],
        "color": module.params["color"],
        "comment": module.params["comment"],
        "country": module.params["country"],
        "end-ip": module.params["end_ip"],
        "fqdn": module.params["fqdn"],
        "name": module.params["name"],
        "start-ip": module.params["start_ip"],
        "visibility": module.params["visibility"],
        "wildcard": module.params["wildcard"],
        "wildcard-fqdn": module.params["wildcard_fqdn"],
        "ipv6": module.params["ipv6"],
        "ipv4": module.params["ipv4"],
        "group_members": module.params["group_members"],
        "group_name": module.params["group_name"],
        "ipv4addr": module.params["ipv4addr"],
        "ipv6addr": module.params["ipv6addr"],
        "multicast": module.params["multicast"],
        "mode": module.params["mode"],
        "obj-id": module.params["obj_id"],
    }
    module.paramgram = paramgram
    fmgr = None
    # A persistent-connection socket is mandatory for this module.
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr._tools = FMGRCommon()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    results = DEFAULT_RESULT_OBJ
    try:
        # Dispatch on whichever address-family option was supplied.
        if paramgram["ipv4"]:
            results = fmgr_fwobj_ipv4(fmgr, paramgram)
        elif paramgram["ipv6"]:
            results = fmgr_fwobj_ipv6(fmgr, paramgram)
        elif paramgram["multicast"]:
            results = fmgr_fwobj_multicast(fmgr, paramgram)
        fmgr.govern_response(module=module, results=results,
                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        raise FMGBaseException(err)
    if results is not None:
        return module.exit_json(**results[1])
    else:
        return module.exit_json(msg="Couldn't find a proper ipv4 or ipv6 or multicast parameter "
                                    "to run in the logic tree. Exiting...")
# Invoke main() only when executed directly by Ansible, not on import.
if __name__ == "__main__":
    main()
| gpl-3.0 |
betoesquivel/CIE | flask/lib/python2.7/site-packages/sqlalchemy/orm/base.py | 32 | 13181 | # orm/base.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Constants and rudimental functions used throughout the ORM.
"""
from .. import util, inspection, exc as sa_exc
from ..sql import expression
from . import exc
import operator
PASSIVE_NO_RESULT = util.symbol(
'PASSIVE_NO_RESULT',
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
"""
)
ATTR_WAS_SET = util.symbol(
'ATTR_WAS_SET',
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
"""
)
ATTR_EMPTY = util.symbol(
'ATTR_EMPTY',
"""Symbol used internally to indicate an attribute had no callable."""
)
NO_VALUE = util.symbol(
'NO_VALUE',
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
"""
)
NEVER_SET = util.symbol(
'NEVER_SET',
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
"""
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""", canonical=0
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""", canonical=1
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""", canonical=4
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""", canonical=8
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""", canonical=32
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64
)
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(RELATED_OBJECT_OK | NON_PERSISTENT_OK |
INIT_OK | CALLABLES_OK | SQL_OK)
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH",
"PASSIVE_OFF ^ SQL_OK",
canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK
)
DEFAULT_MANAGER_ATTR = '_sa_class_manager'
DEFAULT_STATE_ATTR = '_sa_instance_state'
_INSTRUMENTOR = ('mapper', 'instrumentor')
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
EXT_STOP = util.symbol('EXT_STOP')
ONETOMANY = util.symbol('ONETOMANY',
"""Indicates the one-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
MANYTOONE = util.symbol('MANYTOONE',
"""Indicates the many-to-one direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
MANYTOMANY = util.symbol('MANYTOMANY',
"""Indicates the many-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
NOT_EXTENSION = util.symbol('NOT_EXTENSION',
"""Symbol indicating an :class:`_InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`._InspectionAttr.extension_type`
attibute.
""")
_none_set = frozenset([None])
def _generative(*assertions):
    """Mark a method as generative, e.g. method-chained.

    The decorated method runs against a *clone* of ``self`` (the original
    object is never mutated) and the clone is returned so calls can be
    chained.  Each ``assertion`` callable is invoked with the clone and
    the method name before the body runs and may raise to veto the call.
    """
    @util.decorator
    def generate(fn, *args, **kw):
        # args[0] is "self"; operate on a copy so the original is untouched.
        self = args[0]._clone()
        for assertion in assertions:
            assertion(self, fn.__name__)
        fn(self, *args[1:], **kw)
        # Return the clone, not fn's result: generative methods mutate the
        # clone in place and their own return value is discarded.
        return self
    return generate
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
    # Return the ClassManager stored directly on *cls*, or None if the
    # class was never instrumented.  cls.__dict__ (not getattr) is used
    # deliberately so a manager inherited from a base class is not returned.
    return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
# Fast accessors: obj -> obj._sa_instance_state and obj -> obj.__dict__.
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter('__dict__')
def instance_str(instance):
    """Return a string describing a mapped instance."""
    state = instance_state(instance)
    return state_str(state)
def state_str(state):
    """Return a string describing an instance via its InstanceState."""
    if state is None:
        return "None"
    return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj()))
def state_class_str(state):
    """Return a string naming an instance's class via its InstanceState."""
    if state is None:
        return "None"
    return '<%s>' % (state.class_.__name__, )
def attribute_str(instance, attribute):
    """Return 'instance-description.attribute-name'."""
    return ".".join((instance_str(instance), attribute))
def state_attribute_str(state, attribute):
    """Return 'state-description.attribute-name'."""
    return ".".join((state_str(state), attribute))
def object_mapper(instance):
    """Given an object, return the primary Mapper associated with the object
    instance.

    Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
    if no mapping is configured.

    Equivalent to ``inspect(instance).mapper``, except that the inspection
    system raises :class:`sqlalchemy.exc.NoInspectionAvailable` when the
    instance is not part of a mapping.
    """
    state = object_state(instance)
    return state.mapper
def object_state(instance):
    """Given an object, return the :class:`.InstanceState`
    associated with the object.

    Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
    if no mapping is configured.

    Equivalent to ``inspect(instance)``, except that the inspection
    system raises :class:`sqlalchemy.exc.NoInspectionAvailable` for
    unmapped instances.
    """
    state = _inspect_mapped_object(instance)
    if state is not None:
        return state
    raise exc.UnmappedInstanceError(instance)
@inspection._inspects(object)
def _inspect_mapped_object(instance):
    # Inspection hook for arbitrary objects: return the InstanceState for
    # *instance*, or None when it is not a mapped object.
    try:
        return instance_state(instance)
    # exc.NO_STATE appears to be a tuple of exception classes (it is used
    # directly as an except target below), so a single combined handler
    # "except (exc.UnmappedClassError,) + exc.NO_STATE:" would also work;
    # the two separate clauses are kept for clarity.
    except exc.UnmappedClassError:
        return None
    except exc.NO_STATE:
        return None
def _class_to_mapper(class_or_mapper):
    """Resolve a class or mapper-like object to its :class:`.Mapper`,
    raising UnmappedClassError when no mapping exists.
    """
    insp = inspection.inspect(class_or_mapper, False)
    if insp is None:
        raise exc.UnmappedClassError(class_or_mapper)
    return insp.mapper
def _mapper_or_none(entity):
    """Return the :class:`.Mapper` for the given class, or None when the
    class is not mapped.
    """
    insp = inspection.inspect(entity, False)
    return insp.mapper if insp is not None else None
def _is_mapped_class(entity):
    """Return True if the given object is a mapped class,
    :class:`.Mapper`, or :class:`.AliasedClass`.
    """
    insp = inspection.inspect(entity, False)
    if insp is None or not hasattr(insp, "mapper"):
        return False
    # Note: mirrors the original and-chain, so the truthy value of the
    # flags (not a coerced bool) is what gets returned here.
    return insp.is_mapper or insp.is_aliased_class
def _attr_as_key(attr):
if hasattr(attr, 'key'):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
    """Return the columns of *entity*'s selectable as a list, or the
    entity itself wrapped in a list when it has no selectable.
    """
    insp = inspection.inspect(entity, False)
    if hasattr(insp, 'selectable'):
        return list(insp.selectable.c)
    return [entity]
def _is_aliased_class(entity):
    """True when *entity* inspects to an :class:`.AliasedClass`."""
    insp = inspection.inspect(entity, False)
    if insp is None:
        return False
    return getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
    """Return a class attribute given an entity and string name.

    May return :class:`.InstrumentedAttribute` or user-defined
    attribute.  Raises ``sa_exc.InvalidRequestError`` when *entity*
    has no attribute named *key*.
    """
    insp = inspection.inspect(entity)
    # Unwrap selectables / aliased classes / mapped classes to the object
    # that actually carries the attribute before doing the getattr; the
    # branch order matters since an inspected object may satisfy several
    # of these checks.
    if insp.is_selectable:
        description = entity
        entity = insp.c
    elif insp.is_aliased_class:
        entity = insp.entity
        description = entity
    elif hasattr(insp, "mapper"):
        description = entity = insp.mapper.class_
    else:
        description = entity
    try:
        return getattr(entity, key)
    except AttributeError:
        raise sa_exc.InvalidRequestError(
            "Entity '%s' has no property '%s'" %
            (description, key)
        )
# Shortcut accessor: state -> state.manager.mapper.
_state_mapper = util.dottedgetter('manager.mapper')
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
    # Inspection hook for classes: return the Mapper for *class_*, or None
    # when the class is not mapped.  With configure=True, any pending
    # mapper configuration is completed before returning.
    try:
        class_manager = manager_of_class(class_)
        # NOTE(review): when manager_of_class returns None the attribute
        # access below raises AttributeError, which is presumably covered
        # by the exc.NO_STATE handler -- confirm before relying on this.
        if not class_manager.is_mapped:
            return None
        mapper = class_manager.mapper
    except exc.NO_STATE:
        return None
    else:
        if configure and mapper._new_mappers:
            mapper._configure_all()
        return mapper
def class_mapper(class_, configure=True):
    """Given a class, return the primary :class:`.Mapper` associated
    with the key.

    Raises :exc:`.UnmappedClassError` if no mapping is configured
    on the given class, or :exc:`.ArgumentError` if a non-class
    object is passed.

    Equivalent to ``inspect(some_mapped_class)``, except that the
    inspection system raises
    :class:`sqlalchemy.exc.NoInspectionAvailable` for unmapped classes.
    """
    mapper = _inspect_mapped_class(class_, configure=configure)
    if mapper is not None:
        return mapper
    # Distinguish "not a class at all" from "a class with no mapping".
    if not isinstance(class_, type):
        raise sa_exc.ArgumentError(
            "Class object expected, got '%r'." % (class_, ))
    raise exc.UnmappedClassError(class_)
class _InspectionAttr(object):
    """A base class applied to all ORM objects that can be returned
    by the :func:`.inspect` function.

    The attributes defined here allow the usage of simple boolean
    checks to test basic facts about the object returned.

    While the boolean checks here are basically the same as using
    the Python isinstance() function, the flags here can be used without
    the need to import all of these classes, and also such that
    the SQLAlchemy class system can change while leaving the flags
    here intact for forwards-compatibility.

    """

    is_selectable = False
    """Return True if this object is an instance of :class:`.Selectable`."""

    is_aliased_class = False
    """True if this object is an instance of :class:`.AliasedClass`."""

    is_instance = False
    """True if this object is an instance of :class:`.InstanceState`."""

    is_mapper = False
    """True if this object is an instance of :class:`.Mapper`."""

    is_property = False
    """True if this object is an instance of :class:`.MapperProperty`."""

    is_attribute = False
    """True if this object is a Python :term:`descriptor`.

    This can refer to one of many types.   Usually a
    :class:`.QueryableAttribute` which handles attributes events on behalf
    of a :class:`.MapperProperty`.   But can also be an extension type
    such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
    The :attr:`._InspectionAttr.extension_type` will refer to a constant
    identifying the specific subtype.

    .. seealso::

        :attr:`.Mapper.all_orm_descriptors`

    """

    is_clause_element = False
    """True if this object is an instance of :class:`.ClauseElement`."""

    # NOTE: NOT_EXTENSION is defined in the interfaces module and imported
    # at module scope (not visible in this chunk).
    extension_type = NOT_EXTENSION
    """The extension type, if any.
    Defaults to :data:`.interfaces.NOT_EXTENSION`

    .. versionadded:: 0.8.0

    .. seealso::

        :data:`.HYBRID_METHOD`

        :data:`.HYBRID_PROPERTY`

        :data:`.ASSOCIATION_PROXY`

    """
# Marker mixin: carries no behavior of its own.
class _MappedAttribute(object):
    """Mixin for attributes which should be replaced by mapper-assigned
    attributes.
    """
| mit |
AmberJBlue/aima-python | submissions/Hess/vacuum2.py | 18 | 1142 | import agents as ag
def HW2Agent() -> object:
    """Build a simple reflex vacuum agent that sweeps left and right,
    reversing direction whenever it bumps into a wall."""

    def program(percept):
        bump, status = percept
        if status == 'Dirty':
            action = 'Suck'
        else:
            _last_bump, _last_status = program.oldPercepts[-1]
            previous = program.oldActions[-1]
            if previous == 'Suck':
                # resume whatever movement preceded the cleaning stop
                action = program.oldActions[-2]
            else:
                # keep heading the same way; a bump reverses direction
                action = {
                    ('Right', 'None'): 'Right',
                    ('Right', 'Bump'): 'Left',
                    ('Left', 'None'): 'Left',
                    ('Left', 'Bump'): 'Right',
                }.get((previous, bump), 'Left')
        program.oldPercepts.append(percept)
        program.oldActions.append(action)
        return action

    # static state shared across calls
    program.oldPercepts = [('None', 'Clean')]
    program.oldActions = ['Left']

    agt = ag.Agent(program)
    # assign class attributes here:
    # agt.direction = ag.Direction('left')
    return agt
hydai/closure-linter | setup.py | 12 | 1324 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prefer setuptools (needed for entry_points); fall back to distutils.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='closure_linter',
    version='2.3.17',
    description='Closure Linter',
    license='Apache',
    author='The Closure Linter Authors',
    author_email='opensource@google.com',
    url='http://code.google.com/p/closure-linter',
    install_requires=['python-gflags'],
    package_dir={'closure_linter': 'closure_linter'},
    packages=['closure_linter', 'closure_linter.common'],
    entry_points={
        'console_scripts': [
            'gjslint = closure_linter.gjslint:main',
            'fixjsstyle = closure_linter.fixjsstyle:main',
        ]
    },
)
| apache-2.0 |
karesansui/karesansui | karesansui/gadget/hostby1staticroute.py | 1 | 6419 | # -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
import web
import simplejson as json
import karesansui
from karesansui.lib.rest import Rest, auth
from karesansui.db.access.machine import findbyhost1
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, CHECK_LENGTH, \
CHECK_CHAR, CHECK_MIN, CHECK_MAX, CHECK_ONLYSPACE, \
CHECK_UNIQUE
from karesansui.lib.utils import is_param, is_empty, preprint_r, \
base64_encode, get_ifconfig_info
from karesansui.lib.networkaddress import NetworkAddress
from karesansui.lib.parser.staticroute import staticrouteParser as Parser
from karesansui.lib.conf import read_conf, write_conf
def validates_staticroute(obj):
    """Validate the 'target' and 'gateway' request parameters.

    Error messages accumulate in ``checker.errors`` and are exposed via
    ``obj.view.alert``; returns True only when every check passed.
    """
    checker = Checker()
    _ = obj._
    checker.errors = []
    ok = True

    if is_param(obj.input, 'target'):
        # target must be present and a valid IP address
        ok = checker.check_ipaddr(
            _('Target'),
            obj.input.target,
            CHECK_EMPTY | CHECK_VALID,
        ) and ok
    else:
        ok = False
        checker.add_error(_('Specify target address for the route.'))

    if is_param(obj.input, 'gateway'):
        # gateway may be empty but must be valid when given
        ok = checker.check_ipaddr(
            _('Gateway'),
            obj.input.gateway,
            CHECK_VALID,
        ) and ok
    else:
        ok = False
        checker.add_error(_('Specify gateway address for the route.'))

    obj.view.alert = checker.errors
    return ok
class HostBy1StaticRoute(Rest):
    """REST resource for a host's static routing table.

    ``GET`` renders the current routing table, marking which entries the
    user may remove; ``POST`` registers a new static route and queues a
    configuration task that applies it with route(8).
    """

    @auth
    def _GET(self, *param, **params):
        """Collect routing-table entries and physical devices for the view."""
        host_id = self.chk_hostby1(param)
        if host_id is None:
            return web.notfound()
        host = findbyhost1(self.orm, host_id)

        self.view.host_id = host_id

        # unremovable entries: virtual/bridge/loopback devices and special networks
        excludes = {
            "device": ["^peth", "^virbr", "^sit", "^xenbr", "^lo", "^br"],
            "ipaddr": ["^0\.0\.0\.0$", "^169\.254\.0\.0$"],
        }

        devices = []
        phydev_regex = re.compile(r"^eth[0-9]+")
        for dev, dev_info in get_ifconfig_info().iteritems():
            if phydev_regex.match(dev):
                try:
                    if dev_info['ipaddr'] is not None:
                        devices.append(dev)
                        # the network of a configured interface must not be removable either
                        net = NetworkAddress("%s/%s" % (dev_info['ipaddr'], dev_info['mask'],))
                        excludes['ipaddr'].append(net.network)
                except:
                    # best effort: skip interfaces with unparsable addresses
                    pass

        self.view.devices = devices

        parser = Parser()
        status = parser.do_status()
        routes = {}
        for _k, _v in status.iteritems():
            for _k2, _v2 in _v.iteritems():
                # key: base64 of "<network>@<device>" (safe for URLs/element ids)
                name = base64_encode("%s@%s" % (_k2, _k,))
                routes[name] = {}
                routes[name]['name'] = name
                routes[name]['device'] = _k
                routes[name]['gateway'] = _v2['gateway']
                routes[name]['flags'] = _v2['flags']
                routes[name]['ref'] = _v2['ref']
                routes[name]['use'] = _v2['use']
                net = NetworkAddress(_k2)
                routes[name]['ipaddr'] = net.ipaddr
                routes[name]['netlen'] = net.netlen
                routes[name]['netmask'] = net.netmask

                # removable unless device name or address matches an exclude pattern
                removable = True
                for _ex_key, _ex_val in excludes.iteritems():
                    ex_regex = "|".join(_ex_val)
                    mm = re.search(ex_regex, routes[name][_ex_key])
                    if mm:
                        removable = False
                routes[name]['removable'] = removable

        self.view.routes = routes

        if self.is_mode_input():
            pass

        return True

    @auth
    def _POST(self, *param, **params):
        """Validate input, record the route in the config and queue the task."""
        host_id = self.chk_hostby1(param)
        if host_id is None:
            return web.notfound()
        host = findbyhost1(self.orm, host_id)

        if not validates_staticroute(self):
            return web.badrequest(self.view.alert)

        modules = ["staticroute"]

        dop = read_conf(modules, self, host)
        if dop is False:
            return web.internalerror('Internal Server Error. (Timeout)')

        target = self.input.target
        net = NetworkAddress(target)
        ipaddr = net.ipaddr
        netmask = net.netmask
        netlen = net.netlen
        network = net.network
        target = "%s/%s" % (ipaddr, netlen,)
        gateway = self.input.gateway
        device = self.input.device

        dop.set("staticroute", [device, target], gateway)

        from karesansui.lib.parser.staticroute import PARSER_COMMAND_ROUTE
        if net.netlen == 32:
            # Host route. Bug fix: the gateway variant of the command used to
            # be built and then unconditionally overwritten by the variant
            # without a gateway, so a supplied gateway was silently ignored.
            # Use the gateway when one was provided.
            if is_empty(gateway):
                command = "%s add -host %s dev %s" % (PARSER_COMMAND_ROUTE, ipaddr, device,)
            else:
                command = "%s add -host %s gw %s dev %s" % (PARSER_COMMAND_ROUTE, ipaddr, gateway, device,)
        else:
            command = "%s add -net %s netmask %s gw %s dev %s" % (PARSER_COMMAND_ROUTE, network, netmask, gateway, device,)

        extra_args = {"post-command": command}

        retval = write_conf(dop, self, host, extra_args=extra_args)
        if retval is False:
            return web.internalerror('Internal Server Error. (Adding Task)')

        return web.accepted(url=web.ctx.path)
# URL dispatch table for this gadget: host id plus an optional
# representation suffix (.html, .part or .json).
urls = (
    '/host/(\d+)/staticroute[/]?(\.html|\.part|\.json)?$', HostBy1StaticRoute,
)
| mit |
codesparkle/youtube-dl | youtube_dl/extractor/litv.py | 5 | 6252 | # coding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
smuggle_url,
unsmuggle_url,
)
class LiTVIE(InfoExtractor):
    """Extractor for litv.tv VOD and promo pages (geo-restricted to Taiwan).

    A content page may describe a whole season (extracted as a playlist)
    or a single episode; playlist entries are re-extracted through
    per-episode URLs carrying smuggled data to stop recursion.
    """

    _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)'

    _URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?id=%s'

    _TESTS = [{
        'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
        'info_dict': {
            'id': 'VOD00041606',
            'title': '花千骨',
        },
        'playlist_count': 50,
    }, {
        'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
        'md5': '969e343d9244778cb29acec608e53640',
        'info_dict': {
            'id': 'VOD00041610',
            'ext': 'mp4',
            'title': '花千骨第1集',
            'thumbnail': 're:https?://.*\.jpg$',
            'description': 'md5:c7017aa144c87467c4fb2909c4b05d6f',
            'episode_number': 1,
        },
        'params': {
            'noplaylist': True,
        },
        'skip': 'Georestricted to Taiwan',
    }, {
        'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&',
        'md5': '88322ea132f848d6e3e18b32a832b918',
        'info_dict': {
            'id': 'VOD00044841',
            'ext': 'mp4',
            'title': '芈月傳第1集 霸星芈月降世楚國',
            'description': '楚威王二年,太史令唐昧夜觀星象,發現霸星即將現世。王后得知霸星的預言後,想盡辦法不讓孩子順利出生,幸得莒姬相護化解危機。沒想到眾人期待下出生的霸星卻是位公主,楚威王對此失望至極。楚王后命人將女嬰丟棄河中,居然奇蹟似的被少司命像攔下,楚威王認為此女非同凡響,為她取名芈月。',
        },
        'skip': 'Georestricted to Taiwan',
    }]

    def _extract_playlist(self, season_list, video_id, program_info, prompt=True):
        """Build a playlist result from a season's episode list.

        Each entry URL is smuggled with force_noplaylist so re-extraction
        of an episode cannot recurse back into playlist extraction.
        """
        episode_title = program_info['title']
        content_id = season_list['contentId']

        if prompt:
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (content_id, video_id))

        all_episodes = [
            self.url_result(smuggle_url(
                self._URL_TEMPLATE % (program_info['contentType'], episode['contentId']),
                {'force_noplaylist': True}))  # To prevent infinite recursion
            for episode in season_list['episode']]

        return self.playlist_result(all_episodes, content_id, episode_title)

    def _real_extract(self, url):
        """Extract a single episode (or dispatch to playlist extraction)."""
        url, data = unsmuggle_url(url, {})

        video_id = self._match_id(url)

        noplaylist = self._downloader.params.get('noplaylist')
        noplaylist_prompt = True
        if 'force_noplaylist' in data:
            # set by _extract_playlist(); also suppresses the info prompt
            noplaylist = data['force_noplaylist']
            noplaylist_prompt = False

        webpage = self._download_webpage(url, video_id)

        # NOTE(review): non-raw pattern; '\s' is passed through unchanged but a
        # raw string literal would be cleaner.
        program_info = self._parse_json(self._search_regex(
            'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
            video_id)

        season_list = list(program_info.get('seasonList', {}).values())
        if season_list:
            if not noplaylist:
                return self._extract_playlist(
                    season_list[0], video_id, program_info,
                    prompt=noplaylist_prompt)

            if noplaylist_prompt:
                self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

        # In browsers `getMainUrl` request is always issued. Usually this
        # endpoint gives the same result as the data embedded in the webpage.
        # If georestricted, there are no embedded data, so an extra request is
        # necessary to get the error code
        if 'assetId' not in program_info:
            program_info = self._download_json(
                'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
                query={'contentId': video_id},
                headers={'Accept': 'application/json'})
        video_data = self._parse_json(self._search_regex(
            r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);',
            webpage, 'video data', default='{}'), video_id)
        if not video_data:
            # no embedded HLS data: ask the getMainUrl endpoint directly
            payload = {
                'assetId': program_info['assetId'],
                'watchDevices': program_info['watchDevices'],
                'contentType': program_info['contentType'],
            }
            video_data = self._download_json(
                'https://www.litv.tv/vod/getMainUrl', video_id,
                data=json.dumps(payload).encode('utf-8'),
                headers={'Content-Type': 'application/json'})

        if not video_data.get('fullpath'):
            error_msg = video_data.get('errorMessage')
            if error_msg == 'vod.error.outsideregionerror':
                self.raise_geo_restricted('This video is available in Taiwan only')
            if error_msg:
                raise ExtractorError('%s said: %s' % (self.IE_NAME, error_msg), expected=True)
            raise ExtractorError('Unexpected result from %s' % self.IE_NAME)

        formats = self._extract_m3u8_formats(
            video_data['fullpath'], video_id, ext='mp4',
            entry_protocol='m3u8_native', m3u8_id='hls')
        for a_format in formats:
            # LiTV HLS segments doesn't like compressions
            a_format.setdefault('http_headers', {})['Youtubedl-no-compression'] = True

        title = program_info['title'] + program_info.get('secondaryMark', '')
        description = program_info.get('description')
        thumbnail = program_info.get('imageFile')
        categories = [item['name'] for item in program_info.get('category', [])]
        episode = int_or_none(program_info.get('episode'))

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'episode_number': episode,
        }
| unlicense |
dlu-ch/dlb | test/dlb_contrib/test_git.py | 1 | 21925 | # SPDX-License-Identifier: LGPL-3.0-or-later
# dlb - a Pythonic build tool
# Copyright (C) 2020 Daniel Lutz <dlu-ch@users.noreply.github.com>
import testenv # also sets up module search paths
import dlb.di
import dlb.fs
import dlb.ex
import dlb_contrib.generic
import dlb_contrib.git
import dlb_contrib.sh
import os.path
import tempfile
import subprocess
import re
import unittest
# Sets up a throw-away Git repository in the current working directory:
# two commits touching 'x', an annotated release tag v1.2.3c4, one more
# commit, and several renames/moves left staged but uncommitted, plus an
# untracked file 'e/u' — the fixture the tests below assert against.
class PrepareGitRepo(dlb_contrib.sh.ShScriptlet):
    SCRIPTLET = """
        git init
        git config user.email "dlu-ch@users.noreply.github.com"
        git config user.name "dlu-ch"

        git add .dlbroot/o
        echo .dlbroot/ > .gitignore

        echo x > x
        git add x .gitignore
        git commit -m 'Initial commit'

        echo x >> x
        git commit -a -m 'Enlarge x'
        git tag -a v1.2.3c4 -m 'Release'
        echo x >> x
        git commit -a -m 'Enlarge x even further'
        mkdir d
        echo y > d/y
        git add d/y
        echo z > d/z
        git add d/z
        echo a > 'a -> b'
        git add 'a -> b'
        git commit -m 'Add files'
        git mv x 'y -> z'
        git mv 'a -> b' c
        git mv d e
        git mv e/y why
        echo u > e/u
        """
# each annotated tag starting with 'v' followed by a decimal digit must match this (after 'v'):
# e.g. '1.2.3' or '1.2.3c4' -> named groups: major, minor, micro,
# optional post ('a'/'b'/'c') and post_number.
VERSION_REGEX = re.compile(
    r'^'
    r'(?P<major>0|[1-9][0-9]*)\.(?P<minor>0|[1-9][0-9]*)\.(?P<micro>0|[1-9][0-9]*)'
    r'((?P<post>[abc])(?P<post_number>0|[1-9][0-9]*))?'
    r'$')
class ModificationsFromStatusTest(unittest.TestCase):
    # Unit tests for dlb_contrib.git.modifications_from_status(), which
    # parses 'git status --porcelain=v2 --branch' output lines.

    def test_branch_header(self):
        # '# branch.*' header lines yield branch/upstream refnames and
        # the ahead/behind commit counts.
        lines = [
            '# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
            '# branch.head "äüä"',
            '# branch.upstream origin/master',
            '# branch.ab +12 -3'
        ]
        _, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
            dlb_contrib.git.modifications_from_status(lines)
        self.assertEqual('refs/heads/"äüä"', branch_refname)
        self.assertEqual('refs/remotes/origin/master', upstream_branch_refname)
        self.assertEqual((12, 3), (before_upstream, behind_upstream))

        lines = [
            '# branch.oid b5fb8c02a485f9f7a5d4aee95848bf9c9d2b0f7f',
            '# branch.head (detached)'
        ]
        _, _, branch_refname, upstream_branch_refname, before_upstream, behind_upstream = \
            dlb_contrib.git.modifications_from_status(lines)
        self.assertEqual('refs/heads/(detached)', branch_refname)  # is ambiguous
        self.assertIsNone(upstream_branch_refname)
        self.assertIsNone(before_upstream)
        self.assertIsNone(behind_upstream)

    def test_single_non_header_line(self):
        # '1 ...' = ordinary change, '2 ...' = rename/copy (with escaped
        # paths), '? ...' = untracked file.
        line = (
            '1 .M N... 100644 100644 100644 '
            'd8755f8b2ede3dc58822895fa85e0e51c8f20dda d8755f8b2ede3dc58822895fa85e0e51c8f20dda jöö/herzig'
        )
        self.assertEqual({dlb.fs.Path('jöö/herzig'): (' M', None)},
                         dlb_contrib.git.modifications_from_status([line])[0])

        line = (
            '1 A. N... 000000 100644 100644 '
            '0000000000000000000000000000000000000000 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 "a\\tb\\nc\\"\'d "'
        )
        self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('A ', None)},
                         dlb_contrib.git.modifications_from_status([line])[0])

        line = (
            '2 R. N... 100644 100644 100644 '
            'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 a\tb'
        )
        self.assertEqual({dlb.fs.Path('b'): ('R ', dlb.fs.Path('a'))},
                         dlb_contrib.git.modifications_from_status([line])[0])

        line = (
            '2 R. N... 100644 100644 100644 '
            'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 "a\\"b"\ta -> b'
        )
        self.assertEqual({dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('a"b'))},
                         dlb_contrib.git.modifications_from_status([line])[0])

        line = (
            '2 R. N... 100644 100644 100644 '
            'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 R100 '
            'a\t"a\\tb\\nc\\"\'d "'
        )
        self.assertEqual({dlb.fs.Path('a\tb\nc"\'d '): ('R ', dlb.fs.Path('a'))},
                         dlb_contrib.git.modifications_from_status([line])[0])

        self.assertEqual({dlb.fs.Path('a')},
                         dlb_contrib.git.modifications_from_status(['? a'])[1])
        self.assertEqual({dlb.fs.Path('a\tb\nc"\'d ')},
                         dlb_contrib.git.modifications_from_status(['? "a\\tb\\nc\\"\'d "'])[1])

    def test_fails_on_invalid_line(self):
        # malformed header and truncated change lines must be rejected
        with self.assertRaises(ValueError):
            dlb_contrib.git.modifications_from_status(['# branch.ab +0'])
        with self.assertRaises(ValueError):
            dlb_contrib.git.modifications_from_status(['1 A.'])
        with self.assertRaises(ValueError):
            dlb_contrib.git.modifications_from_status(['2 R.'])
class CheckRefNameTest(unittest.TestCase):
    """Validation of Git refnames by dlb_contrib.git.check_refname()
    (mirrors the rules of git-check-ref-format)."""

    def test_empty_is_invalid(self):
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('')
        self.assertEqual(str(cm.exception), 'refname component must not be empty')

    def test_single_slashes_are_valid(self):
        dlb_contrib.git.check_refname('a/b/c')

    # Renamed from 'test_consecutive_slashes_are_valid': the body asserts
    # that consecutive slashes are REJECTED (they create an empty
    # component), so the old name contradicted the behavior under test.
    def test_consecutive_slashes_are_invalid(self):
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a//b')
        self.assertEqual(str(cm.exception), 'refname component must not be empty')

    def test_single_dot_in_the_middle_is_valid(self):
        dlb_contrib.git.check_refname('a/b.c')

    def test_at_at_certain_position_is_valid(self):
        dlb_contrib.git.check_refname('a/{@}/b')

    def test_single_at_is_invalid(self):
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a/@/b')
        self.assertEqual(str(cm.exception), "refname component must not be '@'")

    def test_at_followed_by_brace_is_invalid(self):
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a@{b')
        self.assertEqual(str(cm.exception), "refname component must not contain '@{'")

    def test_double_dot_in_the_middle_is_invalid(self):
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a/b..c')
        self.assertEqual(str(cm.exception), "refname component must not contain '..'")

    def test_control_character_is_invalid(self):
        # NUL, LF and DEL are all ASCII control characters
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a\0b')
        self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a\nb')
        self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
        with self.assertRaises(ValueError) as cm:
            dlb_contrib.git.check_refname('a\x7Fb')
        self.assertEqual(str(cm.exception), "refname component must not contain ASCII control character")
class DescribeWorkingDirectory(dlb_contrib.git.GitDescribeWorkingDirectory):
    """GitDescribeWorkingDirectory extended with a composed working
    directory version string derived from the annotated version tag."""

    SHORTENED_COMMIT_HASH_LENGTH = 8  # number of characters of the SHA1 commit hash in the *wd_version*

    # working directory version
    # examples: '1.2.3', '1.2.3c4-dev5+deadbeef?'
    wd_version = dlb.ex.output.Object(explicit=False)

    # tuple of the version according to the version tag
    version_components = dlb.ex.output.Object(explicit=False)

    async def redo(self, result, context):
        """Run the base redo, then parse the tag with VERSION_REGEX and
        compose *wd_version* (a trailing '?' marks uncommitted changes)."""
        await super().redo(result, context)

        # clamp to the valid SHA1 hex-digest length range 1..40
        shortened_commit_hash_length = min(40, max(1, int(self.SHORTENED_COMMIT_HASH_LENGTH)))

        version = result.tag_name[1:]
        m = VERSION_REGEX.fullmatch(version)
        if not m:
            raise ValueError(f'annotated tag is not a valid version number: {result.tag_name!r}')

        wd_version = version
        if result.commit_number_from_tag_to_latest_commit:
            # commits after the tag: append '-devN+<shortened commit hash>'
            wd_version += f'-dev{result.commit_number_from_tag_to_latest_commit}' \
                          f'+{result.latest_commit_hash[:shortened_commit_hash_length]}'
        if result.has_changes_in_tracked_files:
            wd_version += '?'

        result.wd_version = wd_version
        result.version_components = (
            int(m.group('major')), int(m.group('minor')), int(m.group('micro')),
            m.group('post'), None if m.group('post_number') is None else int(m.group('post_number'))
        )

        return True
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitDescribeWorkingDirectoryTest(testenv.TemporaryWorkingDirectoryTestCase):
    # Integration tests: run DescribeWorkingDirectory against a real Git
    # repository built by PrepareGitRepo in a temporary working directory.

    def test_line_output(self):
        with dlb.ex.Context():
            class AddLightWeightTag(dlb_contrib.sh.ShScriptlet):
                SCRIPTLET = 'git tag v2'  # light-weight tag does not affect 'git describe'

            PrepareGitRepo().start().complete()
            AddLightWeightTag().start().complete()

            result = DescribeWorkingDirectory().start()

            dlb.di.inform(f"version: {result.version_components!r}, wd version: {result.wd_version!r}")
            dlb.di.inform(f"changed: {result.modification_by_file.keys()!r}")

            # the staged renames from PrepareGitRepo
            self.assertEqual({
                dlb.fs.Path('a -> b'): ('R ', dlb.fs.Path('c')),
                dlb.fs.Path('d/y'): ('R ', dlb.fs.Path('why')),
                dlb.fs.Path('d/z'): ('R ', dlb.fs.Path('e/z')),
                dlb.fs.Path('x'): ('R ', dlb.fs.Path('y -> z'))
            }, result.modification_by_file)
            self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
            self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
            self.assertRegex(result.wd_version, r'1\.2\.3c4-dev2\+[0-9a-f]{8}\?$')
            self.assertEqual('refs/heads/master', result.branch_refname)

        with dlb.ex.Context():
            class CommitGitRepo(dlb_contrib.sh.ShScriptlet):
                SCRIPTLET = 'git commit -a -m 0'

            CommitGitRepo().start()

            result = DescribeWorkingDirectory().start()
            # after committing: no tracked modifications, one more commit
            self.assertEqual({}, result.modification_by_file)
            self.assertEqual({dlb.fs.Path('e/u')}, result.untracked_files)
            self.assertEqual((1, 2, 3, 'c', 4), result.version_components)
            self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')

        with dlb.ex.Context():
            class CheckoutBranch(dlb_contrib.sh.ShScriptlet):
                SCRIPTLET = 'git checkout -f -b "(detached)"'

            CheckoutBranch().start()

            result = DescribeWorkingDirectory().start()
            # a branch literally named "(detached)" is reported as a refname
            self.assertEqual('refs/heads/(detached)', result.branch_refname)
            self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')

        with dlb.ex.Context():
            class CheckoutDetached(dlb_contrib.sh.ShScriptlet):
                SCRIPTLET = 'git checkout --detach'

            CheckoutDetached().start()

            result = DescribeWorkingDirectory().start()
            # a truly detached HEAD yields no branch refname at all
            self.assertIsNone(result.branch_refname)
            self.assertRegex(result.wd_version, r'1\.2\.3c4-dev3\+[0-9a-f]{8}$')

    def test_gitignore_can_hide_every_modification(self):
        class PrepareRepoWithHiddenModifications(dlb_contrib.sh.ShScriptlet):
            SCRIPTLET = """
                git init
                git config user.email "dlu-ch@users.noreply.github.com"
                git config user.name "dlu-ch"
                echo x > x
                git add x
                git commit -m 'Initial commit'
                git tag -a v0.0.0 -m 'Initial tag'
                echo .gitignore > .gitignore
                echo .dlbroot >> .gitignore
                echo ignored >> .gitignore
                touch ignored
                """

        with dlb.ex.Context():
            PrepareRepoWithHiddenModifications().start().complete()
            result = DescribeWorkingDirectory().start()
            # everything new is covered by .gitignore -> no modifications seen
            self.assertEqual({}, result.modification_by_file)
class DefaultVersionTagTest(unittest.TestCase):
    """Checks of GitCheckTags.ANNOTATED_TAG_NAME_REGEX, the default
    pattern for annotated version tags like 'v1.2.3' or 'v1.2.3rc0'."""

    REGEX = re.compile(dlb_contrib.git.GitCheckTags.ANNOTATED_TAG_NAME_REGEX)

    def test_fails_for_empty(self):
        self.assertFalse(self.REGEX.fullmatch(''))

    def test_fails_for_missing_v(self):
        self.assertFalse(self.REGEX.fullmatch('1.2.3'))

    def test_fails_for_leading_zero(self):
        for tag_name in ['v01.2.3', 'v1.02.3', 'v1.02.03']:
            self.assertFalse(self.REGEX.fullmatch(tag_name))

    def test_matches_dotted_integers(self):
        for tag_name in ['v1', 'v1.2', 'v1.2.3', 'v1.20.345.6789', 'v0.0.0']:
            self.assertTrue(self.REGEX.fullmatch(tag_name))

    def test_fails_without_trailing_decimal_digit(self):
        self.assertFalse(self.REGEX.fullmatch('v1.2.3pre'))

    def test_matches_dotted_integers_with_suffix(self):
        for tag_name in ['v1.2.3a4', 'v1.2.3rc0', 'v1.2.3patch747']:
            self.assertTrue(self.REGEX.fullmatch(tag_name))
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
@unittest.skipIf(not testenv.has_executable_in_path('sh'), 'requires sh in $PATH')
class GitCheckTagsTest(testenv.TemporaryWorkingDirectoryTestCase):
    """Integration tests for dlb_contrib.git.GitCheckTags (tag naming and
    local/remote tag synchronization checks).

    Fix: replaced the deprecated ``assertEquals`` alias with
    ``assertEqual`` in test_example().
    """

    def test_local_only(self):
        """Tag-name checks without a remote to synchronize against."""
        class GitCheckTags(dlb_contrib.git.GitCheckTags):
            REMOTE_NAME_TO_SYNC_CHECK = ''  # disable the remote sync check

        class GitCheckTags2(GitCheckTags):
            LIGHTWEIGHT_TAG_NAME_REGEX = 'latest_.*'

        with dlb.ex.Context():
            PrepareGitRepo().start().complete()
            subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
            subprocess.check_output(['git', 'tag', 'vw'])

            result = GitCheckTags().start()
            self.assertEqual({'v1.2.3c4', 'v2.0.0'}, set(result.commit_by_annotated_tag_name))
            self.assertEqual({'vw'}, set(result.commit_by_lightweight_tag_name))

        with dlb.ex.Context():
            # the reported commits must match 'git rev-parse' for each tag
            output = subprocess.check_output(['git', 'rev-parse', 'v1.2.3c4^{}', 'v2.0.0^{}', 'vw'])
            commit_hashes = output.decode().splitlines()
            self.assertEqual({
                'v1.2.3c4': commit_hashes[0],
                'v2.0.0': commit_hashes[1]
            }, result.commit_by_annotated_tag_name)
            self.assertEqual({
                'vw': commit_hashes[2]
            }, result.commit_by_lightweight_tag_name)

        with dlb.ex.Context():
            # lightweight tag that *looks* like a version tag -> rejected
            subprocess.check_output(['git', 'tag', 'v2'])
            with self.assertRaises(ValueError) as cm:
                GitCheckTags().start().complete()
            msg = "name of lightweight tag does match 'ANNOTATED_TAG_NAME_REGEX': 'v2'"
            self.assertEqual(msg, str(cm.exception))

        with dlb.ex.Context():
            # annotated tag that does not look like a version tag -> rejected
            subprocess.check_output(['git', 'tag', '-d', 'v2'])
            subprocess.check_output(['git', 'tag', '-a', 'v_3.0', '-m', 'Release'])
            with self.assertRaises(ValueError) as cm:
                GitCheckTags().start().complete()
            msg = "name of annotated tag does not match 'ANNOTATED_TAG_NAME_REGEX': 'v_3.0'"
            self.assertEqual(msg, str(cm.exception))

        with dlb.ex.Context():
            # lightweight tag not matching the configured lightweight pattern
            subprocess.check_output(['git', 'tag', '-d', 'v_3.0'])
            with self.assertRaises(ValueError) as cm:
                GitCheckTags2().start().complete()
            msg = "name of lightweight tag does not match 'LIGHTWEIGHT_TAG_NAME_REGEX': 'vw'"
            self.assertEqual(msg, str(cm.exception))

    def test_remote_too(self):
        """Synchronization checks between local tags and an 'origin' remote."""
        class GitCheckTags(dlb_contrib.git.GitCheckTags):
            pass

        class GitCheckTags2(GitCheckTags):
            DO_SYNC_CHECK_LIGHTWEIGHT_TAGS = True

        # build the origin repository with annotated and lightweight tags
        origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
        with testenv.DirectoryChanger(origin_repo_dir):
            subprocess.check_output(['git', 'init'])
            subprocess.check_output(['git', 'config', 'user.email', 'dlu-ch@users.noreply.github.com'])
            subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
            subprocess.check_output(['touch', 'x'])
            subprocess.check_output(['git', 'add', 'x'])
            subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
            subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release'])
            subprocess.check_output(['touch', 'y'])
            subprocess.check_output(['git', 'add', 'y'])
            subprocess.check_output(['git', 'commit', '-m', 'Add y'])
            subprocess.check_output(['git', 'tag', '-a', 'v2.0.0', '-m', 'Release'])
            subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release'])
            subprocess.check_output(['git', 'tag', 'vm'])
            subprocess.check_output(['git', 'tag', 'v'])
            subprocess.check_output(['git', 'tag', 'w'])

        # clone-like local repository tracking the origin
        subprocess.check_output(['git', 'init'])
        subprocess.check_output(['touch', 'x'])
        subprocess.check_output(['git', 'add', 'x'])
        subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
        subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
        subprocess.check_output(['git', 'fetch'])
        subprocess.check_output(['git', 'fetch', '--tags'])

        with dlb.ex.Context():
            GitCheckTags().start()

        with dlb.ex.Context():
            subprocess.check_output(['git', 'tag', '-d', 'vm'])
            subprocess.check_output(['git', 'tag', '-d', 'v'])
            GitCheckTags().start()  # do not sync lightweight tags by default
            with self.assertRaises(ValueError) as cm:
                GitCheckTags2().start().complete()
            msg = "remote tags missing locally: 'v', 'vm'"
            self.assertEqual(msg, str(cm.exception))

            subprocess.check_output(['git', 'tag', '-d', 'v1.2.3c4'])
            subprocess.check_output(['git', 'tag', '-d', 'v2.0.1'])
            with self.assertRaises(ValueError) as cm:
                GitCheckTags().start().complete()
            msg = "remote tags missing locally: 'v1.2.3c4', 'v2.0.1'"
            self.assertEqual(msg, str(cm.exception))

            subprocess.check_output(['git', 'tag', '-a', 'v1.2.3c4', '-m', 'Release'])  # different commit
            subprocess.check_output(['git', 'tag', '-a', 'v2.0.1', '-m', 'Release'])  # different commit
            with self.assertRaises(ValueError) as cm:
                GitCheckTags().start().complete()
            msg = "tags for different commits locally and remotely: 'v1.2.3c4', 'v2.0.1'"
            self.assertEqual(msg, str(cm.exception))

            subprocess.check_output(['git', 'tag', '-a', 'v3.0.0', '-m', 'Release'])
            subprocess.check_output(['git', 'tag', '-a', 'v3.0.1', '-m', 'Release'])
            with self.assertRaises(ValueError) as cm:
                GitCheckTags().start().complete()
            msg = "local tags missing on remotely: 'v3.0.0', 'v3.0.1'"
            self.assertEqual(msg, str(cm.exception))

    def test_example(self):
        """The documented usage example: collect annotated version tags."""
        origin_repo_dir = os.path.abspath(tempfile.mkdtemp())
        with testenv.DirectoryChanger(origin_repo_dir):
            subprocess.check_output(['git', 'init'])
            subprocess.check_output(['git', 'config', 'user.email', 'dlu-ch@users.noreply.github.com'])
            subprocess.check_output(['git', 'config', 'user.name', 'user.name'])
            subprocess.check_output(['touch', 'x'])
            subprocess.check_output(['git', 'add', 'x'])
            subprocess.check_output(['git', 'commit', '-m', 'Initial commit'])
            subprocess.check_output(['git', 'tag', '-a', 'v1.2.3', '-m', 'Release'])

        subprocess.check_output(['git', 'init'])
        subprocess.check_output(['git', 'remote', 'add', 'origin', origin_repo_dir])
        subprocess.check_output(['git', 'fetch'])
        subprocess.check_output(['git', 'fetch', '--tags'])

        with dlb.ex.Context():
            class GitCheckTags(dlb_contrib.git.GitCheckTags):
                ANNOTATED_TAG_NAME_REGEX = r'v(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*)){2}'  # e.g. 'v1.23.0'
            version_tag_names = set(GitCheckTags().start().commit_by_annotated_tag_name)

        # was: assertEquals (deprecated alias, emits DeprecationWarning)
        self.assertEqual({'v1.2.3'}, version_tag_names)
@unittest.skipIf(not testenv.has_executable_in_path('git'), 'requires git in $PATH')
class VersionTest(testenv.TemporaryWorkingDirectoryTestCase):
    # The version reported for each involved executable must be a dotted
    # version string with at least two dots.

    def test_version_is_string_with_dot(self):
        # noinspection PyPep8Naming
        Tools = [
            dlb_contrib.git.GitDescribeWorkingDirectory,
            dlb_contrib.git.GitCheckTags
        ]

        class QueryVersion(dlb_contrib.generic.VersionQuery):
            # map each tool's executable to its version-query parameters
            VERSION_PARAMETERS_BY_EXECUTABLE = {
                Tool.EXECUTABLE: Tool.VERSION_PARAMETERS
                for Tool in Tools
            }

        with dlb.ex.Context():
            version_by_path = QueryVersion().start().version_by_path

            # one version entry per registered executable
            self.assertEqual(len(QueryVersion.VERSION_PARAMETERS_BY_EXECUTABLE), len(version_by_path))
            for Tool in Tools:
                path = dlb.ex.Context.active.helper[Tool.EXECUTABLE]
                version = version_by_path[path]
                self.assertIsInstance(version, str)
                self.assertGreaterEqual(version.count('.'), 2)
| gpl-3.0 |
t-tran/libcloud | libcloud/test/common/test_google.py | 11 | 15804 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Connection classes.
"""
import datetime
import mock
import os
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.google import (GoogleAuthError,
GoogleAuthType,
GoogleBaseAuthConnection,
GoogleInstalledAppAuthConnection,
GoogleServiceAcctAuthConnection,
GoogleGCEServiceAcctAuthConnection,
GoogleOAuth2Credential,
GoogleBaseConnection,
_utcnow,
_utc_timestamp)
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.utils.py3 import httplib
# Skip some tests if PyCrypto is unavailable
try:
from Crypto.Hash import SHA256
except ImportError:
SHA256 = None
# Directory of this test file; used to locate the key fixtures below.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
# Service-account private-key fixtures in PEM and JSON form.
PEM_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.pem')
JSON_KEY = os.path.join(SCRIPT_PATH, 'fixtures', 'google', 'pkey.json')
# Raw private-key string extracted from the JSON key fixture.
with open(JSON_KEY, 'r') as f:
    KEY_STR = json.loads(f.read())['private_key']

# Credential tuples (user_id, key) for each auth flavour under test.
GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key')
GCE_PARAMS_PEM_KEY = ('email@developer.gserviceaccount.com', PEM_KEY)
GCE_PARAMS_JSON_KEY = ('email@developer.gserviceaccount.com', JSON_KEY)
GCE_PARAMS_KEY = ('email@developer.gserviceaccount.com', KEY_STR)
GCE_PARAMS_IA = ('client_id', 'client_secret')
GCE_PARAMS_GCE = ('foo', 'bar')
GCS_S3_PARAMS = ('GOOG0123456789ABCXYZ',  # GOOG + 16 alphanumeric chars
                 '0102030405060708091011121314151617181920')  # 40 base64 chars

# Fixed "now" so token-expiry arithmetic is deterministic across the suite
# (libcloud.common.google._utcnow is patched to return this value).
STUB_UTCNOW = _utcnow()

# Canned OAuth2 token payloads served by the mocked auth endpoints.
STUB_TOKEN = {
    'access_token': 'tokentoken',
    'token_type': 'Bearer',
    'expires_in': 3600
}
# Installed-app flow response; includes a refresh token.
STUB_IA_TOKEN = {
    'access_token': 'installedapp',
    'token_type': 'Bearer',
    'expires_in': 3600,
    'refresh_token': 'refreshrefresh'
}
STUB_REFRESH_TOKEN = {
    'access_token': 'refreshrefresh',
    'token_type': 'Bearer',
    'expires_in': 3600
}
# Token as it would be read back from the on-disk cache file.
STUB_TOKEN_FROM_FILE = {
    'access_token': 'token_from_file',
    'token_type': 'Bearer',
    'expire_time': _utc_timestamp(STUB_UTCNOW +
                                  datetime.timedelta(seconds=3600)),
    'expires_in': 3600
}
class MockJsonResponse(object):
    """Minimal stand-in for a libcloud JSON response.

    Exposes the (already parsed) body via the ``object`` attribute,
    mirroring the attribute real response objects provide.
    """

    def __init__(self, body):
        self.object = body
class GoogleTestCase(LibcloudTestCase):
    """
    Assists in making Google tests hermetic and deterministic.

    Add anything that needs to be mocked here. Create a patcher with the
    suffix '_patcher'.

    e.g.
        _foo_patcher = mock.patch('module.submodule.class.foo', ...)

    Patchers are started at setUpClass and stopped at tearDownClass.

    Ideally, you should make a note in the thing being mocked, for clarity.
    """
    # Any class attribute whose name ends with this suffix is discovered via
    # dir() below and automatically started/stopped with the test class.
    PATCHER_SUFFIX = '_patcher'

    # Freeze the clock so token-expiry arithmetic matches STUB_UTCNOW.
    _utcnow_patcher = mock.patch(
        'libcloud.common.google._utcnow', return_value=STUB_UTCNOW)

    # Pretend the tests never run on an actual GCE instance.
    _authtype_is_gce_patcher = mock.patch(
        'libcloud.common.google.GoogleAuthType._is_gce', return_value=False)

    # Never touch the on-disk token cache: reads return the stub token,
    # writes become no-op mocks.
    _read_token_file_patcher = mock.patch(
        'libcloud.common.google.GoogleOAuth2Credential._get_token_from_file',
        return_value=STUB_TOKEN_FROM_FILE
    )
    _write_token_file_patcher = mock.patch(
        'libcloud.common.google.GoogleOAuth2Credential._write_token_to_file')

    # Skip the interactive browser step of the installed-app flow.
    _ia_get_code_patcher = mock.patch(
        'libcloud.common.google.GoogleInstalledAppAuthConnection.get_code',
        return_value=1234
    )

    @classmethod
    def setUpClass(cls):
        super(GoogleTestCase, cls).setUpClass()
        # Start every attribute named like a patcher (see PATCHER_SUFFIX).
        for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
            getattr(cls, patcher).start()

    @classmethod
    def tearDownClass(cls):
        super(GoogleTestCase, cls).tearDownClass()
        # Stop the same set of patchers started in setUpClass.
        for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
            getattr(cls, patcher).stop()
class GoogleBaseAuthConnectionTest(GoogleTestCase):
    """
    Tests for GoogleBaseAuthConnection
    """

    def setUp(self):
        # Route all HTTP traffic through the canned-response mock transport.
        GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
        self.mock_scopes = ['foo', 'bar']
        kwargs = {'scopes': self.mock_scopes}
        self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
                                                     **kwargs)

    def test_scopes(self):
        # Scopes are joined into a single space-separated string.
        self.assertEqual(self.conn.scopes, 'foo bar')

    def test_add_default_headers(self):
        # The auth connection must add form-encoding and host headers.
        old_headers = {}
        expected_headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'Host': 'accounts.google.com'}
        new_headers = self.conn.add_default_headers(old_headers)
        self.assertEqual(new_headers, expected_headers)

    def test_token_request(self):
        # An authorization-code grant returns the installed-app stub token,
        # with 'expire_time' computed from the (patched) clock.
        request_body = {'code': 'asdf', 'client_id': self.conn.user_id,
                        'client_secret': self.conn.key,
                        'redirect_uri': self.conn.redirect_uri,
                        'grant_type': 'authorization_code'}
        new_token = self.conn._token_request(request_body)
        self.assertEqual(new_token['access_token'],
                         STUB_IA_TOKEN['access_token'])
        exp = STUB_UTCNOW + datetime.timedelta(
            seconds=STUB_IA_TOKEN['expires_in'])
        self.assertEqual(new_token['expire_time'], _utc_timestamp(exp))
class GoogleInstalledAppAuthConnectionTest(GoogleTestCase):
    """
    Tests for GoogleInstalledAppAuthConnection
    """

    def setUp(self):
        # Route all HTTP traffic through the canned-response mock transport.
        GoogleInstalledAppAuthConnection.conn_class = GoogleAuthMockHttp
        self.mock_scopes = ['https://www.googleapis.com/auth/foo']
        kwargs = {'scopes': self.mock_scopes}
        self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS,
                                                     **kwargs)

    def test_refresh_token(self):
        # This token info doesn't have a refresh token, so a new token will be
        # requested
        token_info1 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
                       'expires_in': 3600}
        new_token1 = self.conn.refresh_token(token_info1)
        self.assertEqual(new_token1['access_token'],
                         STUB_IA_TOKEN['access_token'])
        # This token info has a refresh token, so it will be able to be
        # refreshed.
        token_info2 = {'access_token': 'tokentoken', 'token_type': 'Bearer',
                       'expires_in': 3600, 'refresh_token': 'refreshrefresh'}
        new_token2 = self.conn.refresh_token(token_info2)
        self.assertEqual(new_token2['access_token'],
                         STUB_REFRESH_TOKEN['access_token'])
        # Both sets should have refresh info
        self.assertTrue('refresh_token' in new_token1)
        self.assertTrue('refresh_token' in new_token2)
class GoogleAuthTypeTest(GoogleTestCase):
    """Tests for GoogleAuthType.guess_type heuristics."""

    def test_guess(self):
        # A client-id style user id is recognized as Installed App auth.
        self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_IA[0]),
                         GoogleAuthType.IA)
        with mock.patch.object(GoogleAuthType, '_is_gce', return_value=True):
            # Since _is_gce currently depends on the environment, not on
            # parameters, other auths should override GCE. It does not make
            # sense for IA auth to happen on GCE, which is why it's left out.
            self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS[0]),
                             GoogleAuthType.SA)
            self.assertEqual(
                GoogleAuthType.guess_type(GCS_S3_PARAMS[0]),
                GoogleAuthType.GCS_S3)
            self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_GCE[0]),
                             GoogleAuthType.GCE)
class GoogleOAuth2CredentialTest(GoogleTestCase):
    """Tests for GoogleOAuth2Credential token caching/refresh behaviour."""

    def test_init_oauth2(self):
        kwargs = {'auth_type': GoogleAuthType.IA}
        cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
        # If there is a viable token file, this gets used first
        self.assertEqual(cred.token, STUB_TOKEN_FROM_FILE)
        # No token file, get a new token. Check that it gets written to file.
        with mock.patch.object(GoogleOAuth2Credential, '_get_token_from_file',
                               return_value=None):
            cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
            expected = STUB_IA_TOKEN
            expected['expire_time'] = cred.token['expire_time']
            self.assertEqual(cred.token, expected)
            cred._write_token_to_file.assert_called_once_with()

    def test_refresh(self):
        args = list(GCE_PARAMS) + [GoogleAuthType.GCE]
        cred = GoogleOAuth2Credential(*args)
        cred._refresh_token = mock.Mock()
        # Test getting an unexpired access token.
        tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
        cred.token = {'access_token': 'Access Token!',
                      'expire_time': _utc_timestamp(tomorrow)}
        # Accessing the property triggers the (possible) refresh.
        cred.access_token
        self.assertFalse(cred._refresh_token.called)
        # Test getting an expired access token.
        yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
        cred.token = {'access_token': 'Access Token!',
                      'expire_time': _utc_timestamp(yesterday)}
        cred.access_token
        self.assertTrue(cred._refresh_token.called)

    def test_auth_connection(self):
        # Test a bogus auth type
        self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS,
                          **{'auth_type': 'XX'})
        # Try to create an OAuth2 credential when dealing with a GCS S3
        # interoperability auth type.
        self.assertRaises(GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS,
                          **{'auth_type': GoogleAuthType.GCS_S3})
        kwargs = {}
        if SHA256:
            # Service-account flows require PyCrypto for request signing,
            # so only exercise them when it is installed.
            kwargs['auth_type'] = GoogleAuthType.SA
            cred1 = GoogleOAuth2Credential(*GCE_PARAMS_PEM_KEY, **kwargs)
            self.assertTrue(isinstance(cred1.oauth2_conn,
                                       GoogleServiceAcctAuthConnection))
            cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY, **kwargs)
            self.assertTrue(isinstance(cred1.oauth2_conn,
                                       GoogleServiceAcctAuthConnection))
            cred1 = GoogleOAuth2Credential(*GCE_PARAMS_KEY, **kwargs)
            self.assertTrue(isinstance(cred1.oauth2_conn,
                                       GoogleServiceAcctAuthConnection))
        kwargs['auth_type'] = GoogleAuthType.IA
        cred2 = GoogleOAuth2Credential(*GCE_PARAMS_IA, **kwargs)
        self.assertTrue(isinstance(cred2.oauth2_conn,
                                   GoogleInstalledAppAuthConnection))
        kwargs['auth_type'] = GoogleAuthType.GCE
        cred3 = GoogleOAuth2Credential(*GCE_PARAMS_GCE, **kwargs)
        self.assertTrue(isinstance(cred3.oauth2_conn,
                                   GoogleGCEServiceAcctAuthConnection))
class GoogleBaseConnectionTest(GoogleTestCase):
    """
    Tests for GoogleBaseConnection
    """

    def setUp(self):
        # Route auth traffic through the canned-response mock transport.
        GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
        self.mock_scopes = ['https://www.googleapis.com/auth/foo']
        kwargs = {'scopes': self.mock_scopes,
                  'auth_type': GoogleAuthType.IA}
        self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs)

    def test_add_default_headers(self):
        # API requests use JSON and target the googleapis host.
        old_headers = {}
        new_expected_headers = {'Content-Type': 'application/json',
                                'Host': 'www.googleapis.com'}
        new_headers = self.conn.add_default_headers(old_headers)
        self.assertEqual(new_headers, new_expected_headers)

    def test_pre_connect_hook(self):
        # The hook injects the bearer token from the cached token file.
        old_params = {}
        old_headers = {}
        auth_str = '%s %s' % (STUB_TOKEN_FROM_FILE['token_type'],
                              STUB_TOKEN_FROM_FILE['access_token'])
        new_expected_params = {}
        new_expected_headers = {'Authorization': auth_str}
        new_params, new_headers = self.conn.pre_connect_hook(old_params,
                                                             old_headers)
        self.assertEqual(new_params, new_expected_params)
        self.assertEqual(new_headers, new_expected_headers)

    def test_encode_data(self):
        # Request bodies are JSON-encoded.
        data = {'key': 'value'}
        json_data = '{"key": "value"}'
        encoded_data = self.conn.encode_data(data)
        self.assertEqual(encoded_data, json_data)

    def test_has_completed(self):
        # Operation polling: only status 'DONE' counts as completed.
        body1 = {"endTime": "2013-06-26T10:05:07.630-07:00",
                 "id": "3681664092089171723",
                 "kind": "compute#operation",
                 "status": "DONE",
                 "targetId": "16211908079305042870"}
        body2 = {"endTime": "2013-06-26T10:05:07.630-07:00",
                 "id": "3681664092089171723",
                 "kind": "compute#operation",
                 "status": "RUNNING",
                 "targetId": "16211908079305042870"}
        response1 = MockJsonResponse(body1)
        response2 = MockJsonResponse(body2)
        self.assertTrue(self.conn.has_completed(response1))
        self.assertFalse(self.conn.has_completed(response2))

    def test_get_poll_request_kwargs(self):
        # Polling follows the operation's selfLink.
        body = {"endTime": "2013-06-26T10:05:07.630-07:00",
                "id": "3681664092089171723",
                "kind": "compute#operation",
                "selfLink": "https://www.googleapis.com/operations-test"}
        response = MockJsonResponse(body)
        expected_kwargs = {'action':
                           'https://www.googleapis.com/operations-test'}
        kwargs = self.conn.get_poll_request_kwargs(response, None, {})
        self.assertEqual(kwargs, expected_kwargs)

    def test_morph_action_hook(self):
        # Both absolute URLs and bare paths are normalized to a request path
        # under the connection's request_path prefix.
        self.conn.request_path = '/compute/apiver/project/project-name'
        action1 = ('https://www.googleapis.com/compute/apiver/project'
                   '/project-name/instances')
        action2 = '/instances'
        expected_request = '/compute/apiver/project/project-name/instances'
        request1 = self.conn.morph_action_hook(action1)
        request2 = self.conn.morph_action_hook(action2)
        self.assertEqual(request1, expected_request)
        self.assertEqual(request2, expected_request)
class GoogleAuthMockHttp(MockHttp):
    """
    Mock HTTP Class for Google Auth Connections.
    """
    json_hdr = {'content-type': 'application/json; charset=UTF-8'}

    def _o_oauth2_token(self, method, url, body, headers):
        # Choose the canned token matching the grant present in the
        # form-encoded request body: an auth 'code' yields the installed-app
        # token, a 'refresh_token' yields the refreshed token, anything else
        # the plain stub token.
        if 'code' in body:
            stub = STUB_IA_TOKEN
        elif 'refresh_token' in body:
            stub = STUB_REFRESH_TOKEN
        else:
            stub = STUB_TOKEN
        return (httplib.OK, json.dumps(stub), self.json_hdr,
                httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
MarkWaldron/Challenges | Programs/02-99-Bottles-of-Beer/solutions/askl56-bottles-of-beer.py | 2 | 1028 | ones = (
'', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine'
)
prefixes = ('thir', 'four', 'fif', 'six', 'seven', 'eigh', 'nine')
tens = ['', '', 'twenty' ]
teens = ['ten', 'eleven', 'twelve']
for prefix in prefixes:
tens.append(prefix + 'ty')
teens.append(prefix +'teen')
tens[4] = 'forty'
def number(num):
"get the wordy version of a number"
ten, one = divmod(num, 10)
if ten == 0 and one == 0:
return 'no'
elif ten == 0:
return ones[one]
elif ten == 1:
return teens[one]
elif one == 0:
return tens[ten]
else:
return "%s-%s" % (tens[ten], ones[one])
def bottles(beer):
"our rephrase"
return "%s bottle%s of beer" % (
number(beer).capitalize(), 's' if beer > 1 else ''
)
# Driver loop: sing the song from 99 down to 1.
# NOTE: these are Python 2 print statements; the helpers above are
# version-agnostic but this loop requires a Python 2 interpreter.
onthewall = 'on the wall'
takeonedown = 'Take one down, pass it around'
for beer in range(99, 0, -1):
    print bottles(beer), onthewall
    print bottles(beer)
    print takeonedown
    print bottles(beer-1), onthewall
    print
| gpl-2.0 |
ArchangelX360/EnvironmentalEventsDetector | computation/imagefetcher/imagefetcher/fetcher.py | 2 | 12815 | #!/usr/bin/env python2
"""Image fetcher, reaching the Google Earth Engine to get images from it."""
import ee
import requests
import time
import threading
from collections import deque
from datetime import datetime
from utils import Error
# We cannot use a flag here, because of how the application is designed.
DEFAULT_QUERY_PER_SECONDS = 3
OPENSTREETMAP_URL = 'http://nominatim.openstreetmap.org/search'
class RateLimit:
    """Implementation of a rate limiter.

    This class is highly inspired from the Riot Watcher project, with some
    cool functionalities added.
    """

    def __init__(self, allowed_requests, seconds):
        """Constructor.

        Parameters:
            allowed_requests: number of allowed requests during the time frame.
            seconds: time frame, in seconds.
        """
        self.allowed_requests = allowed_requests
        self.seconds = seconds
        # Each entry is the timestamp at which a past request stops counting
        # against the limit (its expiry time), in insertion order.
        self.made_requests = deque()
        self.lock = threading.Lock()

    def _reload(self):
        """Remove old (expired) requests from the window."""
        t = time.time()
        while len(self.made_requests) > 0 and self.made_requests[0] < t:
            self.made_requests.popleft()

    def add_request(self):
        """Add a request to the counter."""
        self.made_requests.append(time.time() + self.seconds)

    def requests_available(self):
        """Check if a request is available.

        Returns:
            False if the rate limit is reached.
        """
        self._reload()
        return len(self.made_requests) < self.allowed_requests

    def __enter__(self):
        """Context management: blocking requests in a threaded context.

        Blocks until a request slot is free, then records the request.
        Bug fix: the request is now counted via add_request() once a slot
        is acquired; previously nothing ever incremented the counter, so
        the limiter never actually limited anything.
        """
        self.lock.acquire()
        while not self.requests_available():
            time.sleep(0.1)
        self.add_request()

    def __exit__(self, *args):
        """Context management: release the lock in threaded context."""
        self.lock.release()
class ImageFetcher:
    """Implementation of the image fetcher."""

    def __init__(self, query_per_seconds=DEFAULT_QUERY_PER_SECONDS):
        """Constructor. Initializes a rate limit.

        Parameters:
            query_per_seconds: number of query per seconds on the backend.
        """
        self.rate_limiter = RateLimit(query_per_seconds, 1)

    def _load_land_mask(self):
        """Load a mask of lands and rivers.

        This mask has 0 values on non-lands part of the image (e.g. oceans or
        rivers) and 1 values else. This should be used as a mask to
        manipulate lands and non lands part of the image
        """
        return (ee.Image('MODIS/051/MCD12Q1/2001_01_01')
                .select(['Land_Cover_Type_1'])
                .neq(0))

    def _GetRGBImage(self, start_date, end_date, geometry, scale):
        """Generates a RGB satellite image of an area within two dates.

        See :meth:`GetRGBImage` for information about the parameters.
        """
        # Get the Landsat 8 collection.
        # TODO(funkysayu) might be good taking a look at other ones.
        raw_collection = (ee.ImageCollection('LANDSAT/LC8_L1T')
                          .filterDate(start_date, end_date)
                          .filterBounds(geometry))
        # Reduce the collection to one image, and clip it to the bounds.
        image = raw_collection.median().clip(geometry)
        # Create a visualization of the image
        visualization = image.visualize(
            min=6000,
            max=18000,
            bands=['B4', 'B3', 'B2'],
        )
        # Finally generate the png
        return visualization.getDownloadUrl({
            'region': geometry.toGeoJSONString(),
            'scale': scale,
            'format': 'png',
        })

    @staticmethod
    def PlaceToGeometry(place_name, place_type=None):
        """Converts a place name to a polygon representation.

        Uses the OpenStreetMap public database to convert a place to a GeoJSON
        representation.

        Parameters:
            place_name: name of the place.
            place_type: type of the place (city, country...).
        Returns:
            A Geometry object representing area of the place.
        """
        params = {
            'format': 'json',
            'polygon_geojson': 1,
            'limit': 1,
        }
        if place_type is None:
            # Free-form search.
            params['q'] = place_name
        else:
            # Structured search: Nominatim expects the place type as the
            # parameter *name* (e.g. city=Paris, country=France).
            # Bug fix: the previous code stored the name under the literal
            # key 'place_type', which Nominatim ignores and which discarded
            # the place_type argument entirely, so typed lookups (e.g. via
            # CityToGeometry) never constrained the search.
            params[place_type] = place_name
        result = requests.get(OPENSTREETMAP_URL, params=params)
        if not result.ok:
            raise Error('Unable to fetch city name. OpenStreetMap status '
                        'code: %s' % result.status_code, 500)
        result_json = result.json()
        if len(result_json) == 0:
            raise Error('Empty result received from the OpenStreetMap.', 500)
        return ee.Geometry(result_json[0].get("geojson", []))

    @staticmethod
    def CityToGeometry(city_name):
        """Converts a city name to a polygon representation.

        Uses the OpenStreetMap public database to convert a city to a GeoJSON
        representation.

        Parameters:
            city_name: name of the city.
        Returns:
            A Geometry object representing area of the city.
        """
        return ImageFetcher.PlaceToGeometry(city_name, place_type='city')

    @staticmethod
    def CountryToGeometry(country_name):
        """Converts a country name to a polygon representation.

        Parameters:
            country_name: name of the country.
        Returns:
            A Geometry object representing area of the country.
        """
        # Public fusion table of country boundaries.
        feature_id = 'ft:1tdSwUL7MVpOauSgRzqVTOwdfy17KDbw-1d9omPw'
        countries = ee.FeatureCollection(feature_id)
        name = country_name.capitalize()
        server_geo = countries.filter(ee.Filter.eq('Country', name)).geometry()
        # At this point, the geometry is still server side. As we need to
        # generate the geo json object in order to specify a region to fetch,
        # we will dump the object data and put it in a new, client side
        # geometry.
        return ee.Geometry(server_geo.getInfo())

    @staticmethod
    def VerticesToGeometry(vertices):
        """Converts a list of vertices to an Earth Engine geometry.

        Parameters:
            vertices: A list of vertices representing a polygon.
        Returns:
            The Geometry object corresponding to these vertices.
        """
        return ee.Geometry.Polygon(vertices)

    @staticmethod
    def GeometryToRectangle(geometry):
        """Converts a polygon geometry to the minimal rectangle containing it.

        Parameters:
            geometry: Computed geometry to convert.
        Returns:
            The minimal Geometry.Rectangle object containing the input.
        """
        def get_rectangle_bounds(parts):
            """Returns the minimal rectangle containing all parts of the
            polygon.
            """
            x_min, y_min = float('inf'), float('inf')
            x_max, y_max = -x_min, -y_min
            for part in parts:
                for x, y in part:
                    x_min, y_min = min(x, x_min), min(y, y_min)
                    x_max, y_max = max(x, x_max), max(y, y_max)
            return x_min, y_min, x_max, y_max

        geo_json = geometry.toGeoJSON()

        # For Polygon, simply return the minimal rectangle containing the
        # polygon.
        if geo_json['type'] == 'Polygon':
            return ee.Geometry.Rectangle(*get_rectangle_bounds(
                geo_json['coordinates']))

        if geo_json['type'] != 'MultiPolygon':
            raise Error('Unsupported polygon type: %s' % geo_json['type'], 500)

        # At this point, all geo JSON are of type MultiPolygon. Since some
        # requests may contain multiple points on the earth (such as France's
        # DOM-TOM), we cannot generate a rectangle containing all this point
        # (the Earth Engine API will not appreciate).
        # We simply get the largest rectangle generated from the polygons. This
        # may not be accurate, but at least it works!
        def distance(x_min, y_min, x_max, y_max):
            """Manhattan distance within two 2D points."""
            return x_max - x_min + y_max - y_min

        max_distance, max_bounds = 0, None
        for parts in geo_json['coordinates']:
            bounds = get_rectangle_bounds(parts)
            bounds_distance = distance(*bounds)
            if bounds_distance > max_distance:
                max_distance, max_bounds = bounds_distance, bounds
        return ee.Geometry.Rectangle(*max_bounds)

    def _GetForestIndicesImage(self, start_year, end_year, geometry, scale):
        """Generates a RGB image representing forestation within two years

        See :meth:`GetForestIndicesImage` for information about the parameters.
        """
        mask = self._load_land_mask()
        # Within many datasets, the MODIS/MOD13A1 is the most accurate on the
        # amazon rainforest. Other datasets contains noise on some part of the
        # image. Also select EVI, which is more accurate than NDVI here.
        collection = ee.ImageCollection('MODIS/MOD13A1').select(['EVI'])
        # Do the difference between EVI on one year.
        older_evi = collection.filterDate(datetime(start_year, 1, 1),
                                          datetime(start_year, 12, 31)).median()
        newest_evi = collection.filterDate(datetime(end_year, 1, 1),
                                           datetime(end_year, 12, 31)).median()
        difference = newest_evi.subtract(older_evi)
        # Set to 0 masked parts, and remove the mask. Thanks to this, image
        # will still be generated on masked parts.
        difference = difference.where(mask.eq(0), 0).unmask()
        # Get negatives (deforestation) and positives (reforestation) parts.
        positives = difference.where(difference.lt(0), 0)
        negatives = difference.where(difference.gte(0), 0).abs()
        # The estimated scale is from 0 to 2000 values. Set the mask to 0 where
        # there is lands and 2000 elsewhere.
        scaled_mask = mask.where(mask.eq(0), 2000).where(mask.eq(1), 0)
        rgb_image = ee.Image.rgb(negatives, positives, scaled_mask)
        clipped = rgb_image.clip(geometry)
        return clipped.visualize(min=0, max=2000).getDownloadURL({
            'region': geometry.toGeoJSONString(),
            'scale': scale,
            'format': 'png',
        })

    def GetRGBImage(self, start_date, end_date, geometry, scale=100):
        """Generates a RGB satellite image of an area within two dates.

        Parameters:
            start_date: images in the collection generating the final picture
                must have a later date than this one.
            end_date: images in the collection generating the final picture
                must have a earlier date than this one.
            geometry: area to fetch. Earth Enging Geometry object.
            scale: image resolution, in meters per pixels.
        Returns:
            An URL to the generated image.
        """
        with self.rate_limiter:
            return self._GetRGBImage(start_date, end_date, geometry, scale)

    def GetForestIndicesImage(self, start_year, end_year, geometry, scale):
        """Generates a RGB image representing forestation within two years.

        Generates a RGB image where red green and blue channels correspond
        respectively to deforestation, reforestation and non land values. Non
        land values (blue channel) is set to 255 if the pixel is over non-land
        field (such as ocean, rivers...) and 0 elsewhere.

        Analysis are done over one year to ensure weather metrics will not
        polute the result. We also use the Enhanced Vegetation Index (EVI)
        instead of the Normalized Difference Vegetation Index (NDVI) because
        the accuracy is better on this dataset.

        The dataset used to generate this image is the MOD13A1.005 Vegetation
        Indices 16-Day L3 Global 500m [1], provided publicly by the NASA. It is
        updated every 16 days, with a maximum scale of 500 meter per pixels.
        This is the most accurate on the amazon rainforest.

        Parameters:
            start_year: integer representing the reference year. Must be
                greater than or equal to 2000.
            end_year: integer representing the year on which we will subtract
                the data generated from the start_year. Must be greater than
                start_year, and lower than or equal to the current year.
            geometry: area to fetch; Earth Engin Geometry object.
            scale: image resolution, in meters per pixels.
        Returns:
            An URL to the generated image.
        """
        with self.rate_limiter:
            return self._GetForestIndicesImage(start_year, end_year, geometry,
                                               scale)
| mit |
cmvelo/ansible | lib/ansible/executor/task_queue_manager.py | 3 | 14009 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import multiprocessing
import os
import tempfile
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.play_iterator import PlayIterator
from ansible.executor.process.result import ResultProcess
from ansible.executor.stats import AggregateStats
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
from ansible.vars.hostvars import HostVars
from ansible.plugins.callback import CallbackBase
from ansible.utils.unicode import to_unicode
from ansible.compat.six import string_types
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['TaskQueueManager']
class TaskQueueManager:
'''
This class handles the multiprocessing requirements of Ansible by
creating a pool of worker forks, a result handler fork, and a
manager object with shared datastructures/queues for coordinating
work between all processes.
The queue manager is responsible for loading the play strategy plugin,
which dispatches the Play's tasks to hosts.
'''
def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
    """Set up shared state for a play run.

    Stores the inventory/vars/loader collaborators, prepares callback and
    handler bookkeeping, and creates the pre-fork resources (result queue,
    connection lock file) that worker processes will inherit.
    """
    self._inventory = inventory
    self._variable_manager = variable_manager
    self._loader = loader
    self._options = options
    self._stats = AggregateStats()
    self.passwords = passwords
    # stdout_callback may be a plugin name (str) or a CallbackBase instance;
    # resolved later in load_callbacks().
    self._stdout_callback = stdout_callback
    self._run_additional_callbacks = run_additional_callbacks
    self._run_tree = run_tree

    self._callbacks_loaded = False
    self._callback_plugins = []
    self._start_at_done = False
    self._result_prc = None

    # make sure the module path (if specified) is parsed and
    # added to the module_loader object
    if options.module_path is not None:
        for path in options.module_path.split(os.pathsep):
            module_loader.add_directory(path)

    # a special flag to help us exit cleanly
    self._terminated = False

    # this dictionary is used to keep track of notified handlers
    self._notified_handlers = dict()

    # dictionaries to keep track of failed/unreachable hosts
    self._failed_hosts = dict()
    self._unreachable_hosts = dict()

    self._final_q = multiprocessing.Queue()

    # A temporary file (opened pre-fork) used by connection
    # plugins for inter-process locking.
    self._connection_lockfile = tempfile.TemporaryFile()
def _initialize_processes(self, num):
    """Create per-worker result queues and start the result process."""
    # One [worker, result-queue] slot per requested fork; the worker slot
    # starts as a None placeholder and is attached later.
    self._workers = [[None, multiprocessing.Queue()] for _ in range(num)]

    # A single result process drains the per-worker queues into _final_q.
    self._result_prc = ResultProcess(self._final_q, self._workers)
    self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
    '''
    Clears and initializes the shared notified handlers dict with entries
    for each handler in the play, which is an empty array that will contain
    inventory hostnames for those hosts triggering the handler.
    '''

    # Zero the dictionary first by removing any entries there.
    # Proxied dicts don't support iteritems, so we have to use keys().
    # Bug fix: iterate over a *snapshot* of the keys — deleting entries
    # while iterating a live keys() view raises RuntimeError on Python 3.
    for key in list(self._notified_handlers.keys()):
        del self._notified_handlers[key]

    def _process_block(b):
        # Recursively flatten nested Blocks into a flat list of tasks.
        temp_list = []
        for t in b.block:
            if isinstance(t, Block):
                temp_list.extend(_process_block(t))
            else:
                temp_list.append(t)
        return temp_list

    handler_list = []
    for handler_block in handlers:
        handler_list.extend(_process_block(handler_block))

    # then initialize it with the handler names from the handler list
    for handler in handler_list:
        self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
    '''
    Loads all available callbacks, with the exception of those which
    utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
    only one such callback plugin will be loaded.
    '''

    # Idempotent: a second call is a no-op.
    if self._callbacks_loaded:
        return

    stdout_callback_loaded = False
    if self._stdout_callback is None:
        self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK

    # _stdout_callback may be a ready-made instance or a plugin name.
    if isinstance(self._stdout_callback, CallbackBase):
        stdout_callback_loaded = True
    elif isinstance(self._stdout_callback, string_types):
        if self._stdout_callback not in callback_loader:
            raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
        else:
            self._stdout_callback = callback_loader.get(self._stdout_callback)
            stdout_callback_loaded = True
    else:
        raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")

    for callback_plugin in callback_loader.all(class_only=True):
        # Only v2 (CALLBACK_VERSION >= 2.0) plugins are considered here.
        if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
            # we only allow one callback of type 'stdout' to be loaded, so check
            # the name of the current plugin and type to see if we need to skip
            # loading this callback plugin
            callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
            callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
            (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
            if callback_type == 'stdout':
                # Skip any stdout plugin other than the chosen one, and any
                # further stdout plugin once one has been loaded.
                if callback_name != self._stdout_callback or stdout_callback_loaded:
                    continue
                stdout_callback_loaded = True
            elif callback_name == 'tree' and self._run_tree:
                # 'tree' is force-enabled by the --tree option.
                pass
            elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
                # Skip non-whitelisted or explicitly disabled callbacks.
                continue

            self._callback_plugins.append(callback_plugin())

    self._callbacks_loaded = True
def run(self, play):
    '''
    Iterates over the roles/tasks in a play, using the given (or default)
    strategy for queueing tasks. The default is the linear strategy, which
    operates like classic Ansible by keeping all hosts in lock-step with
    a given task (meaning no hosts move on to the next task until all hosts
    are done with the current task).

    Returns the result code produced by the strategy's run().
    '''
    if not self._callbacks_loaded:
        self.load_callbacks()
    # gather play vars first so post_validate can template everything
    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
    templar = Templar(loader=self._loader, variables=all_vars)
    # work on a copy so the caller's play object is left untouched
    new_play = play.copy()
    new_play.post_validate(templar)
    self.hostvars = HostVars(
        inventory=self._inventory,
        variable_manager=self._variable_manager,
        loader=self._loader,
    )
    # Fork # of forks, # of hosts or serial, whichever is lowest
    contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
    contenders = [ v for v in contenders if v is not None and v > 0 ]
    # NOTE(review): if every contender is filtered out (e.g. zero matching
    # hosts and no forks/serial), min() on an empty list raises -- confirm
    # callers guard against that.
    self._initialize_processes(min(contenders))
    play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
    for callback_plugin in self._callback_plugins:
        if hasattr(callback_plugin, 'set_play_context'):
            callback_plugin.set_play_context(play_context)
    self.send_callback('v2_playbook_on_play_start', new_play)
    # initialize the shared dictionary containing the notified handlers
    self._initialize_notified_handlers(new_play.handlers)
    # load the specified strategy (or the default linear one)
    strategy = strategy_loader.get(new_play.strategy, self)
    if strategy is None:
        raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
    # build the iterator
    iterator = PlayIterator(
        inventory=self._inventory,
        play=new_play,
        play_context=play_context,
        variable_manager=self._variable_manager,
        all_vars=all_vars,
        start_at_done = self._start_at_done,
    )
    # Because the TQM may survive multiple play runs, we start by marking
    # any hosts as failed in the iterator here which may have been marked
    # as failed in previous runs. Then we clear the internal list of failed
    # hosts so we know what failed this round.
    for host_name in self._failed_hosts.keys():
        host = self._inventory.get_host(host_name)
        iterator.mark_host_failed(host)
    self.clear_failed_hosts()
    # during initialization, the PlayContext will clear the start_at_task
    # field to signal that a matching task was found, so check that here
    # and remember it so we don't try to skip tasks on future plays
    if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
        self._start_at_done = True
    # and run the play using the strategy and cleanup on way out
    play_return = strategy.run(iterator, play_context)
    # now re-save the hosts that failed from the iterator to our internal list
    for host_name in iterator.get_failed_hosts():
        self._failed_hosts[host_name] = True
    self._cleanup_processes()
    return play_return
def cleanup(self):
    """Terminate the run and tear down the final result queue and worker processes."""
    display.debug("RUNNING CLEANUP")
    self.terminate()
    self._final_q.close()
    self._cleanup_processes()
def _cleanup_processes(self):
    """Terminate the result-reader process and every still-alive worker process."""
    # stop the result reader first so nothing keeps consuming from the
    # queues we are about to close
    if self._result_prc:
        self._result_prc.terminate()
    for (worker_prc, rslt_q) in self._workers:
        rslt_q.close()
        if worker_prc and worker_prc.is_alive():
            try:
                worker_prc.terminate()
            except AttributeError:
                # NOTE(review): presumably guards a partially-constructed
                # worker object without a terminate() -- confirm
                pass
def clear_failed_hosts(self):
    """Forget which hosts failed during previous play runs."""
    self._failed_hosts = {}
def get_inventory(self):
    """Accessor for the inventory object used by this manager."""
    return self._inventory
def get_variable_manager(self):
    """Accessor for the variable manager used by this manager."""
    return self._variable_manager
def get_loader(self):
    """Accessor for the data loader used by this manager."""
    return self._loader
def get_notified_handlers(self):
    """Return the per-handler notification lists, keyed by handler name."""
    return self._notified_handlers
def get_workers(self):
    """Return a shallow copy of the worker list so callers cannot mutate it."""
    return list(self._workers)
def terminate(self):
    """Set the terminated flag; this only flags the run, no processes are killed here."""
    self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
    """Invoke *method_name* on the stdout callback and every loaded callback plugin.

    Falls back from the v2 method name to the v1 name when a plugin lacks
    the v2 variant; exceptions from plugins are logged, never propagated.
    """
    for callback_plugin in [self._stdout_callback] + self._callback_plugins:
        # a plugin that set self.disabled to True will not be called
        # see osx_say.py example for such a plugin
        if getattr(callback_plugin, 'disabled', False):
            continue
        # try to find v2 method, fallback to v1 method, ignore callback if no method found
        methods = []
        for possible in [method_name, 'v2_on_any']:
            gotit = getattr(callback_plugin, possible, None)
            if gotit is None:
                # v1 name is the v2 name with the "v2_" prefix stripped
                gotit = getattr(callback_plugin, possible.replace('v2_',''), None)
            if gotit is not None:
                methods.append(gotit)
        for method in methods:
            try:
                # temporary hack, required due to a change in the callback API, so
                # we don't break backwards compatibility with callbacks which were
                # designed to use the original API
                # FIXME: target for removal and revert to the original code here after a year (2017-01-14)
                if method_name == 'v2_playbook_on_start':
                    import inspect
                    (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
                    if 'playbook' in f_args:
                        method(*args, **kwargs)
                    else:
                        # old-style callbacks took no playbook argument
                        method()
                else:
                    method(*args, **kwargs)
            except Exception as e:
                #TODO: add config toggle to make this fatal or not?
                display.warning(u"Failure using method (%s) in callback plugin (%s): %s" % (to_unicode(method_name), to_unicode(callback_plugin), to_unicode(e)))
                from traceback import format_tb
                from sys import exc_info
                display.debug('Callback Exception: \n' + ' '.join(format_tb(exc_info()[2])))
| gpl-3.0 |
jonesgithub/zulip | zerver/lib/email_mirror.py | 113 | 10159 | from __future__ import absolute_import
import logging
import re
from email.header import decode_header
from django.conf import settings
from zerver.lib.actions import decode_email_address, internal_send_message
from zerver.lib.notifications import convert_html_to_markdown
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.upload import upload_message_image
from zerver.lib.utils import generate_random_token
from zerver.models import Stream, Recipient, get_user_profile_by_email, \
get_user_profile_by_id, get_display_recipient, get_recipient
logger = logging.getLogger(__name__)
def redact_stream(error_message):
    """Mask any stream name appearing before the gateway domain in
    *error_message* with 'X' characters of the same length."""
    gateway_domain = settings.EMAIL_GATEWAY_PATTERN.rsplit('@')[-1]
    match = re.search(r'\b(.*?)@' + gateway_domain, error_message)
    if match is None:
        return error_message
    stream_name = match.group(1)
    return error_message.replace(stream_name, "X" * len(stream_name))
def report_to_zulip(error_message):
    """Post *error_message* as a fenced code block to the admin realm's "errors" stream."""
    error_stream = Stream.objects.get(name="errors", realm__domain=settings.ADMIN_DOMAIN)
    send_zulip(error_stream, "email mirror error",
               """~~~\n%s\n~~~""" % (error_message,))
def log_and_report(email_message, error_message, debug_info):
    """Log a scrubbed email-mirror failure and mirror it to the errors stream.

    Stream names are redacted before the message leaves this process;
    *debug_info* may carry 'to' and 'stream' keys gathered during processing.
    """
    scrubbed_error = "Sender: %s\n%s" % (email_message.get("From"),
                                         redact_stream(error_message))
    if "to" in debug_info:
        scrubbed_error = "Stream: %s\n%s" % (redact_stream(debug_info["to"]),
                                             scrubbed_error)
    if "stream" in debug_info:
        scrubbed_error = "Realm: %s\n%s" % (debug_info["stream"].realm.domain,
                                            scrubbed_error)
    logger.error(scrubbed_error)
    report_to_zulip(scrubbed_error)
# Temporary missed message addresses
redis_client = get_redis_client()
def missed_message_redis_key(token):
    """Redis key under which the data for this missed-message token is stored."""
    return 'missed_message:%s' % (token,)
def is_missed_message_address(address):
    """True if *address* has a missed-message local part: 'mm' + 32-char token."""
    local_part, _, _ = address.partition('@')
    return len(local_part) == 34 and local_part.startswith('mm')
def get_missed_message_token_from_address(address):
    """Return the 32-character token from a missed-message address.

    Raises ZulipEmailForwardError if the local part is not of the form
    'mm' + 32-character token.
    """
    local_part = address.split('@')[0]
    # Bug fix: the original validated the *whole address* ("not
    # address.startswith('mm') and len(address) != 34"), which both tested
    # the wrong string and used the wrong boolean connective, so malformed
    # local parts such as 'mmabc' slipped through and yielded bogus tokens.
    if not (local_part.startswith('mm') and len(local_part) == 34):
        raise ZulipEmailForwardError('Could not parse missed message address')
    # strip the leading 'mm' marker, leaving the 32-character token
    return local_part[2:]
def create_missed_message_address(user_profile, message):
    """Mint a single-use reply-to address for a missed-message email.

    Stores (user, recipient, subject) in redis under a random token for
    five days; replying to the generated address routes the reply back
    into the original conversation.
    """
    if message.recipient.type == Recipient.PERSONAL:
        # We need to reply to the sender so look up their personal recipient_id
        recipient_id = get_recipient(Recipient.PERSONAL, message.sender_id).id
    else:
        recipient_id = message.recipient_id
    data = {
        'user_profile_id': user_profile.id,
        'recipient_id': recipient_id,
        'subject': message.subject,
    }
    while True:
        token = generate_random_token(32)
        key = missed_message_redis_key(token)
        # hsetnx succeeds only for a fresh key, so a token collision retries
        if redis_client.hsetnx(key, 'uses_left', 1):
            break
    with redis_client.pipeline() as pipeline:
        pipeline.hmset(key, data)
        pipeline.expire(key, 60 * 60 * 24 * 5)  # 5 days
        pipeline.execute()
    address = 'mm' + token
    return settings.EMAIL_GATEWAY_PATTERN % (address, )
def mark_missed_message_address_as_used(address):
    """Consume one use of a missed-message address, deleting it once exhausted."""
    token = get_missed_message_token_from_address(address)
    key = missed_message_redis_key(token)
    with redis_client.pipeline() as pipeline:
        pipeline.hincrby(key, 'uses_left', -1)
        pipeline.expire(key, 60 * 60 * 24 * 5)
        # first pipeline result is the decremented uses_left counter
        new_value = pipeline.execute()[0]
    if new_value < 0:
        redis_client.delete(key)
        raise ZulipEmailForwardError('Missed message address has already been used')
def send_to_missed_message_address(address, message):
    """Deliver an email sent to a missed-message address as a Zulip message.

    Looks up the stored (user, recipient, subject) for the address token
    and re-sends the email body into that conversation as the user.
    """
    token = get_missed_message_token_from_address(address)
    key = missed_message_redis_key(token)
    result = redis_client.hmget(key, 'user_profile_id', 'recipient_id', 'subject')
    if not all(val is not None for val in result):
        raise ZulipEmailForwardError('Missing missed message address data')
    user_profile_id, recipient_id, subject = result
    user_profile = get_user_profile_by_id(user_profile_id)
    recipient = Recipient.objects.get(id=recipient_id)
    display_recipient = get_display_recipient(recipient)
    # Testing with basestring so we don't depend on the list return type from
    # get_display_recipient
    if not isinstance(display_recipient, basestring):
        display_recipient = ','.join([user['email'] for user in display_recipient])
    body = filter_footer(extract_body(message))
    body += extract_and_upload_attachments(message, user_profile.realm)
    if not body:
        # empty messages are not allowed, so substitute a placeholder
        body = '(No email body)'
    if recipient.type == Recipient.STREAM:
        recipient_type_name = 'stream'
    else:
        recipient_type_name = 'private'
    internal_send_message(user_profile.email, recipient_type_name,
                          display_recipient, subject, body)
## Sending the Zulip ##
class ZulipEmailForwardError(Exception):
    """Raised when an inbound mirrored email cannot be turned into a Zulip message."""
    pass
def send_zulip(stream, topic, content):
    """Send *content* to *stream*/*topic* as the email gateway bot.

    The topic is truncated to 60 characters and the body to 2000.
    """
    internal_send_message(
        settings.EMAIL_GATEWAY_BOT,
        "stream",
        stream.name,
        topic[:60],
        content[:2000],
        stream.realm)
def valid_stream(stream_name, token):
    """True if *token* identifies a stream whose name matches *stream_name*
    case-insensitively; False if no stream has that email token."""
    try:
        stream = Stream.objects.get(email_token=token)
        return stream.name.lower() == stream_name.lower()
    except Stream.DoesNotExist:
        return False
def get_message_part_by_type(message, content_type):
    """Return the decoded payload of the first MIME part of *content_type*.

    The payload is decoded from its transfer encoding; if the part declares
    a charset it is additionally decoded to text (errors ignored). Returns
    None when no part matches.
    """
    charsets = message.get_charsets()
    for index, part in enumerate(message.walk()):
        if part.get_content_type() != content_type:
            continue
        payload = part.get_payload(decode=True)
        charset = charsets[index]
        if charset:
            payload = payload.decode(charset, errors="ignore")
        return payload
def extract_body(message):
    """Return the best available body text for *message*.

    A plaintext part wins outright; otherwise an HTML part is converted
    to markdown. Raises ZulipEmailForwardError when neither exists.
    """
    plaintext = get_message_part_by_type(message, "text/plain")
    if plaintext:
        return plaintext
    html = get_message_part_by_type(message, "text/html")
    if html:
        return convert_html_to_markdown(html)
    raise ZulipEmailForwardError("Unable to find plaintext or HTML message body")
def filter_footer(text):
    """Strip an obvious '--'-delimited signature footer from *text*.

    Only acts when exactly one line starts with '--'; otherwise the text is
    returned unchanged, to avoid scrubbing real content.
    """
    # Bug fix: filter() returns an iterator on Python 3, so len() on its
    # result raised TypeError; a list comprehension works on both 2 and 3.
    possible_footers = [line for line in text.split("\n")
                        if line.strip().startswith("--")]
    if len(possible_footers) != 1:
        # Be conservative and don't try to scrub content if there
        # isn't a trivial footer structure.
        return text
    return text.partition("--")[0].strip()
def extract_and_upload_attachments(message, realm):
    """Upload every named attachment in *message* and return markdown links.

    Links are returned one per line; non-multipart messages yield "".
    Uploads are performed as the email gateway bot into *realm*.
    """
    user_profile = get_user_profile_by_email(settings.EMAIL_GATEWAY_BOT)
    attachment_links = []
    payload = message.get_payload()
    if not isinstance(payload, list):
        # This is not a multipart message, so it can't contain attachments.
        return ""
    for part in payload:
        content_type = part.get_content_type()
        filename = part.get_filename()
        # only parts that declare a filename are treated as attachments
        if filename:
            s3_url = upload_message_image(filename, content_type,
                                          part.get_payload(decode=True),
                                          user_profile,
                                          target_realm=realm)
            formatted_link = "[%s](%s)" % (filename, s3_url)
            attachment_links.append(formatted_link)
    return "\n".join(attachment_links)
def extract_and_validate(email):
    """Decode a stream gateway address and return the matching Stream.

    Raises ZulipEmailForwardError when the address cannot be parsed or the
    embedded token does not match the named stream.
    """
    try:
        stream_name, token = decode_email_address(email)
    except (TypeError, ValueError):
        raise ZulipEmailForwardError("Malformed email recipient " + email)
    if not valid_stream(stream_name, token):
        raise ZulipEmailForwardError("Bad stream token from email recipient " + email)
    return Stream.objects.get(email_token=token)
def find_emailgateway_recipient(message):
    """Return the first recipient address matching EMAIL_GATEWAY_PATTERN.

    Raises ZulipEmailForwardError when no recipient header matches.
    """
    # We can't use Delivered-To; if there is a X-Gm-Original-To
    # it is more accurate, so try to find the most-accurate
    # recipient list in descending priority order
    recipient_headers = ["X-Gm-Original-To", "Delivered-To", "To"]
    recipients = []
    for recipient_header in recipient_headers:
        r = message.get_all(recipient_header, None)
        if r:
            recipients = r
            break
    # turn "prefix%s@domain" into a regex: escape the literal pieces around
    # the %s placeholder and match anything in between
    pattern_parts = [re.escape(part) for part in settings.EMAIL_GATEWAY_PATTERN.split('%s')]
    match_email_re = re.compile(".*?".join(pattern_parts))
    for recipient_email in recipients:
        if match_email_re.match(recipient_email):
            return recipient_email
    raise ZulipEmailForwardError("Missing recipient in mirror email")
def process_stream_message(to, subject, message, debug_info):
    """Forward an email addressed to a stream gateway address into that stream."""
    stream = extract_and_validate(to)
    body = filter_footer(extract_body(message))
    body += extract_and_upload_attachments(message, stream.realm)
    # recorded so that error reports can name the realm
    debug_info["stream"] = stream
    if not body:
        # You can't send empty Zulips, so to avoid confusion over the
        # email forwarding failing, set a dummy message body.
        body = "(No email body)"
    send_zulip(stream, subject, body)
def process_missed_message(to, message, pre_checked):
    """Handle a reply sent to a missed-message address.

    *pre_checked* skips consuming the single-use token (the caller already did).
    """
    if not pre_checked:
        mark_missed_message_address_as_used(to)
    send_to_missed_message_address(to, message)
def process_message(message, rcpt_to=None, pre_checked=False):
    """Entry point: route one mirrored email to a stream or missed-message handler.

    Forwarding errors are logged and reported, never raised to the caller.
    """
    subject = decode_header(message.get("Subject", "(no subject)"))[0][0]
    debug_info = {}
    try:
        if rcpt_to is not None:
            to = rcpt_to
        else:
            to = find_emailgateway_recipient(message)
        debug_info["to"] = to
        if is_missed_message_address(to):
            process_missed_message(to, message, pre_checked)
        else:
            process_stream_message(to, subject, message, debug_info)
    # NOTE(review): Python 2-only "except X, e" syntax; needs "as e" on Python 3.
    except ZulipEmailForwardError, e:
        # TODO: notify sender of error, retry if appropriate.
        log_and_report(message, e.message, debug_info)
| apache-2.0 |
LCOGT/whatsup | whatsup/urls.py | 1 | 1040 | """
WhatsUP: astronomical object suggestions for Las Cumbres Observatory Global Telescope Network
Copyright (C) 2014-2015 LCOGT
urls.py
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
from django.urls import include, path
from . import views
# URL routes for the WhatsUP API. Django resolves these top-down, so the more
# specific 'search/v2/' route must stay ahead of 'search/'.
urlpatterns = [
    path('', views.api_root, name='apiroot'),
    path('target/', views.TargetDetailView.as_view(), name="api_target"),
    path('search/v2/', views.TargetListView.as_view(), name="api_v2_search"),
    path('search/', views.TargetListView.as_view(), name="api_search"),
    path('range/', views.TargetListRangeView.as_view(), name="api_range"),
]
| gpl-3.0 |
Dhivyap/ansible | lib/ansible/plugins/doc_fragments/k8s_resource_options.py | 40 | 1295 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Options for providing an object configuration
class ModuleDocFragment(object):
    """Reusable documentation fragment describing how a Kubernetes object
    configuration may be supplied (inline resource_definition vs. src file)."""
    DOCUMENTATION = r'''
options:
  resource_definition:
    description:
    - "Provide a valid YAML definition (either as a string, list, or dict) for an object when creating or updating. NOTE: I(kind), I(api_version), I(name),
      and I(namespace) will be overwritten by corresponding values found in the provided I(resource_definition)."
    aliases:
    - definition
    - inline
  src:
    description:
    - "Provide a path to a file containing a valid YAML definition of an object or objects to be created or updated. Mutually
      exclusive with I(resource_definition). NOTE: I(kind), I(api_version), I(name), and I(namespace) will be
      overwritten by corresponding values found in the configuration read in from the I(src) file."
    - Reads from the local file system. To read from the Ansible controller's file system, including vaulted files, use the file lookup
      plugin or template lookup plugin, combined with the from_yaml filter, and pass the result to
      I(resource_definition). See Examples below.
    type: path
'''
| gpl-3.0 |
IOArmory/quarterbackpython | lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Shift-JIS charset prober: drives a coding state machine over the input
    and feeds recognized characters to distribution and context analyzers,
    reporting whichever confidence is higher."""
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()
    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()
    def get_charset_name(self):
        # delegate: the context analyzer decides the exact charset name
        return self._mContextAnalyzer.get_charset_name()
    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the prober state after the chunk.

        NOTE(review): an empty aBuf would make aBuf[aLen - 1] evaluate
        aBuf[-1] and raise IndexError -- presumably callers never feed
        empty buffers; confirm.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            # advance the state machine one byte at a time
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # a complete character ended here; hand it to the analyzers
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # the character may straddle the previous chunk boundary,
                    # so combine it with the bytes saved in _mLastChar
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # remember the final byte for characters spanning chunk boundaries
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # shortcut: stop probing once confidence is high enough
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()
    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
sowravi/sp17-i524 | project/S17-IO-3017/code/projectearth/kmeansplot.py | 14 | 3018 | import requests
import time
import dblayer
import plotly
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import random
from sklearn.cluster import KMeans
import testfile
NUM_CLUSTER = 3
def generate_color():
    """Return a random CSS hex color string of the form '#rrggbb'."""
    # Bug fix: an RGB color always has exactly three channels, but the
    # original drew one random byte per *cluster* (range(NUM_CLUSTER)),
    # which only matched the three format placeholders by coincidence while
    # NUM_CLUSTER == 3 and would crash for any other cluster count.
    r, g, b = (random.randint(0, 255) for _ in range(3))
    return '#{:02x}{:02x}{:02x}'.format(r, g, b)
# Create random colors in list
color_list = []
for i in range(NUM_CLUSTER):
    color_list.append(generate_color())  # one color per cluster, indexed by cluster label
def showMagnitudesInCluster(data):
    """Cluster samples with k-means and return an embeddable plotly HTML div.

    data: numpy array whose rows are [depth, magnitude] pairs -- presumably
    in that column order, given the axis titles below; TODO confirm, since
    the pandas column order in mkMag() determines which axis is which.
    """
    kmeans = KMeans(n_clusters=NUM_CLUSTER)
    kmeans.fit(data)
    labels = kmeans.labels_
    centroids = kmeans.cluster_centers_
    plot_data = []
    for i in range(NUM_CLUSTER):
        # select the points assigned to cluster i
        ds = data[np.where(labels == i)]
        clustername = "Cluster " + str(i+1)
        # NOTE(review): showlegend='false' is a truthy *string*, not the
        # boolean False -- confirm whether hiding the legend was intended.
        trace = go.Scatter(x=ds[:, 0], y=ds[:, 1], mode='markers', showlegend='false', name=clustername, marker=dict(size=5, color=color_list[i]))
        plot_data.append(trace)
        # plot the centroids
        trace = go.Scatter(x=centroids[i, 0], y=centroids[i, 1], mode='markers', marker=dict(size=10, color='black'))
        plot_data.append(trace)
    layout = go.Layout(title='Magnitude Vs. Depth - K-Means Clusters', titlefont=dict(family='Courier New, monospace',size=20,color='#7f7f7f'),
                       xaxis=dict(title='Depth of Earthquake', titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f')),
                       yaxis=dict(title='Magnitude',titlefont=dict(family='Courier New, monospace',size=18,color='#7f7f7f'))
                       )
    fig = go.Figure(data=plot_data, layout=layout)
    # output_type='div' returns an embeddable <div> string instead of writing a file
    div = plotly.offline.plot(fig, include_plotlyjs=True, output_type='div')
    return div
def mkMag():
    """Read magnitude/depth pairs from MongoDB, cluster and plot them, and
    return a complete HTML page embedding the plot.

    Timing of the database read and of the clustering/plotting phases is
    appended to the log file via testfile.classFileWrite.
    """
    #### TME: Get start time
    start_time = time.time()
    ####
    sess = requests.Session()  # NOTE(review): never used below -- confirm it can be removed
    dbobj = dblayer.classDBLayer()
    # project each document down to magnitude plus the third coordinate (depth)
    projection = [
        {"$project": {"_id": 0, "mag": "$properties.mag", "depth": {"$arrayElemAt": ["$geometry.coordinates", 2]}}}]
    dframe_mag = pd.DataFrame(list(dbobj.doaggregate(projection)))
    #### TME: Elapsed time taken to read data from MongoDB
    fileobj = testfile.classFileWrite()
    elapsed = time.time() - start_time
    fileobj.writeline()
    str1 = str(elapsed) + " secs required to read " + str(dframe_mag['depth'].count()) + " records from database."
    fileobj.writelog("Reading Magnitude and Depth data")
    fileobj.writelog(str1)
    ####
    #### TME: Get start time
    start_time = time.time()
    ####
    div = showMagnitudesInCluster(dframe_mag.values)
    # wrap the plotly div in a minimal HTML document
    response = """<html><title></title><head><meta charset=\"utf8\"> </head> <body>""" + div + """</body> </html>"""
    #### TME: Elapsed time taken to cluster and plot data
    elapsed = time.time() - start_time
    fileobj.writeline()
    str1 = "Applying K-Means clustering and plotting its output \n" + "Time taken: " + str(elapsed)
    fileobj.writelog(str1)
    fileobj.writeline()
    fileobj.closefile()
    dbobj.closedb()
    return response
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.