| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
import json
import os
import glob
import re
import uuid
import logging
from placebo.serializer import serialize, deserialize
LOG = logging.getLogger(__name__)
DebugFmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
class FakeHttpResponse(object):
def __init__(self, status_code):
self.status_code = status_code
class Pill(object):
clients = []
def __init__(self, prefix=None, debug=False):
if debug:
self._set_logger(__name__, logging.DEBUG)
self.filename_re = re.compile(r'.*\..*_(?P<index>\d+).json')
self.prefix = prefix
self._uuid = str(uuid.uuid4())
self._data_path = None
self._mode = None
self._session = None
self._index = {}
self.events = []
self.clients = []
@property
def mode(self):
return self._mode
@property
def data_path(self):
return self._data_path
@property
def session(self):
return self._session
def _set_logger(self, logger_name, level=logging.INFO):
"""
Convenience function to quickly configure full debug output
to go to the console.
"""
log = logging.getLogger(logger_name)
log.setLevel(level)
ch = logging.StreamHandler(None)
ch.setLevel(level)
formatter = logging.Formatter(DebugFmtString)
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
def _create_shim_class(self):
# This is kind of tricky. Maybe too tricky.
# We want to know about all of the clients that are created within
# the session we are attached to. To do that, we dynamically
# create a Class object that will become a superclass of each
# new client that gets created. This superclass has an __init__
# method that appends the new client instance into this Pill
# instance's list of clients. The ugly code messing around with
# mro() is there because we have to define an __init__ method in
# our dynamically created class so we have to make sure it
        # calls its superclass's __init__ methods. We can't use
# super() because that needs the class (which we are in the process
# of creating) so we walk the method resolution order to find
# our superclass. The funny business with foo is because the
# self inside _init_method stomps on the self defined in the
# scope of this method but we really just need a reference to
        # the method for adding a client.
foo = self.add_client
def _init_method(self, *args, **kwargs):
res_order = self.__class__.mro()
for i, cls in enumerate(res_order):
if cls.__name__ == 'PillShim':
break
super_cls_index = i + 1
if len(res_order) >= super_cls_index + 1:
super_cls = res_order[super_cls_index]
super_cls.__init__(self, *args, **kwargs)
foo(self)
class_attributes = {'__init__': _init_method}
bases = []
class_name = 'PillShim'
cls = type(class_name, tuple(bases), class_attributes)
return cls
def _create_client(self, class_attributes, base_classes, **kwargs):
LOG.debug('_create_client')
base_classes.insert(0, self._create_shim_class())
def add_client(self, client):
self.clients.append(client)
def attach(self, session, data_path):
LOG.debug('attaching to session: %s', session)
LOG.debug('datapath: %s', data_path)
self._session = session
self._data_path = data_path
session.events.register('creating-client-class', self._create_client)
def record(self, services='*', operations='*'):
if self._mode == 'playback':
self.stop()
self._mode = 'record'
for service in services.split(','):
for operation in operations.split(','):
event = 'after-call.{0}.{1}'.format(
service.strip(), operation.strip())
LOG.debug('recording: %s', event)
self.events.append(event)
self._session.events.register(
event, self._record_data, 'placebo-record-mode')
for client in self.clients:
client.meta.events.register(
event, self._record_data, 'placebo-record-mode')
def playback(self):
if self.mode == 'record':
self.stop()
if self.mode is None:
event = 'before-call.*.*'
self.events.append(event)
self._session.events.register(
event, self._mock_request, 'placebo-playback-mode')
self._mode = 'playback'
for client in self.clients:
client.meta.events.register(
event, self._mock_request, 'placebo-playback-mode')
def stop(self):
LOG.debug('stopping, mode=%s', self.mode)
if self.mode == 'record':
if self._session:
for event in self.events:
self._session.events.unregister(
event, unique_id='placebo-record-mode')
for client in self.clients:
client.meta.events.unregister(
event, unique_id='placebo-record-mode')
self.events = []
elif self.mode == 'playback':
if self._session:
for event in self.events:
self._session.events.unregister(
event, unique_id='placebo-playback-mode')
for client in self.clients:
client.meta.events.unregister(
event, unique_id='placebo-playback-mode')
self.events = []
self._mode = None
def _record_data(self, http_response, parsed, model, **kwargs):
LOG.debug('_record_data')
service_name = model.service_model.endpoint_prefix
operation_name = model.name
self.save_response(service_name, operation_name, parsed,
http_response.status_code)
def get_new_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self.prefix:
base_name = '{0}.{1}'.format(self.prefix, base_name)
LOG.debug('get_new_file_path: %s', base_name)
index = 0
glob_pattern = os.path.join(self._data_path, base_name + '*')
for file_path in glob.glob(glob_pattern):
file_name = os.path.basename(file_path)
m = self.filename_re.match(file_name)
if m:
i = int(m.group('index'))
if i > index:
index = i
index += 1
return os.path.join(
self._data_path, '{0}_{1}.json'.format(base_name, index))
def get_next_file_path(self, service, operation):
base_name = '{0}.{1}'.format(service, operation)
if self.prefix:
base_name = '{0}.{1}'.format(self.prefix, base_name)
LOG.debug('get_next_file_path: %s', base_name)
next_file = None
while next_file is None:
index = self._index.setdefault(base_name, 1)
fn = os.path.join(
self._data_path, base_name + '_{0}.json'.format(index))
if os.path.exists(fn):
next_file = fn
self._index[base_name] += 1
elif index != 1:
self._index[base_name] = 1
else:
# we are looking for the first index and it's not here
raise IOError('response file ({0}) not found'.format(fn))
return fn
def save_response(self, service, operation, response_data,
http_response=200):
"""
Store a response to the data directory. The ``operation``
should be the name of the operation in the service API (e.g.
        DescribeInstances), the ``response_data`` should be a value you want
to return from a placebo call and the ``http_response`` should be
the HTTP status code returned from the service. You can add
multiple responses for a given operation and they will be
returned in order.
"""
LOG.debug('save_response: %s.%s', service, operation)
filepath = self.get_new_file_path(service, operation)
LOG.debug('save_response: path=%s', filepath)
json_data = {'status_code': http_response,
'data': response_data}
with open(filepath, 'w') as fp:
json.dump(json_data, fp, indent=4, default=serialize)
def load_response(self, service, operation):
LOG.debug('load_response: %s.%s', service, operation)
response_file = self.get_next_file_path(service, operation)
        LOG.debug('load_response: %s', response_file)
with open(response_file, 'r') as fp:
response_data = json.load(fp, object_hook=deserialize)
return (FakeHttpResponse(response_data['status_code']),
response_data['data'])
def _mock_request(self, **kwargs):
"""
A mocked out make_request call that bypasses all network calls
and simply returns any mocked responses defined.
"""
model = kwargs.get('model')
service = model.service_model.endpoint_prefix
operation = model.name
LOG.debug('_make_request: %s.%s', service, operation)
return self.load_response(service, operation)
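# --- Illustrative usage sketch (not part of the original module) ---
# A minimal round trip through save_response()/load_response() that needs no
# boto3 session and no network access.  Setting _data_path directly stands in
# for the usual pill.attach(session, data_path) call; the response payload
# below is made up.
if __name__ == '__main__':
    import tempfile
    pill = Pill(debug=True)
    pill._data_path = tempfile.mkdtemp()
    pill.save_response('ec2', 'DescribeInstances',
                       {'Reservations': []}, http_response=200)
    http, data = pill.load_response('ec2', 'DescribeInstances')
    print('%s %s' % (http.status_code, data))  # -> 200 {'Reservations': []}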
|
{
"content_hash": "47791bdce3afc3644911c58351cca0c4",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 77,
"avg_line_length": 38.69322709163347,
"alnum_prop": 0.5647652388797364,
"repo_name": "HesselTjeerdsma/Cyber-Physical-Pacman-Game",
"id": "1589da5b49c418680f38d355b3c18d06d13e8c53",
"size": "10294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Algor/flask/lib/python2.7/site-packages/placebo/pill.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "144418"
},
{
"name": "Batchfile",
"bytes": "24"
},
{
"name": "C",
"bytes": "527696"
},
{
"name": "C++",
"bytes": "274346"
},
{
"name": "CSS",
"bytes": "79630"
},
{
"name": "Fortran",
"bytes": "14949"
},
{
"name": "JavaScript",
"bytes": "28328"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "30670952"
},
{
"name": "Roff",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "9697"
},
{
"name": "TeX",
"bytes": "1628"
}
],
"symlink_target": ""
}
|
"""
This file contains the class definitions for various priors.
"""
__author__ = 'Brandon C. Kelly'
import numpy as np
from scipy import stats
class Prior(object):
"""
    Base class for prior distribution objects. The two main methods of the prior class are draw() and
    logdensity(), both of which should be overridden in order to be useful. However, only logdensity()
    must be overridden, as it is the only method required by an MCMC sampler.
    This class is merely a base class and should never be instantiated.
"""
__slots__ = ["temperature"]
def __init__(self, temperature=1.0):
"""
Constructor for prior object.
:param temperature: The prior 'temperature', used for tempered distributions. If you don't
                            know what this means then just use the default of 1.0, since this corresponds
to "normal" MCMC.
"""
self.temperature = temperature
def logdensity(self, value):
"""
The logarithm of the prior distribution evaluated at the input value.
:param value: Compute the prior distribution at this value.
"""
return 0.0
def draw(self):
"""
Return a random draw from the prior distribution.
"""
return 0.0
class Uninformative(Prior):
"""
    An uninformative prior. This just returns 0.0 for the logarithm of the prior distribution for all
    values of the parameter. The draw() method is undefined for this class and should not be used.
"""
def logdensity(self, value):
return 0.0
class Normal(Prior):
"""
A normal prior.
"""
__slots__ = ["mu", "variance", "sigma"]
def __init__(self, mu, variance, temperature=1.0):
"""
Constructor for normal prior object.
:param mu: The prior mean.
:param variance: The prior variance.
:param temperature: Temperature of prior distribution, see Prior class documentation.
"""
Prior.__init__(self, temperature=temperature)
self.mu = mu
self.variance = variance
self.sigma = np.sqrt(variance)
def logdensity(self, value):
return stats.norm.logpdf(value, loc=self.mu, scale=self.sigma)
def draw(self):
return stats.norm.rvs(loc=self.mu, scale=self.sigma)
class ScaledInvChiSqr(Prior):
"""
A scaled inverse-chi-square prior object.
"""
__slots__ = ["dof", "ssqr"]
def __init__(self, dof, ssqr, temperature=1.0):
"""
Constructor for scaled inverse-chi-square prior object.
:param dof: The prior degrees of freedom.
:param ssqr: The prior variance.
:param temperature: Temperature of prior distribution, see Prior class documentation.
"""
Prior.__init__(self, temperature=temperature)
# assert dof > 0 # Make sure parameter values are positive
# assert ssqr > 0
self.dof = dof
self.ssqr = ssqr
def logdensity(self, value):
return stats.invgamma.logpdf(value, self.dof / 2.0, scale=self.dof * self.ssqr / 2.0)
def draw(self):
return stats.invgamma.rvs(self.dof / 2.0, scale=self.dof * self.ssqr / 2.0)
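# --- Illustrative usage sketch (not part of the original module) ---
# The call pattern an MCMC sampler relies on: draw() for a starting value and
# logdensity() for the log prior.  The hyperparameter values are arbitrary.
if __name__ == '__main__':
    mean_prior = Normal(mu=0.0, variance=4.0)
    x = mean_prior.draw()
    print('draw = %.3f, log prior = %.3f' % (x, mean_prior.logdensity(x)))

    variance_prior = ScaledInvChiSqr(dof=3, ssqr=1.0)
    v = variance_prior.draw()
    print('draw = %.3f, log prior = %.3f' % (v, variance_prior.logdensity(v)))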
|
{
"content_hash": "ae35a1ca5e0d483e7ded12987f25a242",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 105,
"avg_line_length": 30.556603773584907,
"alnum_prop": 0.6214881136153134,
"repo_name": "acbecker/BART",
"id": "60f996f021b8d2e3b900c0aeb011346d260c5607",
"size": "3239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "priors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169602"
}
],
"symlink_target": ""
}
|
import re
from datetime import datetime
from typing import Any, Dict, List
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_stream_message
from zerver.lib.exceptions import JsonableError
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
SUBJECT_TEMPLATE = "{service_url}"
def send_message_for_event(event, user_profile, client, stream):
# type: (Dict[str, Any], UserProfile, Client, str) -> None
event_type = get_event_type(event)
subject = SUBJECT_TEMPLATE.format(service_url=event['check']['url'])
body = EVENT_TYPE_BODY_MAPPER[event_type](event)
check_send_stream_message(user_profile, client, stream, subject, body)
def get_body_for_up_event(event):
# type: (Dict[str, Any]) -> str
body = "Service is `up`"
event_downtime = event['downtime']
if event_downtime['started_at']:
body = "{} again".format(body)
string_date = get_time_string_based_on_duration(event_downtime['duration'])
if string_date:
body = "{} after {}".format(body, string_date)
return "{}.".format(body)
def get_time_string_based_on_duration(duration):
# type: (int) -> str
    days, remainder = divmod(duration, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
string_date = ''
string_date += add_time_part_to_string_date_if_needed(days, 'day')
string_date += add_time_part_to_string_date_if_needed(hours, 'hour')
string_date += add_time_part_to_string_date_if_needed(minutes, 'minute')
string_date += add_time_part_to_string_date_if_needed(seconds, 'second')
return string_date.rstrip()
def add_time_part_to_string_date_if_needed(value, text_name):
# type: (int, str) -> str
if value == 1:
return "1 {} ".format(text_name)
if value > 1:
return "{} {}s ".format(value, text_name)
return ''
def get_body_for_down_event(event):
# type: (Dict[str, Any]) -> str
return "Service is `down`. It returned a {} error at {}.".format(
event['downtime']['error'],
event['downtime']['started_at'].replace('T', ' ').replace('Z', ' UTC'))
@api_key_only_webhook_view('Updown')
@has_request_variables
def api_updown_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='updown')):
# type: (HttpRequest, UserProfile, List[Dict[str, Any]], str) -> HttpResponse
for event in payload:
send_message_for_event(event, user_profile, request.client, stream)
return json_success()
EVENT_TYPE_BODY_MAPPER = {
'up': get_body_for_up_event,
'down': get_body_for_down_event
}
def get_event_type(event):
# type: (Dict[str, Any]) -> str
event_type_match = re.match('check.(.*)', event['event'])
if event_type_match:
event_type = event_type_match.group(1)
if event_type in EVENT_TYPE_BODY_MAPPER:
return event_type
raise JsonableError(_('Unsupported Updown event type: %s') % (event['event'],))
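# --- Illustrative sketch (not part of the original webhook module) ---
# A hand-written 'check.up' event rendered to the message body the handlers
# above would post.  The payload fields are assumptions modelled on the code,
# and importing this module still requires a configured Zulip environment.
if __name__ == '__main__':
    sample_event = {
        'event': 'check.up',
        'check': {'url': 'https://example.com'},
        'downtime': {'started_at': '2017-01-01T00:00:00Z', 'duration': 3661},
    }
    body = EVENT_TYPE_BODY_MAPPER[get_event_type(sample_event)](sample_event)
    print(body)  # Service is `up` again after 1 hour 1 minute 1 second.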
|
{
"content_hash": "ae73e76c40f0d20df7190d2728617dd2",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 83,
"avg_line_length": 39.3780487804878,
"alnum_prop": 0.6577887890987922,
"repo_name": "brockwhittaker/zulip",
"id": "db7048492dfa3c3c45f1debd75931f09a1a7a4fb",
"size": "3267",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/webhooks/updown/view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "442662"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "515931"
},
{
"name": "JavaScript",
"bytes": "2195008"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "393671"
},
{
"name": "Puppet",
"bytes": "87413"
},
{
"name": "Python",
"bytes": "3948219"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "65702"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from zope.interface import implementer
from twisted.protocols.policies import ProtocolWrapper
try:
# noinspection PyUnresolvedReferences
from twisted.web.error import NoResource
except ImportError:
# starting from Twisted 12.2, NoResource has moved
from twisted.web.resource import NoResource
from twisted.web.resource import IResource, Resource
from six import PY3
# The following triggers an import of reactor at module level!
#
from twisted.web.server import NOT_DONE_YET
__all__ = (
'WebSocketResource',
'WSGIRootResource',
)
class WSGIRootResource(Resource):
"""
Root resource when you want a WSGI resource be the default serving
resource for a Twisted Web site, but have subpaths served by
different resources.
This is a hack needed since
`twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_.
does not provide a ``putChild()`` method.
.. seealso::
* `Autobahn Twisted Web WSGI example <https://github.com/crossbario/autobahn-python/tree/master/examples/twisted/websocket/echo_wsgi>`_
* `Original hack <http://blog.vrplumber.com/index.php?/archives/2426-Making-your-Twisted-resources-a-url-sub-tree-of-your-WSGI-resource....html>`_
"""
def __init__(self, wsgiResource, children):
"""
        :param wsgiResource: The WSGI resource to serve as root resource.
:type wsgiResource: Instance of `twisted.web.wsgi.WSGIResource <http://twistedmatrix.com/documents/current/api/twisted.web.wsgi.WSGIResource.html>`_.
:param children: A dictionary with string keys constituting URL subpaths, and Twisted Web resources as values.
:type children: dict
"""
Resource.__init__(self)
self._wsgiResource = wsgiResource
self.children = children
def getChild(self, path, request):
request.prepath.pop()
request.postpath.insert(0, path)
return self._wsgiResource
@implementer(IResource)
class WebSocketResource(object):
"""
A Twisted Web resource for WebSocket.
"""
isLeaf = True
def __init__(self, factory):
"""
:param factory: An instance of :class:`autobahn.twisted.websocket.WebSocketServerFactory`.
:type factory: obj
"""
self._factory = factory
# noinspection PyUnusedLocal
def getChildWithDefault(self, name, request):
"""
This resource cannot have children, hence this will always fail.
"""
return NoResource("No such child resource.")
def putChild(self, path, child):
"""
This resource cannot have children, hence this is always ignored.
"""
def render(self, request):
"""
        Render the resource. This will take over the transport underlying
the request, create a :class:`autobahn.twisted.websocket.WebSocketServerProtocol`
and let that do any subsequent communication.
"""
# Create Autobahn WebSocket protocol.
#
protocol = self._factory.buildProtocol(request.transport.getPeer())
if not protocol:
# If protocol creation fails, we signal "internal server error"
request.setResponseCode(500)
return b""
# Take over the transport from Twisted Web
#
transport, request.transport = request.transport, None
# Connect the transport to our protocol. Once #3204 is fixed, there
# may be a cleaner way of doing this.
# http://twistedmatrix.com/trac/ticket/3204
#
if isinstance(transport, ProtocolWrapper):
# i.e. TLS is a wrapping protocol
transport.wrappedProtocol = protocol
else:
transport.protocol = protocol
protocol.makeConnection(transport)
# We recreate the request and forward the raw data. This is somewhat
# silly (since Twisted Web already did the HTTP request parsing
# which we will do a 2nd time), but it's totally non-invasive to our
# code. Maybe improve this.
#
if PY3:
data = request.method + b' ' + request.uri + b' HTTP/1.1\x0d\x0a'
for h in request.requestHeaders.getAllRawHeaders():
data += h[0] + b': ' + b",".join(h[1]) + b'\x0d\x0a'
data += b"\x0d\x0a"
data += request.content.read()
else:
data = "%s %s HTTP/1.1\x0d\x0a" % (request.method, request.uri)
for h in request.requestHeaders.getAllRawHeaders():
data += "%s: %s\x0d\x0a" % (h[0], ",".join(h[1]))
data += "\x0d\x0a"
protocol.dataReceived(data)
return NOT_DONE_YET
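# --- Illustrative wiring sketch (not part of the original module) ---
# Serves a trivial WSGI app at "/" and a WebSocket echo endpoint at "/ws" from
# a single Twisted Web site on port 8080.  The WSGI app, the echo protocol and
# the port are assumptions made for this example.
if __name__ == '__main__':
    from twisted.internet import reactor
    from twisted.web.server import Site
    from twisted.web.wsgi import WSGIResource
    from autobahn.twisted.websocket import (WebSocketServerFactory,
                                            WebSocketServerProtocol)

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello from WSGI']

    class EchoProtocol(WebSocketServerProtocol):
        def onMessage(self, payload, isBinary):
            self.sendMessage(payload, isBinary)

    factory = WebSocketServerFactory(u"ws://127.0.0.1:8080")
    factory.protocol = EchoProtocol

    wsgi_resource = WSGIResource(reactor, reactor.getThreadPool(), hello_app)
    root = WSGIRootResource(wsgi_resource, {b'ws': WebSocketResource(factory)})
    reactor.listenTCP(8080, Site(root))
    reactor.run()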
|
{
"content_hash": "20fea201b2749a609b66180eec947d16",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 157,
"avg_line_length": 35.28888888888889,
"alnum_prop": 0.6446263643996641,
"repo_name": "RyanHope/AutobahnPython",
"id": "79d979f49a325a4e01c4ab8b621a296dc24900da",
"size": "6042",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "autobahn/twisted/resource.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3648"
},
{
"name": "Python",
"bytes": "983364"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.contrib import admin
from ModelTest_app.models import Contact
# Register your models here.
admin.site.register(Contact)
|
{
"content_hash": "d2dd6546c98539830f5b9eb505cb444b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 21.875,
"alnum_prop": 0.8,
"repo_name": "GunnerJnr/_CodeInstitute",
"id": "817ec6028675edb4cb41e85b5d843db631eb8277",
"size": "199",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Stream-3/Full-Stack-Development/4.Hello-Django-Administration/2.Wire-Up-Models-To-Admin/ModelTest/ModelTest_app/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "336"
},
{
"name": "CSS",
"bytes": "2545480"
},
{
"name": "HTML",
"bytes": "708226"
},
{
"name": "JavaScript",
"bytes": "1984479"
},
{
"name": "Python",
"bytes": "1727585"
},
{
"name": "Shell",
"bytes": "75780"
},
{
"name": "TSQL",
"bytes": "642"
}
],
"symlink_target": ""
}
|
import os
from os.path import join
from settings import *
from common import delete
def cleanall(projecthome, verbose=False):
"""
Delete all BLCI configurations and metadata files
**Positional Arguments:**
projecthome:
- The path to the root of the project
**Optional Arguments:**
verbose:
- Print messages when actions are taken
"""
clean_bl_config(projecthome, verbose)
clean_dependencies(projecthome, verbose)
clean_base_ci_config(projecthome, verbose)
clean_git(projecthome, verbose)
def clean_bl_config(projecthome, verbose=False):
"""
Delete BLCI configuration file
**Positional Arguments:**
projecthome:
- The path to the root of the project
**Optional Arguments:**
verbose:
- Print messages when actions are taken
"""
delete(join(projecthome, BL_DEFAULT_CONFIG_FN), verbose)
def clean_dependencies(projecthome, verbose=False):
"""
Delete BLCI dependency metadata file
**Positional Arguments:**
projecthome:
        - The path to the root of the project
    **Optional Arguments:**
    verbose:
        - Print messages when actions are taken
    """
delete(join(projecthome, BL_DEFAULT_DEPS_FN), verbose)
def clean_base_ci_config(projecthome, verbose=False):
"""
Delete Travis-CI configuration file
**Positional Arguments:**
projecthome:
- The path to the root of the project
**Optional Arguments:**
verbose:
- Print messages when actions are taken
"""
delete(join(projecthome, BASE_CI_CONFIG_FN), verbose)
def clean_git(projecthome, verbose=False):
"""
    Delete the Git repository directory (.git) and the Git ignore file
**Positional Arguments:**
projecthome:
- The path to the root of the project
**Optional Arguments:**
verbose:
- Print messages when actions are taken
"""
delete(join(projecthome, ".git"))
delete(join(projecthome, GIT_IGNORE_FN))
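# --- Illustrative usage sketch (not part of the original module) ---
# Typical invocation: wipe every BLCI-generated file from a project tree.  The
# project path is a placeholder; the file names removed come from the constants
# imported from `settings`.
if __name__ == '__main__':
    cleanall('/path/to/project', verbose=True)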
|
{
"content_hash": "4b8b09bde64a0ca1448ef5751c6c423c",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 60,
"avg_line_length": 21.60919540229885,
"alnum_prop": 0.6595744680851063,
"repo_name": "neurodata/blci",
"id": "5a2d07b006ea71a75e21d0d07f367dca01e764e6",
"size": "2578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "include/clean.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "57783"
},
{
"name": "Shell",
"bytes": "995"
}
],
"symlink_target": ""
}
|
from constants import EOW, EOS, SOS
def w2tok(word, maxlen, pad=None):
if len(word) >= maxlen - 1:
word = word[0: maxlen - 1]
word += EOW
if pad is not None and len(word) < maxlen:
word += pad * (maxlen - len(word))
assert len(word) == maxlen
return word
def w2str(word):
if word == SOS:
return '<S>'
elif word == EOS:
return '</S>'
return word
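# --- Illustrative sketch (not part of the original module) ---
# How w2tok truncates, appends the end-of-word marker and pads.  The exact
# output depends on EOW and the pad character defined in `constants`; the
# comments assume EOW is a single character.
if __name__ == '__main__':
    print(w2tok('hello', maxlen=4))        # truncated to 3 chars, then EOW
    print(w2tok('hi', maxlen=6, pad=' '))  # EOW appended, then padded to 6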
|
{
"content_hash": "dd6c58b40e22aee4793f3132ca7a44fc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 44,
"avg_line_length": 20.157894736842106,
"alnum_prop": 0.5926892950391645,
"repo_name": "milankinen/c2w2c",
"id": "ca593cf728142d48e6485dc5474a455867dd86a7",
"size": "383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46229"
},
{
"name": "Shell",
"bytes": "1620"
}
],
"symlink_target": ""
}
|
from os.path import dirname
from ipkg.build import Formula, File
class foo(Formula):
name = 'foo'
version = '1.0'
sources = File(dirname(__file__) + '/../../sources/foo-1.0.tar.gz')
platform = 'any'
def install(self):
self.run_cp(['README', self.environment.prefix + '/foo.README'])
|
{
"content_hash": "59f7611efea5a2063715ad116c6a6f7c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 22.571428571428573,
"alnum_prop": 0.6107594936708861,
"repo_name": "pmuller/ipkg",
"id": "1a4f1c919b299942a82c6932d7f00aabeb8479d0",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data/formulas/foo/foo-1.0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139473"
}
],
"symlink_target": ""
}
|
"""
Created on Tue May 30 16:55:32 2017
@author: azkei
Permutation (random reordering) of a Series or of the rows of a DataFrame
is easy to do using the numpy.random.permutation() function.
"""
import numpy as np
import pandas as pd
# For this example, we create a DataFrame containing integers in ascending order
nframe = pd.DataFrame(np.arange(25).reshape(5, 5))
nframe
# Create an array of 5 integers, from 0-4 arranged in random order with the
# permutation() function. This will be the new order in which to set the values
# of a row of DataFrame
new_order = np.random.permutation(5)
new_order
# Apply it to the DataFrame using the take() function
nframe.take(new_order)
# The order of the rows has been changed; i.e. the indices follow the same order
# as new_order
# We can also apply a permutation to just a portion of the DataFrame by
# passing an array whose values are limited to a certain range of row labels,
# e.g. in our case rows 2 to 4
new_order=[3,4,2]
nframe.take(new_order)
# 2. Random Sampling
# We have just seen how to extract a portion of a DataFrame determined
# by subjecting it to permutation.
# Sometimes, when we have a huge DataFrame, we may need to sample it randomly.
# The quickest way to do this is by using np.random.randint() function
sample = np.random.randint(0,len(nframe),size=3)
sample
nframe.take(sample)
# Because randint() samples with replacement, the same row can appear in the sample more than once
|
{
"content_hash": "444f8eedec784ee311400dd0c4466e79",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 41.1764705882353,
"alnum_prop": 0.7585714285714286,
"repo_name": "jjsalomon/python-analytics",
"id": "c926f7922fd099e947f0e73f997bba5871df5785",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas3 - Data Manipulation/pandas8 - Data Transformation - Permutation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "835"
},
{
"name": "Python",
"bytes": "75839"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from pathlib import Path
import typing as T
def ls_as_bytestream() -> bytes:
if os.path.exists('.git'):
return subprocess.run(['git', 'ls-tree', '-r', '--name-only', 'HEAD'],
stdout=subprocess.PIPE).stdout
files = [str(p) for p in Path('.').glob('**/*')
if not p.is_dir() and
not next((x for x in p.parts if x.startswith('.')), None)]
return '\n'.join(files).encode()
def cscope() -> int:
ls = b'\n'.join([b'"%s"' % f for f in ls_as_bytestream().split()])
return subprocess.run(['cscope', '-v', '-b', '-i-'], input=ls).returncode
def ctags() -> int:
ls = ls_as_bytestream()
return subprocess.run(['ctags', '-L-'], input=ls).returncode
def etags() -> int:
ls = ls_as_bytestream()
return subprocess.run(['etags', '-'], input=ls).returncode
def run(args: T.List[str]) -> int:
tool_name = args[0]
srcdir_name = args[1]
os.chdir(srcdir_name)
assert tool_name in ['cscope', 'ctags', 'etags']
res = globals()[tool_name]()
assert isinstance(res, int)
return res
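# --- Illustrative sketch (not part of the original script) ---
# Meson calls run() with the tool name and the source directory; the guard
# below mimics that so the module can be exercised directly, e.g.
#   python tags.py ctags /path/to/source
# (the chosen tool must be installed and on PATH).
if __name__ == '__main__':
    import sys
    sys.exit(run(sys.argv[1:]))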
|
{
"content_hash": "93b8c94ded50dd947c7dc86040394226",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 28.71794871794872,
"alnum_prop": 0.5705357142857143,
"repo_name": "QuLogic/meson",
"id": "9098efb395c582350660415c6a55fb392808b69d",
"size": "1708",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mesonbuild/scripts/tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4862"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "196268"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "59203"
},
{
"name": "CMake",
"bytes": "35279"
},
{
"name": "Cuda",
"bytes": "10458"
},
{
"name": "D",
"bytes": "5313"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "12020"
},
{
"name": "Genie",
"bytes": "477"
},
{
"name": "HTML",
"bytes": "897"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2900"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "187"
},
{
"name": "Limbo",
"bytes": "28"
},
{
"name": "Meson",
"bytes": "527053"
},
{
"name": "Objective-C",
"bytes": "688"
},
{
"name": "Objective-C++",
"bytes": "381"
},
{
"name": "PowerShell",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "3598710"
},
{
"name": "Roff",
"bytes": "625"
},
{
"name": "Rust",
"bytes": "3192"
},
{
"name": "Shell",
"bytes": "10416"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim Script",
"bytes": "9743"
},
{
"name": "Yacc",
"bytes": "103"
}
],
"symlink_target": ""
}
|
'''
Classes:
Dataset - Container for a dataset's attributes and data.
Bounds - Container for holding spatial and temporal bounds information
for operations on a Dataset.
'''
import os
import numpy
import logging
import datetime as dt
from mpl_toolkits.basemap import Basemap
import netCDF4
import ocw
import ocw.utils as utils
logger = logging.getLogger(__name__)
class Dataset:
'''Container for a dataset's attributes and data.'''
def __init__(self, lats, lons, times, values, variable=None, units=None,
origin=None, name=""):
'''Default Dataset constructor
:param lats: One dimensional numpy array of unique latitude values.
:type lats: :class:`numpy.ndarray`
:param lons: One dimensional numpy array of unique longitude values.
:type lons: :class:`numpy.ndarray`
:param times: One dimensional numpy array of unique python datetime
objects.
:type times: :class:`numpy.ndarray`
:param values: Three dimensional numpy array of parameter values with
shape [timesLength, latsLength, lonsLength].
:type values: :class:`numpy.ndarray`
:param variable: Name of the value variable.
:type variable: :mod:`string`
:param units: Name of the value units
:type units: :mod:`string`
:param name: An optional string name for the Dataset.
:type name: :mod:`string`
:param origin: An optional object used to specify information on where
this dataset was loaded from.
:type origin: :class:`dict`
:raises: ValueError
'''
self._validate_inputs(lats, lons, times, values)
lats, lons, values = utils.normalize_lat_lon_values(lats, lons, values)
self.lats = lats
self.lons = lons
self.times = times
self.values = values
self.variable = variable
self.units = units
self.name = name
self.origin = origin
def spatial_boundaries(self):
'''Calculate the spatial boundaries.
:returns: The Dataset's bounding latitude and longitude values as a
tuple in the form (min_lat, max_lat, min_lon, max_lon)
:rtype: :func:`tuple` of the form (:class:`float`, :class:`float`,
:class:`float`, :class:`float`).
'''
return (float(numpy.min(self.lats)), float(numpy.max(self.lats)),
float(numpy.min(self.lons)), float(numpy.max(self.lons)))
def temporal_boundaries(self):
'''Calculate the temporal range
:returns: The start and end date of the Dataset's temporal range as
a tuple in the form (start_time, end_time).
:rtype: :func:`tuple` of the form (:class:`datetime.datetime`,
:class:`datetime.datetime`)
'''
sorted_time = numpy.sort(self.times)
start_time = sorted_time[0]
end_time = sorted_time[-1]
return (start_time, end_time)
def spatial_resolution(self):
'''Calculate the latitudinal and longitudinal spatial resolution.
If self.lats and self.lons are from curvilinear coordinates,
the output resolutions are approximate values.
:returns: The Dataset's latitudinal and longitudinal spatial resolution
as a tuple of the form (lat_resolution, lon_resolution).
:rtype: (:class:`float`, :class:`float`)
'''
if self.lats.ndim == 1 and self.lons.ndim == 1:
sorted_lats = numpy.sort(list(set(self.lats)))
sorted_lons = numpy.sort(list(set(self.lons)))
lat_resolution = sorted_lats[1] - sorted_lats[0]
lon_resolution = sorted_lons[1] - sorted_lons[0]
if self.lats.ndim == 2 and self.lons.ndim == 2:
lat_resolution = self.lats[1, 1] - self.lats[0, 0]
lon_resolution = self.lons[1, 1] - self.lons[0, 0]
return (lat_resolution, lon_resolution)
def temporal_resolution(self):
'''Calculate the temporal resolution.
:raises ValueError: If timedelta.days as calculated from the sorted \
list of times is an unrecognized value a ValueError is raised.
:returns: The temporal resolution.
:rtype: :mod:`string`
'''
sorted_times = numpy.sort(self.times)
time_resolution = sorted_times[1] - sorted_times[0]
num_days = time_resolution.days
if num_days == 0:
num_hours = time_resolution.seconds / 3600
time_resolution = 'hourly' if num_hours >= 1 else 'minutely'
elif num_days == 1:
time_resolution = 'daily'
elif num_days <= 31:
time_resolution = 'monthly'
elif num_days > 31:
time_resolution = 'yearly'
return time_resolution
def _validate_inputs(self, lats, lons, times, values):
"""Check that Dataset inputs are valid.
:raises: ValueError
"""
err_msg = None
# Setup and Check parameter dimensionality is correct
lat_dim = len(lats.shape)
lon_dim = len(lons.shape)
time_dim = len(times.shape)
value_dim = len(values.shape)
lat_count = lats.shape[0]
lon_count = lons.shape[0]
if lat_dim == 2 and lon_dim == 2:
lon_count = lons.shape[1]
time_count = times.shape[0]
if time_dim != 1:
err_msg = ("Time Array should be 1 dimensional. %s dimensions"
" found." % time_dim)
elif value_dim < 2:
err_msg = ("Value Array should be at least 2 dimensional."
" %s dimensions found." % value_dim)
# Finally check that the Values array conforms to the proper shape
if value_dim == 2:
if (values.shape[0] != time_count and
values.shape != (lat_count, lon_count)):
err_msg = """Value Array must be of shape (lats, lons) or (times, locations).
Expected shape (%s, %s) but received (%s, %s)""" % (
lat_count,
lon_count,
values.shape[0],
values.shape[1])
if (value_dim == 3 and
values.shape != (time_count, lat_count, lon_count)):
err_msg = """Value Array must be of shape (times, lats, lons).
Expected shape (%s, %s, %s) but received (%s, %s, %s)""" % (
time_count,
lat_count,
lon_count,
values.shape[0],
values.shape[1],
values.shape[2])
if err_msg:
logger.error(err_msg)
raise ValueError(err_msg)
def __str__(self):
lat_min, lat_max, lon_min, lon_max = self.spatial_boundaries()
start, end = self.temporal_boundaries()
lat_range = "({}, {})".format(lat_min, lon_min)
lon_range = "({}, {})".format(lon_min, lon_min)
temporal_boundaries = "({}, {})".format(start, end)
formatted_repr = (
"<Dataset - name: {}, "
"lat-range: {}, "
"lon-range: {}, "
"temporal_boundaries: {}, "
"var: {}, "
"units: {}>"
)
return formatted_repr.format(
self.name if self.name != "" else None,
lat_range,
lon_range,
temporal_boundaries,
self.variable,
self.units
)
class Bounds(object):
'''Container for holding spatial and temporal bounds information.
Certain operations require valid bounding information to be present for
correct functioning. Bounds guarantees that a function receives well
formed information without the need to do the validation manually.
boundary_type may be one of the following:
* 'rectangular'
* 'CORDEX (CORDEX region name)': pre-defined CORDEX boundary
    * 'us_states': an array of US state abbreviations is required (e.g. us_states = ['CA','NV'])
    * 'countries': an array of country names is required (e.g. countries = ['United States','Canada','Mexico'])
    * 'user': a user_mask_file in netCDF format with a two-dimensional mask variable is required.
    If boundary_type == 'rectangular', spatial and temporal bounds must satisfy the following guidelines.
* Latitude values must be in the range [-90, 90]
* Longitude values must be in the range [-180, 180]
* Lat/Lon Min values must be less than the corresponding Lat/Lon Max
values.
    Temporal bounds must be valid datetime objects.
'''
def __init__(self, boundary_type='rectangular',
us_states=None, countries=None,
user_mask_file=None, mask_variable_name=None, longitude_name=None, latitude_name=None,
lat_min=-90, lat_max=90, lon_min=-180, lon_max=180,
start=None, end=None):
'''Default Bounds constructor
:param boundary_type: The type of spatial subset boundary.
        :type boundary_type: :mod:`string`
        :param lat_min: The minimum latitude bound.
        :type lat_min: :class:`float`
        :param lat_max: The maximum latitude bound.
        :type lat_max: :class:`float`
:param lon_min: The minimum longitude bound.
:type lon_min: :class:`float`
:param lon_max: The maximum longitude bound.
:type lon_max: :class:`float`
:param start: An optional datetime object for the starting
datetime bound.
:type start: :class:`datetime.datetime`
:param end: An optional datetime object for the ending datetime bound.
:type end: :class:`datetime.datetime`
:raises: ValueError
'''
self.boundary_type = boundary_type
if start:
self._start = start
else:
self._start = None
if end:
self._end = end
else:
self._end = None
if boundary_type == 'us_states':
self.masked_regions = utils.shapefile_boundary(
boundary_type, us_states)
if boundary_type == 'countries':
self.masked_regions = utils.shapefile_boundary(
boundary_type, countries)
if boundary_type == 'user':
file_object = netCDF4.Dataset(user_mask_file)
self.mask_variable = file_object.variables[mask_variable_name][:]
mask_longitude = file_object.variables[longitude_name][:]
mask_latitude = file_object.variables[latitude_name][:]
if mask_longitude.ndim == 1 and mask_latitude.ndim == 1:
self.mask_longitude, self.mask_latitude = numpy.meshgrid(
mask_longitude, mask_latitude)
elif mask_longitude.ndim == 2 and mask_latitude.ndim == 2:
self.mask_longitude = mask_longitude
self.mask_latitude = mask_latitude
if boundary_type == 'rectangular':
if not (-90 <= float(lat_min) <= 90) or float(lat_min) > float(lat_max):
error = "Attempted to set lat_min to invalid value: %s" % (
lat_min)
logger.error(error)
raise ValueError(error)
if not (-90 <= float(lat_max) <= 90):
error = "Attempted to set lat_max to invalid value: %s" % (
lat_max)
logger.error(error)
raise ValueError(error)
if not (-180 <= float(lon_min) <= 180) or float(lon_min) > float(lon_max):
error = "Attempted to set lon_min to invalid value: %s" % (
lon_min)
logger.error(error)
raise ValueError(error)
if not (-180 <= float(lon_max) <= 180):
error = "Attempted to set lat_max to invalid value: %s" % (
lon_max)
logger.error(error)
raise ValueError(error)
self.lat_min = float(lat_min)
self.lat_max = float(lat_max)
self.lon_min = float(lon_min)
self.lon_max = float(lon_max)
if boundary_type[:6].upper() == 'CORDEX':
self.lat_min, self.lat_max, self.lon_min, self.lon_max = utils.CORDEX_boundary(
boundary_type[6:].replace(" ", "").lower())
@property
def start(self):
return self._start
@start.setter
def start(self, value):
if self._end:
if not (type(value) is dt.datetime and value < self._end):
error = "Attempted to set start to invalid value: %s" % (value)
logger.error(error)
raise ValueError(error)
self._start = value
@property
def end(self):
return self._end
@end.setter
def end(self, value):
if self._start:
if not (type(value) is dt.datetime and value > self._start):
error = "Attempted to set end to invalid value: %s" % (value)
logger.error(error)
raise ValueError(error)
self._end = value
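# --- Illustrative usage sketch (not part of the original module) ---
# Builds a small synthetic Dataset and queries its bounds and resolution.
# The shapes follow the (times, lats, lons) convention enforced by
# _validate_inputs(); running it still requires the full ocw environment.
if __name__ == '__main__':
    lats = numpy.arange(-10., 11., 5.)            # 5 latitudes
    lons = numpy.arange(100., 121., 5.)           # 5 longitudes
    times = numpy.array([dt.datetime(2000, month, 1) for month in range(1, 13)])
    values = numpy.zeros((len(times), len(lats), len(lons)))
    ds = Dataset(lats, lons, times, values, variable='tas', units='K', name='demo')
    print(ds.spatial_boundaries())    # (-10.0, 10.0, 100.0, 120.0)
    print(ds.temporal_resolution())   # 'monthly'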
|
{
"content_hash": "7d93b508f2818aeeea309a866ffb1065",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 108,
"avg_line_length": 36.67313019390582,
"alnum_prop": 0.5679431981267468,
"repo_name": "jarifibrahim/climate",
"id": "196913a13445b888a154559020342465db388438",
"size": "14026",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ocw/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "24139"
},
{
"name": "CSS",
"bytes": "2587"
},
{
"name": "HTML",
"bytes": "38243"
},
{
"name": "JavaScript",
"bytes": "124509"
},
{
"name": "OpenEdge ABL",
"bytes": "14713"
},
{
"name": "Python",
"bytes": "901332"
},
{
"name": "Ruby",
"bytes": "537"
},
{
"name": "Shell",
"bytes": "4808"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ..._serialization import Deserializer, Serializer
from ._configuration import ComputeManagementClientConfiguration
from .operations import DiskAccessesOperations, DiskEncryptionSetsOperations, DisksOperations, SnapshotsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""Compute Client.
:ivar disks: DisksOperations operations
:vartype disks: azure.mgmt.compute.v2020_06_30.aio.operations.DisksOperations
:ivar snapshots: SnapshotsOperations operations
:vartype snapshots: azure.mgmt.compute.v2020_06_30.aio.operations.SnapshotsOperations
:ivar disk_encryption_sets: DiskEncryptionSetsOperations operations
:vartype disk_encryption_sets:
azure.mgmt.compute.v2020_06_30.aio.operations.DiskEncryptionSetsOperations
:ivar disk_accesses: DiskAccessesOperations operations
:vartype disk_accesses: azure.mgmt.compute.v2020_06_30.aio.operations.DiskAccessesOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2020-06-30". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ComputeManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.disks = DisksOperations(self._client, self._config, self._serialize, self._deserialize)
self.snapshots = SnapshotsOperations(self._client, self._config, self._serialize, self._deserialize)
self.disk_encryption_sets = DiskEncryptionSetsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.disk_accesses = DiskAccessesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ComputeManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
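# --- Illustrative usage sketch (not part of the generated client) ---
# Instantiating the async client and paging through disks.  DefaultAzureCredential
# (from azure-identity) and the AZURE_SUBSCRIPTION_ID environment variable are
# assumptions made for this example; client.disks.list() is the pager exposed by
# the DisksOperations group wired up in __init__ above.
if __name__ == "__main__":
    import asyncio
    import os

    from azure.identity.aio import DefaultAzureCredential

    async def _demo() -> None:
        credential = DefaultAzureCredential()
        async with ComputeManagementClient(credential, os.environ["AZURE_SUBSCRIPTION_ID"]) as client:
            async for disk in client.disks.list():
                print(disk.name)
        await credential.close()

    asyncio.run(_demo())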
|
{
"content_hash": "fe492fd2d91529f5142a7577b4119933",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 115,
"avg_line_length": 48.989583333333336,
"alnum_prop": 0.7161386349138847,
"repo_name": "Azure/azure-sdk-for-python",
"id": "dd9494935b7b95690939c8808eef0c80679c02c2",
"size": "5171",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_30/aio/_compute_management_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""
DEPRECATED functions that implement the same command line interface as the
legacy glance client.
"""
import argparse
import sys
import urlparse
from glanceclient.common import utils
SUCCESS = 0
FAILURE = 1
def get_image_fields_from_args(args):
"""
Validate the set of arguments passed as field name/value pairs
and return them as a mapping.
"""
fields = {}
for arg in args:
pieces = arg.strip(',').split('=')
if len(pieces) != 2:
msg = ("Arguments should be in the form of field=value. "
"You specified %s." % arg)
raise RuntimeError(msg)
fields[pieces[0]] = pieces[1]
return fields
def get_image_filters_from_args(args):
"""Build a dictionary of query filters based on the supplied args."""
try:
fields = get_image_fields_from_args(args)
except RuntimeError as e:
print e
return FAILURE
SUPPORTED_FILTERS = ['name', 'disk_format', 'container_format', 'status',
'min_ram', 'min_disk', 'size_min', 'size_max',
'changes-since']
filters = {}
for (key, value) in fields.items():
if key not in SUPPORTED_FILTERS:
key = 'property-%s' % (key,)
filters[key] = value
return filters
def print_image_formatted(client, image):
"""
Formatted print of image metadata.
:param client: The Glance client object
:param image: The image metadata
"""
uri_parts = urlparse.urlparse(client.endpoint)
if uri_parts.port:
hostbase = "%s:%s" % (uri_parts.hostname, uri_parts.port)
else:
hostbase = uri_parts.hostname
print "URI: %s://%s/v1/images/%s" % (uri_parts.scheme, hostbase, image.id)
print "Id: %s" % image.id
print "Public: " + (image.is_public and "Yes" or "No")
print "Protected: " + (image.protected and "Yes" or "No")
print "Name: %s" % getattr(image, 'name', '')
print "Status: %s" % image.status
print "Size: %d" % int(image.size)
print "Disk format: %s" % getattr(image, 'disk_format', '')
print "Container format: %s" % getattr(image, 'container_format', '')
print "Minimum Ram Required (MB): %s" % image.min_ram
print "Minimum Disk Required (GB): %s" % image.min_disk
if hasattr(image, 'owner'):
print "Owner: %s" % image.owner
if len(image.properties) > 0:
for k, v in image.properties.items():
print "Property '%s': %s" % (k, v)
print "Created at: %s" % image.created_at
if hasattr(image, 'deleted_at'):
print "Deleted at: %s" % image.deleted_at
if hasattr(image, 'updated_at'):
print "Updated at: %s" % image.updated_at
@utils.arg('--silent-upload', action="store_true",
help="DEPRECATED! Animations are always off.")
@utils.arg('fields', default=[], nargs='*', help=argparse.SUPPRESS)
def do_add(gc, args):
"""DEPRECATED! Use image-create instead."""
try:
fields = get_image_fields_from_args(args.fields)
except RuntimeError as e:
print e
return FAILURE
image_meta = {
'is_public': utils.string_to_bool(
fields.pop('is_public', 'False')),
'protected': utils.string_to_bool(
fields.pop('protected', 'False')),
'min_disk': fields.pop('min_disk', 0),
'min_ram': fields.pop('min_ram', 0),
}
#NOTE(bcwaldon): Use certain properties only if they are explicitly set
optional = ['id', 'name', 'disk_format', 'container_format']
for field in optional:
if field in fields:
image_meta[field] = fields.pop(field)
# Strip any args that are not supported
unsupported_fields = ['status', 'size']
for field in unsupported_fields:
if field in fields.keys():
print 'Found non-settable field %s. Removing.' % field
fields.pop(field)
# We need either a location or image data/stream to add...
image_data = None
if 'location' in fields.keys():
image_meta['location'] = fields.pop('location')
if 'checksum' in fields.keys():
image_meta['checksum'] = fields.pop('checksum')
elif 'copy_from' in fields.keys():
image_meta['copy_from'] = fields.pop('copy_from')
else:
# Grab the image data stream from stdin or redirect,
# otherwise error out
image_data = sys.stdin
image_meta['data'] = image_data
# allow owner to be set when image is created
if 'owner' in fields.keys():
image_meta['owner'] = fields.pop('owner')
# Add custom attributes, which are all the arguments remaining
image_meta['properties'] = fields
if not args.dry_run:
image = gc.images.create(**image_meta)
print "Added new image with ID: %s" % image.id
if args.verbose:
print "Returned the following metadata for the new image:"
for k, v in sorted(image.to_dict().items()):
print " %(k)30s => %(v)s" % locals()
else:
print "Dry run. We would have done the following:"
def _dump(dict):
for k, v in sorted(dict.items()):
print " %(k)30s => %(v)s" % locals()
print "Add new image with metadata:"
_dump(image_meta)
return SUCCESS
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
@utils.arg('fields', default=[], nargs='*', help=argparse.SUPPRESS)
def do_update(gc, args):
"""DEPRECATED! Use image-update instead."""
try:
fields = get_image_fields_from_args(args.fields)
except RuntimeError as e:
print e
return FAILURE
image_meta = {}
# Strip any args that are not supported
nonmodifiable_fields = ['created_at', 'deleted_at', 'deleted',
'updated_at', 'size', 'status']
for field in nonmodifiable_fields:
if field in fields.keys():
print 'Found non-modifiable field %s. Removing.' % field
fields.pop(field)
base_image_fields = ['disk_format', 'container_format', 'name',
'min_disk', 'min_ram', 'location', 'owner',
'copy_from']
for field in base_image_fields:
fvalue = fields.pop(field, None)
if fvalue is not None:
image_meta[field] = fvalue
# Have to handle "boolean" values specially...
if 'is_public' in fields:
image_meta['is_public'] = utils.string_to_bool(fields.pop('is_public'))
if 'protected' in fields:
image_meta['protected'] = utils.string_to_bool(fields.pop('protected'))
# Add custom attributes, which are all the arguments remaining
image_meta['properties'] = fields
if not args.dry_run:
image = gc.images.update(args.id, **image_meta)
print "Updated image %s" % args.id
if args.verbose:
print "Updated image metadata for image %s:" % args.id
print_image_formatted(gc, image)
else:
def _dump(dict):
for k, v in sorted(dict.items()):
print " %(k)30s => %(v)s" % locals()
print "Dry run. We would have done the following:"
print "Update existing image with metadata:"
_dump(image_meta)
return SUCCESS
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_delete(gc, args):
"""DEPRECATED! Use image-delete instead."""
if not (args.force or
user_confirm("Delete image %s?" % args.id, default=False)):
print 'Not deleting image %s' % args.id
return FAILURE
gc.images.get(args.id).delete()
@utils.arg('id', metavar='<IMAGE_ID>', help='ID of image to describe.')
def do_show(gc, args):
"""DEPRECATED! Use image-show instead."""
image = gc.images.get(args.id)
print_image_formatted(gc, image)
return SUCCESS
def _get_images(gc, args):
parameters = {
'filters': get_image_filters_from_args(args.filters),
'page_size': args.limit,
}
optional_kwargs = ['marker', 'sort_key', 'sort_dir']
for kwarg in optional_kwargs:
value = getattr(args, kwarg, None)
if value is not None:
parameters[kwarg] = value
return gc.images.list(**parameters)
@utils.arg('--limit', dest="limit", metavar="LIMIT", default=10,
type=int, help="Page size to use while requesting image metadata")
@utils.arg('--marker', dest="marker", metavar="MARKER",
default=None, help="Image index after which to begin pagination")
@utils.arg('--sort_key', dest="sort_key", metavar="KEY",
help="Sort results by this image attribute.")
@utils.arg('--sort_dir', dest="sort_dir", metavar="[desc|asc]",
help="Sort results in this direction.")
@utils.arg('filters', default=[], nargs='*', help=argparse.SUPPRESS)
def do_index(gc, args):
"""DEPRECATED! Use image-list instead."""
images = _get_images(gc, args)
if not images:
return SUCCESS
pretty_table = PrettyTable()
pretty_table.add_column(36, label="ID")
pretty_table.add_column(30, label="Name")
pretty_table.add_column(20, label="Disk Format")
pretty_table.add_column(20, label="Container Format")
pretty_table.add_column(14, label="Size", just="r")
print pretty_table.make_header()
for image in images:
print pretty_table.make_row(image.id,
image.name,
image.disk_format,
image.container_format,
image.size)
@utils.arg('--limit', dest="limit", metavar="LIMIT", default=10,
type=int, help="Page size to use while requesting image metadata")
@utils.arg('--marker', dest="marker", metavar="MARKER",
default=None, help="Image index after which to begin pagination")
@utils.arg('--sort_key', dest="sort_key", metavar="KEY",
help="Sort results by this image attribute.")
@utils.arg('--sort_dir', dest="sort_dir", metavar="[desc|asc]",
help="Sort results in this direction.")
@utils.arg('filters', default='', nargs='*', help=argparse.SUPPRESS)
def do_details(gc, args):
"""DEPRECATED! Use image-list instead."""
images = _get_images(gc, args)
for i, image in enumerate(images):
if i == 0:
print "=" * 80
print_image_formatted(gc, image)
print "=" * 80
def do_clear(gc, args):
"""DEPRECATED!"""
if not (args.force or
user_confirm("Delete all images?", default=False)):
print 'Not deleting any images'
return FAILURE
images = gc.images.list()
for image in images:
if args.verbose:
print 'Deleting image %s "%s" ...' % (image.id, image.name),
try:
image.delete()
if args.verbose:
print 'done'
except Exception as e:
print 'Failed to delete image %s' % image.id
print e
return FAILURE
return SUCCESS
@utils.arg('image_id', help='Image ID to filters members with.')
def do_image_members(gc, args):
"""DEPRECATED! Use member-list instead."""
members = gc.image_members.list(image=args.image_id)
sharers = 0
# Output the list of members
for memb in members:
can_share = ''
if memb.can_share:
can_share = ' *'
sharers += 1
print "%s%s" % (memb.member_id, can_share)
# Emit a footnote
if sharers > 0:
print "\n(*: Can share image)"
@utils.arg('--can-share', default=False, action="store_true",
help="Allow member to further share image.")
@utils.arg('member_id',
help='ID of member (typically tenant) to grant access.')
def do_member_images(gc, args):
"""DEPRECATED! Use member-list instead."""
members = gc.image_members.list(member=args.member_id)
if not len(members):
print "No images shared with member %s" % args.member_id
return SUCCESS
sharers = 0
# Output the list of images
for memb in members:
can_share = ''
if memb.can_share:
can_share = ' *'
sharers += 1
print "%s%s" % (memb.image_id, can_share)
# Emit a footnote
if sharers > 0:
print "\n(*: Can share image)"
@utils.arg('--can-share', default=False, action="store_true",
help="Allow member to further share image.")
@utils.arg('image_id', help='ID of image to describe.')
@utils.arg('member_id',
help='ID of member (typically tenant) to grant access.')
def do_members_replace(gc, args):
"""DEPRECATED!"""
if not args.dry_run:
for member in gc.image_members.list(image=args.image_id):
gc.image_members.delete(args.image_id, member.member_id)
gc.image_members.create(args.image_id, args.member_id, args.can_share)
else:
print "Dry run. We would have done the following:"
print ('Replace members of image %s with "%s"'
% (args.image_id, args.member_id))
if args.can_share:
print "New member would have been able to further share image."
@utils.arg('--can-share', default=False, action="store_true",
help="Allow member to further share image.")
@utils.arg('image_id', help='ID of image to describe.')
@utils.arg('member_id',
help='ID of member (typically tenant) to grant access.')
def do_member_add(gc, args):
"""DEPRECATED! Use member-create instead."""
if not args.dry_run:
gc.image_members.create(args.image_id, args.member_id, args.can_share)
else:
print "Dry run. We would have done the following:"
print ('Add "%s" to membership of image %s' %
(args.member_id, args.image_id))
if args.can_share:
print "New member would have been able to further share image."
def user_confirm(prompt, default=False):
"""
Yes/No question dialog with user.
:param prompt: question/statement to present to user (string)
:param default: boolean value to return if empty string
is received as response to prompt
"""
if default:
prompt_default = "[Y/n]"
else:
prompt_default = "[y/N]"
# for bug 884116, don't issue the prompt if stdin isn't a tty
if not (hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()):
return default
answer = raw_input("%s %s " % (prompt, prompt_default))
if answer == "":
return default
else:
return answer.lower() in ("yes", "y")
class PrettyTable(object):
"""Creates an ASCII art table
Example:
        ID  Name              Size         Hits
        --- ----------------- ------------ -----
        122 image                       22     0
"""
def __init__(self):
self.columns = []
def add_column(self, width, label="", just='l'):
"""Add a column to the table
:param width: number of characters wide the column should be
:param label: column heading
:param just: justification for the column, 'l' for left,
'r' for right
"""
self.columns.append((width, label, just))
def make_header(self):
label_parts = []
break_parts = []
for width, label, _ in self.columns:
# NOTE(sirp): headers are always left justified
label_part = self._clip_and_justify(label, width, 'l')
label_parts.append(label_part)
break_part = '-' * width
break_parts.append(break_part)
label_line = ' '.join(label_parts)
break_line = ' '.join(break_parts)
return '\n'.join([label_line, break_line])
def make_row(self, *args):
row = args
row_parts = []
for data, (width, _, just) in zip(row, self.columns):
row_part = self._clip_and_justify(data, width, just)
row_parts.append(row_part)
row_line = ' '.join(row_parts)
return row_line
@staticmethod
def _clip_and_justify(data, width, just):
# clip field to column width
clipped_data = str(data)[:width]
if just == 'r':
# right justify
justified = clipped_data.rjust(width)
else:
# left justify
justified = clipped_data.ljust(width)
return justified
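# Editor's sketch (not part of the original module): minimal usage of the
# PrettyTable class defined above; the image values are hypothetical.
def _pretty_table_example():
    table = PrettyTable()
    table.add_column(36, label="ID")
    table.add_column(30, label="Name")
    table.add_column(14, label="Size", just="r")
    print table.make_header()
    # Each positional argument maps onto a column, clipped and justified.
    print table.make_row("f8b2e305-0000-0000-0000-000000000000",
                         "example-image",
                         13287936)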
|
{
"content_hash": "d93285ef1b7a3d43dafee1e7834b5c43",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 79,
"avg_line_length": 33.25557809330629,
"alnum_prop": 0.5841415065568771,
"repo_name": "neumerance/deploy",
"id": "c9da52cbe66c2c700a64e53ec39d77384caa1627",
"size": "17025",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".venv/lib/python2.7/site-packages/glanceclient/v1/legacy_shell.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "49399"
},
{
"name": "CSS",
"bytes": "769836"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Erlang",
"bytes": "31042"
},
{
"name": "JavaScript",
"bytes": "642626"
},
{
"name": "PHP",
"bytes": "3858"
},
{
"name": "Perl",
"bytes": "386749"
},
{
"name": "Python",
"bytes": "23358678"
},
{
"name": "Racket",
"bytes": "28441"
},
{
"name": "Ruby",
"bytes": "453"
},
{
"name": "Shell",
"bytes": "29414"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('catalogue', '0002_topicarea'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('role', models.CharField(max_length=2000, blank=True)),
('pledge', models.CharField(max_length=2000, blank=True)),
('is_curator', models.BooleanField(default=False)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
{
"content_hash": "2ea8fd6a738ec9578500a223ff5200da",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 33.32,
"alnum_prop": 0.6002400960384153,
"repo_name": "arjweb/openeye",
"id": "352e98edb90c6430643b66785687254c2074f736",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalogue/migrations/0003_profile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42691"
},
{
"name": "HTML",
"bytes": "3792"
},
{
"name": "JavaScript",
"bytes": "78671"
},
{
"name": "Python",
"bytes": "17394"
}
],
"symlink_target": ""
}
|
from .login import LoginResource
from .users import UsersResource, UsersListResource
|
{
"content_hash": "673209f070f352c4e7941f599d516f00",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 51,
"avg_line_length": 42.5,
"alnum_prop": 0.8588235294117647,
"repo_name": "cloughrm/Flask-Angular-Template",
"id": "48740ee645219ea7d93d5fd53d7957170dd1830d",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/pastry/resources/v1/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "784"
},
{
"name": "HTML",
"bytes": "8569"
},
{
"name": "JavaScript",
"bytes": "13928"
},
{
"name": "Python",
"bytes": "11483"
}
],
"symlink_target": ""
}
|
"""
A palindromic number reads the same both ways.
The largest palindrome made from the product of two 2-digit numbers is
9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def is_palindrome(string):
if string[::-1] == string:
return True
else:
return False
def main(digits):
""" Brute force algorithm"""
solution = 0
    for i in range(10**(digits-1), 10**digits):
        for j in range(10**(digits-1), 10**digits):
            if (is_palindrome(str(j*i)) and i*j > solution):
                solution = i*j
return solution
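# Editor's sketch (not part of the original script): quick sanity checks based on
# the 2-digit case quoted in the docstring (9009 = 91 * 99).
def _sanity_checks():
    assert is_palindrome("9009")
    assert not is_palindrome("9010")
    assert main(2) == 9009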
if __name__ == '__main__':
print(main(3))
|
{
"content_hash": "11154ff1844d7b153299b8dde3447a1a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 73,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.60790273556231,
"repo_name": "jjangsangy/Project-Euler",
"id": "bad4dfdffdc2d56447be64de1fce2377bb415928",
"size": "683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/004-largest_palindrome_product.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "10525"
},
{
"name": "Go",
"bytes": "391"
},
{
"name": "Python",
"bytes": "3983"
},
{
"name": "Swift",
"bytes": "273"
}
],
"symlink_target": ""
}
|
from flask import Flask, request, session, send_from_directory, render_template
from flask import Blueprint
import json
import api
from api.common import WebSuccess, WebError
from api.annotations import api_wrapper, require_login, require_teacher, require_admin, check_csrf
from api.annotations import block_before_competition, block_after_competition
from api.annotations import log_action
blueprint = Blueprint("problem_api", __name__)
@blueprint.route('', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_unlocked_problems_hook():
return WebSuccess(data=api.problem.get_unlocked_problems(api.user.get_user()['tid']))
@blueprint.route('/solved', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_solved_problems_hook():
return WebSuccess(api.problem.get_solved_problems(api.user.get_user()['tid']))
@blueprint.route('/solves', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def get_problem_solves_hook():
pid = request.args.get('pid', '')
solves = api.stats.get_problem_solves(pid=pid)
filtered = []
for solve in solves:
team = api.team.get_team(tid=solve["tid"])["team_name"]
date = solve["timestamp"].strftime("%B %d, %Y %I:%M %p")
data = {
"team": team,
"date": date
}
filtered.append(data)
return WebSuccess(data=filtered)
@blueprint.route('/submit', methods=['POST'])
@api_wrapper
@check_csrf
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
@block_after_competition(WebError("The competition is over!"))
def submit_key_hook():
user_account = api.user.get_user()
tid = user_account['tid']
uid = user_account['uid']
pid = request.form.get('pid', '')
key = request.form.get('key', '')
ip = request.remote_addr
result = api.problem.submit_key(tid, pid, key, uid, ip)
if result['correct']:
return WebSuccess(result['message'], result['points'])
else:
return WebError(result['message'], {'code': 'wrong'})
@blueprint.route('/<path:pid>', methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
@block_after_competition(WebError("The competition is over!"))
def get_single_problem_hook(pid):
problem_info = api.problem.get_problem(pid, tid=api.user.get_user()['tid'])
return WebSuccess(data=problem_info)
@blueprint.route("/hint", methods=['GET'])
@api_wrapper
@require_login
@block_before_competition(WebError("The competition has not begun yet!"))
def request_problem_hint_hook():
@log_action
def hint(pid, source):
return None
source = request.args.get("source")
pid = request.args.get("pid")
if pid is None:
return WebError("Please supply a pid.")
if source is None:
return WebError("You have to supply the source of the hint.")
tid = api.user.get_team()["tid"]
if pid not in api.problem.get_unlocked_pids(tid):
return WebError("Your team hasn't unlocked this problem yet!")
hint(pid, source)
return WebSuccess("Hint noted.")
|
{
"content_hash": "ad7c601857177f9f22c8cbd36015bf5d",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 98,
"avg_line_length": 33.02,
"alnum_prop": 0.6862507571168989,
"repo_name": "stuyCTF/stuyCTF-Platform",
"id": "a22a29cd82b426a2083325ceec4290f5aaf08a23",
"size": "3302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/api/routes/problem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7294"
},
{
"name": "CoffeeScript",
"bytes": "51286"
},
{
"name": "HTML",
"bytes": "57602"
},
{
"name": "Python",
"bytes": "184241"
},
{
"name": "Shell",
"bytes": "4218"
}
],
"symlink_target": ""
}
|
from vitrage.datasources.driver_base import DriverBase
from vitrage import os_clients
class NovaDriverBase(DriverBase):
def __init__(self):
super(NovaDriverBase, self).__init__()
self._client = None
@property
def client(self):
if not self._client:
self._client = os_clients.nova_client()
return self._client
|
{
"content_hash": "5c9bed8f82ea78a9fae217796a1da9ae",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 54,
"avg_line_length": 26.214285714285715,
"alnum_prop": 0.6430517711171662,
"repo_name": "openstack/vitrage",
"id": "9c2045e409dca61b8951e516cc309f22559cb5fa",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/datasources/nova/nova_driver_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
}
|
"""
Definition of App class and the app manager.
"""
import os
import time
import inspect
import logging
import tornado.ioloop
import tornado.web
from ..util.icon import Icon
from ..webruntime import launch
from .clientcode import clientCode, Exporter # global client code
from .serialize import serializer
from .pair import Pair
# Create/get the tornado event loop
_tornado_loop = tornado.ioloop.IOLoop.instance()
# The tornado server, started on start()
_tornado_app = None
class AppManager(object):
""" Manage apps, or more specifically, the proxy objects.
    There is one AppManager class (in ``flexx.pair.manager``). Its
purpose is to manage the application classes and instances. Intended
for internal use.
"""
def __init__(self):
# name -> (PairClass, pending, connected) - lists contain proxies
self._proxies = {'__default__': (None, [], [])}
def register_app_class(self, cls):
""" Register a Pair class as being an application.
Applications are identified by the ``__name__`` attribute of
the class. The given class must inherit from ``Pair``.
After registering a class, it becomes possible to connect to
"http://address:port/ClassName".
"""
assert isinstance(cls, type) and issubclass(cls, Pair)
name = cls.__name__
pending, connected = [], []
if name in self._proxies and cls is not self._proxies[name][0]:
oldCls, pending, connected = self._proxies[name]
logging.warn('Re-registering app class %r' % name)
#raise ValueError('App with name %r already registered' % name)
self._proxies[name] = cls, pending, connected
def get_default_proxy(self):
""" Get the default proxy that is used for interactive use.
When a Pair class is created without a proxy, this method
is called to get one.
The default "app" is served at "http://address:port/__default__".
"""
_, pending, connected = self._proxies['__default__']
proxies = pending + connected
if proxies:
return proxies[-1]
else:
runtime = 'notebook' if is_notebook else 'browser' # todo: what runtime?
proxy = Proxy('__default__', runtime, title='Flexx app')
pending.append(proxy)
return proxy
def add_pending_proxy_instance(self, proxy):
""" Add an app instance as a pending app.
This means that the proxy is created from Python and not yet
connected. A runtime has been launched and we're waiting for
it to connect.
"""
assert isinstance(proxy, Proxy)
assert proxy.app_name in self._proxies
cls, pending, connected = self._proxies[proxy.app_name]
if proxy.status == Proxy.STATUS.PENDING:
assert proxy not in pending
pending.append(proxy)
else:
raise RuntimeError('Cannot add proxy instances that are/were '
'already connected')
def connect_client(self, ws, name, app_id=None):
""" Connect an incoming client connection to a proxy object
Called by the websocket object upon connecting, thus initiating
the application. The connection can be for the default app, for
a pending app, or for a fresh app (external connection).
"""
print('connecting', name, app_id)
cls, pending, connected = self._proxies[name]
if name == '__default__':
if pending:
proxy = pending.pop(-1)
else:
proxy = Proxy(name, runtime=None)
elif not app_id:
# Create a fresh proxy - there already is a runtime
proxy = Proxy(cls.__name__, runtime=None)
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
else:
# Search for the app with the specific id
for proxy in pending:
if proxy.id == app_id:
pending.remove(proxy)
break
else:
raise RuntimeError('Asked for app id %r, '
'but could not find it' % app_id)
# Add app to connected, set ws
assert proxy.status == Proxy.STATUS.PENDING
proxy._connect_client(ws)
connected.append(proxy)
return proxy # For the ws
def disconnect_client(self, proxy):
""" Close a connection to a client.
This is called by the websocket when the connection is closed.
The manager will remove the proxy from the list of connected
instances.
"""
cls, pending, connected = self._proxies[proxy.app_name]
try:
connected.remove(proxy)
except ValueError:
pass
proxy.close()
def has_app_name(self, name):
""" Returns True if name is a registered appliciation name
"""
return name in self._proxies.keys()
def get_app_names(self):
""" Get a list of registered application names
"""
return [name for name in self._proxies.keys()]
def get_proxy_by_id(self, name, id):
""" Get proxy object by name and id
"""
cls, pending, connected = self._proxies[name]
for proxy in pending:
if proxy.id == id:
return proxy
for proxy in connected:
if proxy.id == id:
return proxy
# Create global app manager object
manager = AppManager()
# todo: move to ..utils
def port_hash(name):
""" port_hash(name)
Given a string, returns a port number between 49152 and 65535.
    (2**14 (16384) different possibilities)
This range is the range for dynamic and/or private ports
(ephemeral ports) specified by iana.org.
The algorithm is deterministic, thus providing a way to map names
to port numbers.
"""
fac = 0xd2d84a61
val = 0
for c in name:
val += ( val>>3 ) + ( ord(c)*fac )
val += (val>>3) + (len(name)*fac)
return 49152 + (val % 2**14)
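# Editor's sketch (not part of the original module): port_hash() is deterministic,
# so repeated calls with the same name map to the same port in the ephemeral range.
def _port_hash_example():
    port = port_hash('flexx+0')
    assert port == port_hash('flexx+0')
    assert 49152 <= port < 49152 + 2 ** 14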
def init_server(host='localhost', port=None):
""" Initialize the server if it is not already running.
"""
global _tornado_app
# Check that its not already running
if _tornado_app is not None:
return
#raise RuntimeError('flexx.ui server already created')
# Create server
from .serve import FlexxTornadoApplication
_tornado_app = FlexxTornadoApplication()
# Start server (find free port number if port not given)
if port is not None:
_tornado_app.listen(port, host)
else:
for i in range(100):
port = port_hash('flexx+%i' % i)
try:
_tornado_app.listen(port, host)
break
except OSError:
pass # address already in use
else:
raise RuntimeError('Could not bind to free address')
# Notify address, so its easy to e.g. copy and paste in the browser
_tornado_app.serving_at = host, port
print('Serving apps at http://%s:%i/' % (host, port))
# todo: ui.run looks weird in IPython. Maybe ui.load() or start()
def run(): # (runtime='xul', host='localhost', port=None):
""" Start the server and event loop if not already running.
This function generally does not return until the application is
stopped, although it will try to behave nicely in interactive
environments (e.g. Spyder, IEP, Jupyter notebook), so the caller
should take into account that the function may return immediately.
"""
# Get server up
init_server()
# Start event loop
if not (hasattr(_tornado_loop, '_running') and _tornado_loop._running):
_tornado_loop.start()
return JupyterChecker()
is_notebook = False
class JupyterChecker(object):
""" This gets returned by run(), so that in the IPython notebook
_repr_html_() gets called. When this happens, we know we are in the
Jupyter notebook, or at least in something that can display html.
In the HTML that we then produce, we put the whole flexx library.
"""
def _repr_html_(self):
global is_notebook
from IPython.display import display, Javascript, HTML
if is_notebook:
return "<i>Flexx already loaded</i>" # Don't inject twice
is_notebook = True
host, port = _tornado_app.serving_at
#name = app.app_name + '-' + app.id
name = '__default__'
url = 'ws://%s:%i/%s/ws' % (host, port, name)
t = "Injecting JS/CSS"
t += "<style>\n%s\n</style>\n" % clientCode.get_css()
t += "<script>\n%s\n</script>" % clientCode.get_js()
t += "<script>flexx.ws_url=%r; flexx.is_notebook=true; flexx.init();</script>" % url
#return t + '<i>Flexx is ready to go</i>'
display(HTML(t))
return '<i>Flexx is ready to go</i>'
def stop():
""" Stop the event loop
"""
_tornado_loop.stop()
# # todo: this does not work if the event loop is running!
# def process_events():
# """ Process events
#
# Call this to keep the application working while running in a loop.
# """
# _tornado_loop.run_sync(lambda x=None: None)
def call_later(delay, callback, *args, **kwargs):
""" Call the given callback after delay seconds. If delay is zero,
call in the next event loop iteration.
"""
if delay <= 0:
_tornado_loop.add_callback(callback, *args, **kwargs)
else:
_tornado_loop.call_later(delay, callback, *args, **kwargs)
# todo: move to ..util?
def create_enum(*members):
""" Create an enum type from given string arguments.
"""
assert all([isinstance(m, str) for m in members])
enums = dict([(s, s) for s in members])
return type('Enum', (), enums)
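# Editor's sketch (not part of the original module): create_enum() produces a type
# whose members are plain strings equal to their own names.
def _create_enum_example():
    Color = create_enum('RED', 'GREEN', 'BLUE')
    assert Color.RED == 'RED'
    assert Color.BLUE == 'BLUE'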
def make_app(cls=None, **signal_values):
""" Mark a Pair class as an app, to be used as a class decorator.
Does the following things:
* The class is registered as an app with the server, so that clients
(incoming connections) can load the app.
* Adds a ``launch()`` function to the class to easily create an app
instance.
* Adds an ``export()`` function to the class to allow exporting to
static HTML.
* adds ``_IS_APP`` attribute to the class with value ``True`` (used
internally).
Parameters for the launch function:
``runtime`` (str): the web runtime to launch the app in. Default
'xul'. ``**signal_values``: combined with the signal_values given
to the ``make_app()`` decorator, these are used to initialize signal
values for the app instance.
"""
signal_values1 = signal_values
def _make_app(cls):
def launch(runtime='xul', **signal_values):
""" Launch an instance of this app in the specified runtime.
"""
signal_values2 = signal_values
# Get final kwargs list
d = {}
d.update(signal_values1)
d.update(signal_values2)
# Instantiate widget with a fresh client object
proxy = Proxy(cls.__name__, runtime, **d)
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
return app
def export(filename=None):
""" Export the app to HTML
"""
proxy = Proxy(cls.__name__, '<export>')
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
if filename is None:
return proxy._ws.to_html()
else:
return proxy._ws.write_html(filename)
manager.register_app_class(cls)
cls.launch = launch
cls.export = export
cls._IS_APP = True # Mark the class as an app
return cls
if cls is None:
return _make_app
else:
return _make_app(cls)
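# Editor's sketch (not part of the original module): the intended shape of a
# make_app() usage. MyApp is hypothetical; actually launching it needs a working
# Pair subclass and an installed web runtime, so this function is never called.
def _make_app_example():
    @make_app
    class MyApp(Pair):
        pass
    app = MyApp.launch(runtime='xul')  # launch() is added by the decorator
    return app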
# todo: this does not work well with creating apps from scratch yet; see run_python_in_node.py example
class Proxy(object):
""" A proxy between Python and the client runtime
This class is basically a wrapper for the app widget, the web runtime,
and the websocket instance that connects to it.
"""
STATUS = create_enum('PENDING', 'CONNECTED', 'CLOSED')
def __init__(self, app_name, runtime=None, **runtime_kwargs):
# Note: to avoid circular references, do not store the app instance!
self._app_name = app_name
self._runtime_kwargs = runtime_kwargs
# Init runtime object (the runtime argument is a string)
self._runtime = None
# Init websocket, will be set when a connection is made
self._ws = None
# Unless app_name is __default__, the proxy will have a Pair instance
self._pair = None
# Object to manage the client code (JS/CSS/HTML)
self._known_pair_classes = set()
for cls in clientCode.get_defined_pair_classes():
self._known_pair_classes.add(cls)
# While the client is not connected, we keep a queue of
# commands, which are send to the client as soon as it connects
self._pending_commands = []
if runtime:
self._launch_runtime(runtime, **runtime_kwargs)
@property
def id(self):
""" The unique identifier of this app as a string. Used to
connect a runtime to a specific client.
"""
return '%x' % id(self)
@property
def app_name(self):
""" The name of the application that this proxy represents.
"""
return self._app_name
def __repr__(self):
s = self.status.lower()
return '<Proxy for %r (%s) at 0x%x>' % (self.app_name, s, id(self))
def _launch_runtime(self, runtime, **runtime_kwargs):
# Register the instance at the manager
manager.add_pending_proxy_instance(self)
if runtime == '<export>':
self._ws = Exporter(self)
elif runtime == 'notebook':
pass
elif runtime:
init_server()
host, port = _tornado_app.serving_at
# We associate the runtime with this specific app instance by
            # including the app id in the url. In this way, it is pretty
# much guaranteed that the runtime will connect to *this* app.
name = self.app_name
if name != '__default__':
name += '-' + self.id
if runtime == 'nodejs':
self._runtime = launch('http://%s:%i/%s/' % (host, port, name),
runtime=runtime, code=clientCode.get_js())
else:
self._runtime = launch('http://%s:%i/%s/' % (host, port, name),
runtime=runtime, **runtime_kwargs)
print('Instantiate app client %s' % self.app_name)
def _connect_client(self, ws):
assert self._ws is None
# Set websocket object - this is what changes the status to CONNECTED
self._ws = ws
# todo: re-enable this
# Set some app specifics
# self._ws.command('ICON %s.ico' % self.id)
# self._ws.command('TITLE %s' % self._config.title)
# Send pending commands
for command in self._pending_commands:
self._ws.command(command)
def _set_pair_instance(self, pair):
assert self._pair is None
self._pair = pair
# todo: connect to title change and icon change events
def close(self):
""" Close the runtime, if possible
"""
# todo: close via JS
if self._runtime:
self._runtime.close()
if self._pair:
self._pair = None # break circular reference
@property
def status(self):
""" The status of this proxy. Can be PENDING, CONNECTED or
CLOSED. See Proxy.STATUS enum.
"""
# todo: is this how we want to do enums throughout?
if self._ws is None:
return self.STATUS.PENDING # not connected yet
elif self._ws.close_code is None:
return self.STATUS.CONNECTED # alive and kicking
else:
return self.STATUS.CLOSED # connection closed
## Widget-facing code
def register_pair_class(self, cls):
# todo: do we use this somewhere? It should
""" Register the given class. If already registered, this function
does nothing.
"""
if not (isinstance(cls, type) and issubclass(cls, Pair)):
raise ValueError('Not a Pair class')
if cls in self._known_pair_classes:
return
# Make sure the base classes are defined first
for cls2 in cls.mro()[1:]:
if not issubclass(cls2, Pair): # True if cls2 is *the* Pair class
break
if cls2 not in self._known_pair_classes:
self.register_pair_class(cls2)
# Register
self._known_pair_classes.add(cls)
# Define class
print('Dynamically defining class!', cls)
js = cls.get_js()
css = cls.get_css()
self._send_command('DEFINE-JS ' + js)
if css.strip():
self._send_command('DEFINE-CSS ' + css)
def _send_command(self, command):
""" Send the command, add to pending queue, or error when closed.
"""
if self.status == self.STATUS.CONNECTED:
if is_notebook:
# In the notebook, we send commands via a JS display, so that
# they are also executed when the notebook is exported
from IPython.display import display, Javascript
display(Javascript('flexx.command(%r);' % command))
else:
self._ws.command(command)
elif self.status == self.STATUS.PENDING:
self._pending_commands.append(command)
else:
raise RuntimeError('Cannot send commands; app is closed')
def _receive_command(self, command):
""" Received a command from JS.
"""
if command.startswith('RET '):
print(command[4:]) # Return value
elif command.startswith('ERROR '):
logging.error('JS - ' + command[6:].strip())
elif command.startswith('WARN '):
logging.warn('JS - ' + command[5:].strip())
elif command.startswith('PRINT '):
print(command[5:].strip())
elif command.startswith('INFO '):
logging.info('JS - ' + command[5:].strip())
elif command.startswith('SIGNAL '):
            # todo: seems weird to deal with here. implement this by registering some handler?
_, id, signal_name, txt = command.split(' ', 3)
ob = Pair._instances.get(id, None)
if ob is not None:
# Note that this will again sync with JS, but it stops there:
# eventual synchronity
#print('setting signal from js:', signal_name)
signal = getattr(ob, signal_name)
value = serializer.loads(txt)
signal._set(value)
else:
logging.warn('Unknown command received from JS:\n%s' % command)
def _exec(self, code):
""" Like eval, but without returning the result value.
"""
self._send_command('EXEC ' + code)
def eval(self, code):
""" Evaluate the given JavaScript code in the client
Intended for use during development and debugging. Deployable
code should avoid making use of this function.
"""
if self._ws is None:
raise RuntimeError('App not connected')
self._send_command('EVAL ' + code)
|
{
"content_hash": "dfe4a588b8718325b74dd4ba3afbe9f6",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 102,
"avg_line_length": 35.25919439579685,
"alnum_prop": 0.5727909402473551,
"repo_name": "almarklein/flexx",
"id": "e8302d8ac448508f499cb5d041e6f2e2a56757d0",
"size": "20133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flexx/app/proxy.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3085"
},
{
"name": "JavaScript",
"bytes": "70"
},
{
"name": "Python",
"bytes": "539379"
}
],
"symlink_target": ""
}
|
"""Generate .gclient file for Angle.
Because gclient won't accept "--name .", use a different name, then edit.
"""
import subprocess
import sys
def main():
gclient_cmd = ('gclient config --name change2dot --unmanaged '
'https://chromium.googlesource.com/angle/angle.git')
try:
rc = subprocess.call(gclient_cmd, shell=True)
except OSError:
print 'could not run "%s" via shell' % gclient_cmd
sys.exit(1)
if rc:
print 'failed command: "%s"' % gclient_cmd
sys.exit(1)
with open('.gclient') as gclient_file:
content = gclient_file.read()
with open('.gclient', 'w') as gclient_file:
gclient_file.write(content.replace('change2dot', '.'))
print 'created .gclient'
if __name__ == '__main__':
main()
|
{
"content_hash": "52d3f41a44c28d62263b895b090078ed",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 24.424242424242426,
"alnum_prop": 0.6029776674937966,
"repo_name": "youtube/cobalt",
"id": "e039007714a24c913d12de70f4040dd3caccda4a",
"size": "979",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/angle/scripts/bootstrap.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
This module contains the Location class.
"""
# Will Holmgren, University of Arizona, 2014-2016.
import datetime
import pandas as pd
import pytz
from pvlib import solarposition
from pvlib import clearsky
from pvlib import atmosphere
class Location(object):
"""
Location objects are convenient containers for latitude, longitude,
timezone, and altitude data associated with a particular
geographic location. You can also assign a name to a location object.
Location objects have two timezone attributes:
    * ``tz`` is an IANA timezone string.
* ``pytz`` is a pytz timezone object.
Location objects support the print method.
Parameters
----------
latitude : float.
Positive is north of the equator.
Use decimal degrees notation.
longitude : float.
Positive is east of the prime meridian.
Use decimal degrees notation.
tz : str, int, float, or pytz.timezone.
See
http://en.wikipedia.org/wiki/List_of_tz_database_time_zones
for a list of valid time zones.
pytz.timezone objects will be converted to strings.
ints and floats must be in hours from UTC.
    altitude : float.
Altitude from sea level in meters.
name : None or string.
Sets the name attribute of the Location object.
**kwargs
Arbitrary keyword arguments.
Included for compatibility, but not used.
See also
--------
pvsystem.PVSystem
"""
def __init__(self, latitude, longitude, tz='UTC', altitude=0,
name=None, **kwargs):
self.latitude = latitude
self.longitude = longitude
if isinstance(tz, str):
self.tz = tz
self.pytz = pytz.timezone(tz)
elif isinstance(tz, datetime.tzinfo):
self.tz = tz.zone
self.pytz = tz
elif isinstance(tz, (int, float)):
self.tz = tz
self.pytz = pytz.FixedOffset(tz*60)
else:
raise TypeError('Invalid tz specification')
self.altitude = altitude
self.name = name
# needed for tying together Location and PVSystem in LocalizedPVSystem
# if LocalizedPVSystem signature is reversed
# super(Location, self).__init__(**kwargs)
def __str__(self):
return ('{}: latitude={}, longitude={}, tz={}, altitude={}'
.format(self.name, self.latitude, self.longitude,
self.tz, self.altitude))
@classmethod
def from_tmy(cls, tmy_metadata, tmy_data=None, **kwargs):
"""
Create an object based on a metadata
dictionary from tmy2 or tmy3 data readers.
Parameters
----------
tmy_metadata : dict
Returned from tmy.readtmy2 or tmy.readtmy3
tmy_data : None or DataFrame
Optionally attach the TMY data to this object.
Returns
-------
Location object (or the child class of Location that you
called this method from).
"""
# not complete, but hopefully you get the idea.
# might need code to handle the difference between tmy2 and tmy3
# determine if we're dealing with TMY2 or TMY3 data
tmy2 = tmy_metadata.get('City', False)
latitude = tmy_metadata['latitude']
longitude = tmy_metadata['longitude']
if tmy2:
name = tmy_metadata['City']
else:
name = tmy_metadata['Name']
tz = tmy_metadata['TZ']
altitude = tmy_metadata['altitude']
new_object = cls(latitude, longitude, tz=tz, altitude=altitude,
name=name, **kwargs)
# not sure if this should be assigned regardless of input.
if tmy_data is not None:
new_object.tmy_data = tmy_data
return new_object
def get_solarposition(self, times, pressure=None, temperature=12,
**kwargs):
"""
Uses the :py:func:`solarposition.get_solarposition` function
to calculate the solar zenith, azimuth, etc. at this location.
Parameters
----------
times : DatetimeIndex
pressure : None, float, or array-like
If None, pressure will be calculated using
:py:func:`atmosphere.alt2pres` and ``self.altitude``.
temperature : None, float, or array-like
kwargs passed to :py:func:`solarposition.get_solarposition`
Returns
-------
solar_position : DataFrame
Columns depend on the ``method`` kwarg, but always include
``zenith`` and ``azimuth``.
"""
if pressure is None:
pressure = atmosphere.alt2pres(self.altitude)
return solarposition.get_solarposition(times, latitude=self.latitude,
longitude=self.longitude,
altitude=self.altitude,
pressure=pressure,
temperature=temperature,
**kwargs)
def get_clearsky(self, times, model='ineichen', **kwargs):
"""
Calculate the clear sky estimates of GHI, DNI, and/or DHI
at this location.
Parameters
----------
times : DatetimeIndex
model : str
The clear sky model to use.
kwargs passed to the relevant function(s).
Returns
-------
clearsky : DataFrame
Column names are: ``ghi, dni, dhi``.
"""
if model == 'ineichen':
cs = clearsky.ineichen(times, latitude=self.latitude,
longitude=self.longitude,
altitude=self.altitude,
**kwargs)
elif model == 'haurwitz':
solpos = self.get_solarposition(times, **kwargs)
cs = clearsky.haurwitz(solpos['apparent_zenith'])
else:
raise ValueError('{} is not a valid clear sky model'
.format(model))
return cs
def get_airmass(self, times=None, solar_position=None,
model='kastenyoung1989'):
"""
Calculate the relative and absolute airmass.
        Automatically chooses zenith or apparent zenith
depending on the selected model.
Parameters
----------
times : None or DatetimeIndex
Only used if solar_position is not provided.
solar_position : None or DataFrame
            DataFrame with columns 'apparent_zenith', 'zenith'.
model : str
Relative airmass model
Returns
-------
airmass : DataFrame
Columns are 'airmass_relative', 'airmass_absolute'
"""
if solar_position is None:
solar_position = self.get_solarposition(times)
if model in atmosphere.APPARENT_ZENITH_MODELS:
zenith = solar_position['apparent_zenith']
elif model in atmosphere.TRUE_ZENITH_MODELS:
zenith = solar_position['zenith']
else:
raise ValueError('{} is not a valid airmass model'.format(model))
airmass_relative = atmosphere.relativeairmass(zenith, model)
pressure = atmosphere.alt2pres(self.altitude)
airmass_absolute = atmosphere.absoluteairmass(airmass_relative,
pressure)
airmass = pd.DataFrame()
airmass['airmass_relative'] = airmass_relative
airmass['airmass_absolute'] = airmass_absolute
return airmass
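# Editor's sketch (not part of the original module): typical construction of a
# Location followed by solar position and clear-sky calculations; the coordinates,
# altitude and dates are arbitrary example values.
def _location_example():
    times = pd.date_range(start='2016-07-01', end='2016-07-02', freq='1H',
                          tz='US/Arizona')
    tucson = Location(32.2, -110.9, tz='US/Arizona', altitude=700,
                      name='Tucson')
    solar_position = tucson.get_solarposition(times)
    clear_sky = tucson.get_clearsky(times, model='ineichen')
    return solar_position, clear_sky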
|
{
"content_hash": "b658a7ec1bc0dd14e3f57ad7ca24369d",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 78,
"avg_line_length": 31.857707509881422,
"alnum_prop": 0.5450372208436725,
"repo_name": "ianctse/pvlib-python",
"id": "1b0b9db23ef76bec4b7b991623116be8b9d51a65",
"size": "8060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pvlib/location.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PowerShell",
"bytes": "2985"
},
{
"name": "Python",
"bytes": "466906"
}
],
"symlink_target": ""
}
|
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtUiTools import *
from sendMessageForm import sendMessageUI
from addUser import addUserUI
from findUser import findUserUI
from searchProfByCourseID import findProfByCourseIDUI
from addCourseToProfForm import addCourseToProfUI
from searchCourseByProfIDForm import searchCourseByProfIDUI
from findCourse import findCourseUI
import plugin.databaseConnect as database
class otherOptionUI(QMainWindow):
def __init__(self,parent = None):
QMainWindow.__init__(self,None)
self.setMinimumSize(900,600)
self.setWindowTitle("Class Course")
palette = QPalette()
        palette.setBrush(QPalette.Background,QBrush(QPixmap("resources/images/programBackground.png")))
self.edu_logo = QPixmap("resources/images/educationLogo.png")
self.setPalette(palette)
self.bar = QPixmap("resources/images/topBarBackground.png")
self.parent = parent
self.UIinit()
def UIinit(self):
loader = QUiLoader()
form = loader.load("resources/UI/otherOption.ui",None)
self.setCentralWidget(form)
#Upper Bar
self.bar_group = form.findChild(QLabel,"barLabel")
self.bar_group.setPixmap(self.bar)
self.home_button = form.findChild(QPushButton,"homeButton")
self.profile_button = form.findChild(QPushButton,"profileButton")
self.faculties_button = form.findChild(QPushButton,"facultiesButton")
self.majors_button = form.findChild(QPushButton,"majorsButton")
self.course_button = form.findChild(QPushButton,"courseButton")
self.other_button = form.findChild(QPushButton, "othersButton")
#page properties
self.add_user_button = form.findChild(QPushButton,"addUserButton")
self.search_user_button = form.findChild(QPushButton,"searchUserButton")
self.assign_course_button = form.findChild(QPushButton,"assignCourseButton")
self.search_course_by_id = form.findChild(QPushButton,"searchCourseByID")
self.search_prof_by_course = form.findChild(QPushButton,"searchProfByCourseID")
self.search_course_by_prof = form.findChild(QPushButton, "searchCourseByProfID")
self.message = form.findChild(QPushButton, "messageButton")
self.increase_year = form.findChild(QPushButton, "increaseYearButton")
#Upper Bar pressed
self.home_button.clicked.connect(self.goHome)
self.faculties_button.clicked.connect(self.goFac)
self.majors_button.clicked.connect(self.goMaj)
self.other_button.clicked.connect(self.goOther)
self.course_button.clicked.connect(self.goCourse)
self.profile_button.clicked.connect(self.goProfile)
#Internal Button Pressed
self.add_user_button.clicked.connect(self.addUser)
self.search_user_button.clicked.connect(self.searchUser)
self.assign_course_button.clicked.connect(self.assignCoursetoProf)
self.search_course_by_id.clicked.connect(self.searchCoursebyID)
self.search_prof_by_course.clicked.connect(self.searchProfbyCourse)
self.search_course_by_prof.clicked.connect(self.searchCoursebyProf)
self.message.clicked.connect(self.createMessage)
self.increase_year.clicked.connect(self.increaseYears)
def increaseYears(self):
db = database.databaseAdmin()
if(self.parent.showCONFIRM("Are you sure?", "Are you sure you want to iterate year and term?\n Once done, it cannot be un-done.")):
if (self.parent.showCONFIRM("CRITICAL WARNING","THIS ACTION CANNOT BE UNDONE. PLEASE CONFIRM ACTION.")):
if(db.incrementData()):
self.parent.showOK("Data Edited", "All Year and Term are iterated.")
def createMessage(self):
self.createM = sendMessageUI(parent = self.parent)
self.createM.show()
def goHome(self):
self.parent.changePageLoginSection("home")
def goProfile(self):
self.parent.changePageLoginSection("profile")
def goFac(self):
self.parent.changePageLoginSection("addfaculties")
def goMaj(self):
self.parent.changePageLoginSection("addmajor")
def goCourse(self):
self.parent.changePageLoginSection("addcourse")
def goOther(self):
self.parent.changePageLoginSection("otherOption")
def addUser(self):
self.addUs = addUserUI(parent = self.parent)
self.addUs.show()
##Search User by using User ID##
def searchUser(self):
self.searchUs = findUserUI(parent = self.parent)
self.searchUs.show()
##Assign course to Professors##
def assignCoursetoProf(self):
self.assigning = addCourseToProfUI(parent = self.parent)
self.assigning.show()
##Search for course by using course ID##
def searchCoursebyID(self):
self.findCourseUI = findCourseUI(None, None, None, None, parent = self)
self.findCourseUI.show()
##Search for Professor by using course ID##
def searchProfbyCourse(self):
self.searchCourseID = findProfByCourseIDUI(parent = self.parent)
self.searchCourseID.show()
##Search for course by using professor's ID##
def searchCoursebyProf(self):
self.searchCourseID = searchCourseByProfIDUI(parent = self.parent)
self.searchCourseID.show()
|
{
"content_hash": "728a7f6a69c9fe705172aea72d16b079",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 139,
"avg_line_length": 36.86486486486486,
"alnum_prop": 0.6986803519061584,
"repo_name": "Poom1997/GMan",
"id": "d38181414fcabb3471d0596f563a2b6606818d9e",
"size": "5456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "otherOptionForm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "317"
},
{
"name": "CSS",
"bytes": "822"
},
{
"name": "HTML",
"bytes": "2146"
},
{
"name": "Python",
"bytes": "158551"
}
],
"symlink_target": ""
}
|
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_endpoints import V1Endpoints
class TestV1Endpoints(unittest.TestCase):
""" V1Endpoints unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1Endpoints(self):
"""
Test V1Endpoints
"""
model = lib_openshift.models.v1_endpoints.V1Endpoints()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "598dbea368dc2ae14d67b97a4212ab37",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 24.215686274509803,
"alnum_prop": 0.6898785425101215,
"repo_name": "detiber/lib_openshift",
"id": "fab1882c1d0921927e3a2a5f6d953119301eeccb",
"size": "1252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_v1_endpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
}
|
from typing import Callable, List, Optional
class FormattedException(Exception):
pass
class TemplateParserException(Exception):
def __init__(self, message: str) -> None:
self.message = message
def __str__(self) -> str:
return self.message
class TokenizationException(Exception):
def __init__(self, message: str, line_content: Optional[str] = None) -> None:
self.message = message
self.line_content = line_content
class TokenizerState:
def __init__(self) -> None:
self.i = 0
self.line = 1
self.col = 1
class Token:
def __init__(self, kind: str, s: str, tag: str, line: int, col: int, line_span: int) -> None:
self.kind = kind
self.s = s
self.tag = tag
self.line = line
self.col = col
self.line_span = line_span
def tokenize(text: str) -> List[Token]:
def advance(n: int) -> None:
for _ in range(n):
state.i += 1
if state.i >= 0 and text[state.i - 1] == "\n":
state.line += 1
state.col = 1
else:
state.col += 1
def looking_at(s: str) -> bool:
return text[state.i : state.i + len(s)] == s
def looking_at_htmlcomment() -> bool:
return looking_at("<!--")
def looking_at_handlebarcomment() -> bool:
return looking_at("{{!")
def looking_at_djangocomment() -> bool:
return looking_at("{#")
def looking_at_handlebarpartial() -> bool:
return looking_at("{{>")
def looking_at_html_start() -> bool:
return looking_at("<") and not looking_at("</")
def looking_at_html_end() -> bool:
return looking_at("</")
def looking_at_handlebars_start() -> bool:
return looking_at("{{#") or looking_at("{{^")
def looking_at_handlebars_end() -> bool:
return looking_at("{{/")
def looking_at_django_start() -> bool:
return looking_at("{% ") and not looking_at("{% end")
def looking_at_django_end() -> bool:
return looking_at("{% end")
def looking_at_jinja2_end_whitespace_stripped() -> bool:
return looking_at("{%- end")
def looking_at_jinja2_start_whitespace_stripped_type2() -> bool:
        # This function detects tags like {%- if foo -%}...{% endif %}
return looking_at("{%-") and not looking_at("{%- end")
state = TokenizerState()
tokens = []
while state.i < len(text):
try:
if looking_at_htmlcomment():
s = get_html_comment(text, state.i)
tag = s[4:-3]
kind = "html_comment"
elif looking_at_handlebarcomment():
s = get_handlebar_comment(text, state.i)
tag = s[3:-2]
kind = "handlebar_comment"
elif looking_at_djangocomment():
s = get_django_comment(text, state.i)
tag = s[2:-2]
kind = "django_comment"
elif looking_at_handlebarpartial():
s = get_handlebar_partial(text, state.i)
tag = s[9:-2]
kind = "handlebars_singleton"
elif looking_at_html_start():
s = get_html_tag(text, state.i)
if s.endswith("/>"):
end_offset = -2
else:
end_offset = -1
tag_parts = s[1:end_offset].split()
if not tag_parts:
raise TemplateParserException("Tag name missing")
tag = tag_parts[0]
if tag == "!DOCTYPE":
kind = "html_doctype"
elif s.endswith("/>"):
kind = "html_singleton"
else:
kind = "html_start"
elif looking_at_html_end():
s = get_html_tag(text, state.i)
tag = s[2:-1]
kind = "html_end"
elif looking_at_handlebars_start():
s = get_handlebars_tag(text, state.i)
tag = s[3:-2].split()[0]
if tag.startswith("*"):
tag = tag[1:]
kind = "handlebars_start"
elif looking_at_handlebars_end():
s = get_handlebars_tag(text, state.i)
tag = s[3:-2]
kind = "handlebars_end"
elif looking_at_django_start():
s = get_django_tag(text, state.i)
tag = s[3:-2].split()[0]
kind = "django_start"
if s[-3] == "-":
kind = "jinja2_whitespace_stripped_start"
elif looking_at_django_end():
s = get_django_tag(text, state.i)
tag = s[6:-3]
kind = "django_end"
elif looking_at_jinja2_end_whitespace_stripped():
s = get_django_tag(text, state.i)
tag = s[7:-3]
kind = "jinja2_whitespace_stripped_end"
elif looking_at_jinja2_start_whitespace_stripped_type2():
s = get_django_tag(text, state.i, stripped=True)
tag = s[3:-3].split()[0]
kind = "jinja2_whitespace_stripped_type2_start"
else:
advance(1)
continue
except TokenizationException as e:
raise FormattedException(
f'''{e.message} at line {state.line} col {state.col}:"{e.line_content}"''',
)
line_span = len(s.split("\n"))
token = Token(
kind=kind,
s=s,
tag=tag.strip(),
line=state.line,
col=state.col,
line_span=line_span,
)
tokens.append(token)
advance(len(s))
def add_pseudo_end_token(kind: str) -> None:
token = Token(
kind=kind,
s="</" + tag + ">",
tag=tag,
line=state.line,
col=state.col,
line_span=1,
)
tokens.append(token)
if kind == "html_singleton":
# Here we insert a Pseudo html_singleton_end tag so as to have
# ease of detection of end of singleton html tags which might be
# needed in some cases as with our html pretty printer.
add_pseudo_end_token("html_singleton_end")
if kind == "handlebars_singleton":
            # We insert a pseudo handlebar end tag for singleton cases of
            # handlebars like the partials. This helps in indenting multi-line partials.
add_pseudo_end_token("handlebars_singleton_end")
return tokens
HTML_VOID_TAGS = {
"area",
"base",
"br",
"col",
"command",
"embed",
"hr",
"img",
"input",
"keygen",
"link",
"meta",
"param",
"source",
"track",
"wbr",
}
def validate(
fn: Optional[str] = None, text: Optional[str] = None, check_indent: bool = True
) -> None:
assert fn or text
if fn is None:
fn = "<in memory file>"
if text is None:
with open(fn) as f:
text = f.read()
try:
tokens = tokenize(text)
except FormattedException as e:
raise TemplateParserException(
f"""
fn: {fn}
{e}"""
)
class State:
def __init__(self, func: Callable[[Token], None]) -> None:
self.depth = 0
self.foreign = False
self.matcher = func
def no_start_tag(token: Token) -> None:
raise TemplateParserException(
f"""
No start tag
fn: {fn}
end tag:
{token.tag}
line {token.line}, col {token.col}
"""
)
state = State(no_start_tag)
def start_tag_matcher(start_token: Token) -> None:
state.depth += 1
start_tag = start_token.tag.strip("~")
start_line = start_token.line
start_col = start_token.col
old_matcher = state.matcher
old_foreign = state.foreign
if start_tag in ["math", "svg"]:
state.foreign = True
def f(end_token: Token) -> None:
end_tag = end_token.tag.strip("~")
end_line = end_token.line
end_col = end_token.col
if start_tag == "a":
max_lines = 3
else:
max_lines = 1
problem = None
if (start_tag == "code") and (end_line == start_line + 1):
problem = "Code tag is split across two lines."
if start_tag != end_tag:
problem = "Mismatched tag."
elif check_indent and (end_line > start_line + max_lines):
if end_col != start_col:
problem = "Bad indentation."
if problem:
raise TemplateParserException(
f"""
fn: {fn}
{problem}
start:
{start_token.s}
line {start_line}, col {start_col}
end tag:
{end_tag}
line {end_line}, col {end_col}
"""
)
state.matcher = old_matcher
state.foreign = old_foreign
state.depth -= 1
state.matcher = f
for token in tokens:
kind = token.kind
tag = token.tag
if kind == "html_start":
if not state.foreign and tag in HTML_VOID_TAGS:
raise TemplateParserException(
f"Tag must be self-closing: {tag} at {fn} line {token.line}, col {token.col}"
)
start_tag_matcher(token)
elif kind == "html_singleton":
if not state.foreign and tag not in HTML_VOID_TAGS:
raise TemplateParserException(
f"Tag must not be self-closing: {tag} at {fn} line {token.line}, col {token.col}"
)
elif kind == "html_end":
state.matcher(token)
elif kind == "handlebars_start":
start_tag_matcher(token)
elif kind == "handlebars_end":
state.matcher(token)
elif kind in {
"django_start",
"jinja2_whitespace_stripped_start",
"jinja2_whitespace_stripped_type2_start",
}:
if is_django_block_tag(tag):
start_tag_matcher(token)
elif kind in {"django_end", "jinja2_whitespace_stripped_end"}:
state.matcher(token)
if state.depth != 0:
raise TemplateParserException("Missing end tag")
def is_django_block_tag(tag: str) -> bool:
return tag in [
"autoescape",
"block",
"comment",
"for",
"if",
"ifequal",
"macro",
"verbatim",
"blocktrans",
"trans",
"raw",
"with",
]
def get_handlebars_tag(text: str, i: int) -> str:
end = i + 2
while end < len(text) - 1 and text[end] != "}":
end += 1
if text[end] != "}" or text[end + 1] != "}":
raise TokenizationException('Tag missing "}}"', text[i : end + 2])
s = text[i : end + 2]
return s
def get_django_tag(text: str, i: int, stripped: bool = False) -> str:
end = i + 2
if stripped:
end += 1
while end < len(text) - 1 and text[end] != "%":
end += 1
if text[end] != "%" or text[end + 1] != "}":
raise TokenizationException('Tag missing "%}"', text[i : end + 2])
s = text[i : end + 2]
return s
def get_html_tag(text: str, i: int) -> str:
quote_count = 0
end = i + 1
unclosed_end = 0
while end < len(text) and (text[end] != ">" or quote_count % 2 != 0 and text[end] != "<"):
if text[end] == '"':
quote_count += 1
if not unclosed_end and text[end] == "<":
unclosed_end = end
end += 1
if quote_count % 2 != 0:
if unclosed_end:
raise TokenizationException("Unbalanced quotes", text[i:unclosed_end])
else:
raise TokenizationException("Unbalanced quotes", text[i : end + 1])
if end == len(text) or text[end] != ">":
raise TokenizationException('Tag missing ">"', text[i : end + 1])
s = text[i : end + 1]
return s
def get_html_comment(text: str, i: int) -> str:
end = i + 7
unclosed_end = 0
while end <= len(text):
if text[end - 3 : end] == "-->":
return text[i:end]
if not unclosed_end and text[end] == "<":
unclosed_end = end
end += 1
raise TokenizationException("Unclosed comment", text[i:unclosed_end])
def get_handlebar_comment(text: str, i: int) -> str:
end = i + 5
unclosed_end = 0
while end <= len(text):
if text[end - 2 : end] == "}}":
return text[i:end]
if not unclosed_end and text[end] == "<":
unclosed_end = end
end += 1
raise TokenizationException("Unclosed comment", text[i:unclosed_end])
def get_django_comment(text: str, i: int) -> str:
end = i + 4
unclosed_end = 0
while end <= len(text):
if text[end - 2 : end] == "#}":
return text[i:end]
if not unclosed_end and text[end] == "<":
unclosed_end = end
end += 1
raise TokenizationException("Unclosed comment", text[i:unclosed_end])
def get_handlebar_partial(text: str, i: int) -> str:
end = i + 10
unclosed_end = 0
while end <= len(text):
if text[end - 2 : end] == "}}":
return text[i:end]
if not unclosed_end and text[end] == "<":
unclosed_end = end
end += 1
raise TokenizationException("Unclosed partial", text[i:unclosed_end])
|
{
"content_hash": "6028af7765c81a36480938c48d09d2cb",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 101,
"avg_line_length": 30.30131004366812,
"alnum_prop": 0.4909929384637556,
"repo_name": "hackerkid/zulip",
"id": "cf558d7f9df29ef8662255a1dd5a24336e7ebdc9",
"size": "13878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/lib/template_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "397271"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "717106"
},
{
"name": "JavaScript",
"bytes": "3079595"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71210"
},
{
"name": "Python",
"bytes": "6876664"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119833"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
__author__ = 'sergey'
"""
High-level helper class for LZ4 compression
"""
import sys
from dedupsqlfs.compression import BaseCompression
class Lz4hCompression(BaseCompression):
_method_name = "lz4"
_minimal_size = 15
_has_comp_level_options = False
def _init_module(self):
if not self._module:
if self._method_name in sys.modules:
del sys.modules[ self._method_name ]
self._module = __import__(self._method_name)
func_comp = getattr(self._module, "compress")
def compress_high(data):
return func_comp(data, "high_compression")
self._func_comp = compress_high
self._func_decomp = getattr(self._module, "decompress")
return
pass
|
{
"content_hash": "02c87898205a7104353d55c893bcfb18",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 24.25,
"alnum_prop": 0.6005154639175257,
"repo_name": "sergey-dryabzhinsky/dedupsqlfs",
"id": "6030b27d32e73d1d9d3cdfff3f22335786fa6c6a",
"size": "800",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupsqlfs/compression/lz4h.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5509796"
},
{
"name": "C++",
"bytes": "33360"
},
{
"name": "Cython",
"bytes": "107356"
},
{
"name": "Python",
"bytes": "1042676"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
}
|
"""
almost
~~~~~~
A helper for approximate comparison.
::
from almost import almost
def test_repeating_decimal():
assert almost(1 / 3.) == 0.333
assert almost(1 / 6.) == 0.167
assert almost(3227 / 555., prec=6) == 5.814414
def test_irrational_number():
import math
assert almost(math.pi) == 3.142
assert almost(math.sqrt(2)) == 1.414
def test_random_text():
import random
def gen_text_with_prefix(prefix):
return prefix + str(random.random())[:-5]
assert almost(gen_text_with_prefix('@')) == '@...'
:copyright: (c) 2013 by Heungsub Lee
:license: BSD, see LICENSE for more details.
"""
try:
from numbers import Number
except ImportError:
# for Python 2.5
Number = (int, float, long)
import operator
import re
import sys
from types import GeneratorType
import warnings
__version__ = '0.1.5'
#: A wild card pattern in Regex.
WILDCARD = '.*?'
#: The ``_sre.SRE_Pattern`` class to check normal value types.
SRE_Pattern = type(re.compile(''))
#: ``(str, unicode)`` in Python 2, ``str`` in Python 3.
String = (str, unicode) if sys.version_info[0] < 3 else str
class NormalNumber(int):
pass
class Approximate(object):
def __init__(self, value, prec=3, ellipsis='...', precision=None):
if precision is not None:
prec = precision
warnings.warn('Use \'prec\' keyword instead of \'precision\'',
DeprecationWarning)
self.value = value
self.prec = prec
self.ellipsis = ellipsis
@property
def normal(self):
return self.normalize(self.value)
def normalize(self, value):
if isinstance(value, NormalNumber):
return value
elif isinstance(value, Number):
if self.prec < 0:
value = value / (10. ** -self.prec)
as_str = str(value)
if as_str.endswith('inf'):
return chr(0) + as_str + chr(0)
elif as_str.endswith('nan'):
return '\x01nan\x01'
try:
fmt = '%.{0}f'.format(max(0, self.prec))
except AttributeError: # for Python 2.5
fmt = '%%.%df' % max(0, self.prec)
return NormalNumber((fmt % value).replace('.', ''))
elif isinstance(value, String):
return re.compile(value.replace(self.ellipsis, WILDCARD))
try:
            # detect if the value is iterable
iter(value)
except TypeError:
pass
else:
if isinstance(value, dict):
values = {}
for key, val in value.items():
values[key] = self.normalize(val)
return values
else:
return list(map(self.normalize, value))
return value
def almost_equals(self, value1, value2):
normal1 = self.normalize(value1)
normal2 = self.normalize(value2)
type1, type2 = type(normal1), type(normal2)
if not issubclass(type1, type2) and not issubclass(type2, type1):
return False
elif isinstance(normal1, Number):
try:
return abs(normal1 - normal2) <= 1
except TypeError:
return False
elif isinstance(normal1, SRE_Pattern):
return (normal1.match(normal2.pattern) or
normal2.match(normal1.pattern)) is not None
elif isinstance(normal1, dict):
if len(normal1) != len(normal2):
return False
return all(self.almost_equals(normal1[key], normal2[key])
for key in normal1.keys())
elif isinstance(normal1, list):
if len(normal1) != len(normal2):
return False
return all(self.almost_equals(*args)
for args in zip(normal1, normal2))
return normal1 == normal2
def __eq__(self, other):
return self.almost_equals(self.value, other)
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.normalize(self.value) < self.normalize(other)
def __gt__(self, other):
return self.normalize(self.value) > self.normalize(other)
def __le__(self, other):
return self.normalize(self.value) <= self.normalize(other)
def __ge__(self, other):
return self.normalize(self.value) >= self.normalize(other)
def __contains__(self, other):
normal_value = self.normalize(self.value)
normal_other = self.normalize(other)
assert type(normal_value) is type(normal_other)
assert type(normal_value) is SRE_Pattern
if (normal_value.search(normal_other.pattern) or
normal_other.search(normal_value.pattern)):
return True
return (WILDCARD in normal_value.pattern or
WILDCARD in normal_other.pattern)
def __repr__(self):
return 'almost(' + repr(self.value) + ')'
#: An alias of :class:`Approximate`.
almost = Approximate
|
{
"content_hash": "4628ae3c0dde9f31742036a35e6bffe6",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 74,
"avg_line_length": 31.3855421686747,
"alnum_prop": 0.5552783109404991,
"repo_name": "sublee/almost",
"id": "c347006711c3f1f4dd04ff0c0678a63740c89cef",
"size": "5234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "almost.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12726"
}
],
"symlink_target": ""
}
|
"""
A UE4 specific PySide based UI Widget for Fast-Fetch functionality.
This Widget will be integrated into the m2uMainWindow by the common UI.
"""
import os
from PySide import QtGui
from m2u.ue4 import assets
thispath = os.path.dirname(os.path.realpath(__file__))
icoFetch = QtGui.QIcon(os.path.join(thispath, "icoFetch.png"))
class ue4PSUIFetchWidget(QtGui.QWidget):
def __init__(self, *args, **kwargs):
super(ue4PSUIFetchWidget, self).__init__(*args, **kwargs)
self.buildUI()
self.connectUI()
def buildUI(self):
self.fetchSelectedBtn = QtGui.QPushButton(text="Fast Fetch Selected")
self.fetchSelectedBtn.setIcon(icoFetch)
tooltip = ("Get the selected objects from the Editor by exporting to "
"a single .fbx file. ")
self.fetchSelectedBtn.setToolTip(tooltip)
layout = QtGui.QHBoxLayout()
layout.addWidget(self.fetchSelectedBtn)
layout.setContentsMargins(1, 1, 1, 1)
layout.addStretch()
self.setLayout(layout)
def connectUI(self):
self.fetchSelectedBtn.clicked.connect(self.fetchSelectedBtnClicked)
def fetchSelectedBtnClicked(self):
assets.fetch_selected_objects()
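# Usage sketch (assumptions: a PySide QApplication already runs inside the
# UE4/m2u Python environment; normally the common m2u UI embeds this widget):
#
#     app = QtGui.QApplication.instance() or QtGui.QApplication([])
#     widget = ue4PSUIFetchWidget()
#     widget.show()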
|
{
"content_hash": "b5fc2cbd828014fd0bb2ee564db28602",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 29.238095238095237,
"alnum_prop": 0.6815960912052117,
"repo_name": "m2u/m2u",
"id": "d6e336dd26af955ff20c82dd918f06fa91959d6d",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ue4/ui/ue4PSUIFetchWidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "227309"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from storages.backends.s3boto import S3BotoStorage
MediaS3BotoStorage = lambda: S3BotoStorage(bucket='halalar-media')
StaticS3BotoStorage = lambda: S3BotoStorage(bucket='halalar')
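# Usage sketch (hypothetical Django settings entries; the dotted paths assume
# this module is importable as halalar.storages):
#
#     DEFAULT_FILE_STORAGE = 'halalar.storages.MediaS3BotoStorage'
#     STATICFILES_STORAGE = 'halalar.storages.StaticS3BotoStorage'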
|
{
"content_hash": "dbc89f66b14989dd51a60938e3e4999f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 66,
"avg_line_length": 36.833333333333336,
"alnum_prop": 0.8190045248868778,
"repo_name": "jawaidss/halalar-web",
"id": "6b70cb77ceb4f41ab10f624a091bdc7c815205b2",
"size": "221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "halalar/halalar/storages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "376"
},
{
"name": "HTML",
"bytes": "17295"
},
{
"name": "Python",
"bytes": "74755"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "..")))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py-couchdb'
copyright = u'2015, Andrey Antukh'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.12'
# The full version, including alpha/beta/rc tags.
release = '1.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
html_theme = 'kr'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py-couchdbdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py-couchdb.tex', u'py-couchdb Documentation',
u'Andrey Antukh', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py-couchdb', u'py-couchdb Documentation',
[u'Andrey Antukh'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'py-couchdb', u'py-couchdb Documentation',
u'Andrey Antukh', 'py-couchdb', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
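# Usage sketch (assumptions: run from the repository root; the html builder and
# output directory are arbitrary choices):
#
#     sphinx-build -b html docs/source docs/build/html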
|
{
"content_hash": "f26964fe2b1ad44f02f3d0956592220c",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 94,
"avg_line_length": 32.11538461538461,
"alnum_prop": 0.6991350632069195,
"repo_name": "krisb78/py-couchdb",
"id": "ef33f16cc8eee40bec0bad3264e702c2638422cc",
"size": "7936",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "56417"
}
],
"symlink_target": ""
}
|
import contextlib
import sys
import os
import itertools
import hashlib
import queue
import random
import select
import time
import OpenSSL.crypto
import logging
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy.net import tcp
from mitmproxy.net import websockets
from mitmproxy.net import socks
from mitmproxy.net import http as net_http
from mitmproxy.types import basethread
from mitmproxy.utils import strutils
from pathod import log
from pathod import language
from pathod.protocols import http2
logging.getLogger("hpack").setLevel(logging.WARNING)
def xrepr(s):
return repr(s)[1:-1]
class PathocError(Exception):
pass
class SSLInfo:
def __init__(self, certchain, cipher, alp):
self.certchain, self.cipher, self.alp = certchain, cipher, alp
def __str__(self):
parts = [
"Application Layer Protocol: %s" % strutils.always_str(self.alp, "utf8"),
"Cipher: %s, %s bit, %s" % self.cipher,
"SSL certificate chain:"
]
for n, i in enumerate(self.certchain):
parts.append(" Certificate [%s]" % n)
parts.append("\tSubject: ")
for cn in i.get_subject().get_components():
parts.append("\t\t%s=%s" % (
strutils.always_str(cn[0], "utf8"),
strutils.always_str(cn[1], "utf8"))
)
parts.append("\tIssuer: ")
for cn in i.get_issuer().get_components():
parts.append("\t\t%s=%s" % (
strutils.always_str(cn[0], "utf8"),
strutils.always_str(cn[1], "utf8"))
)
parts.extend(
[
"\tVersion: %s" % i.get_version(),
"\tValidity: %s - %s" % (
strutils.always_str(i.get_notBefore(), "utf8"),
strutils.always_str(i.get_notAfter(), "utf8")
),
"\tSerial: %s" % i.get_serial_number(),
"\tAlgorithm: %s" % strutils.always_str(i.get_signature_algorithm(), "utf8")
]
)
pk = i.get_pubkey()
types = {
OpenSSL.crypto.TYPE_RSA: "RSA",
OpenSSL.crypto.TYPE_DSA: "DSA"
}
            t = types.get(pk.type(), "Unknown")
parts.append("\tPubkey: %s bit %s" % (pk.bits(), t))
s = certs.SSLCert(i)
if s.altnames:
parts.append("\tSANs: %s" % " ".join(strutils.always_str(n, "utf8") for n in s.altnames))
return "\n".join(parts)
class WebsocketFrameReader(basethread.BaseThread):
def __init__(
self,
rfile,
logfp,
showresp,
hexdump,
ws_read_limit,
timeout
):
basethread.BaseThread.__init__(self, "WebsocketFrameReader")
self.timeout = timeout
self.ws_read_limit = ws_read_limit
self.logfp = logfp
self.showresp = showresp
self.hexdump = hexdump
self.rfile = rfile
self.terminate = queue.Queue()
self.frames_queue = queue.Queue()
self.logger = log.ConnectionLogger(
self.logfp,
self.hexdump,
False,
rfile if showresp else None,
None
)
@contextlib.contextmanager
def terminator(self):
yield
self.frames_queue.put(None)
def run(self):
starttime = time.time()
with self.terminator():
while True:
if self.ws_read_limit == 0:
return
try:
r, _, _ = select.select([self.rfile], [], [], 0.05)
except OSError: # pragma: no cover
return # this is not reliably triggered due to its nature, so we exclude it from coverage.
delta = time.time() - starttime
if not r and self.timeout and delta > self.timeout:
return
try:
self.terminate.get_nowait()
return
except queue.Empty:
pass
for rfile in r:
with self.logger.ctx() as log:
try:
frm = websockets.Frame.from_file(self.rfile)
except exceptions.TcpDisconnect:
return
self.frames_queue.put(frm)
log("<< %s" % repr(frm.header))
if self.ws_read_limit is not None:
self.ws_read_limit -= 1
starttime = time.time()
class Pathoc(tcp.TCPClient):
def __init__(
self,
address,
# SSL
ssl=None,
sni=None,
ssl_version=tcp.SSL_DEFAULT_METHOD,
ssl_options=tcp.SSL_DEFAULT_OPTIONS,
clientcert=None,
ciphers=None,
# HTTP/2
use_http2=False,
http2_skip_connection_preface=False,
http2_framedump=False,
# Websockets
ws_read_limit=None,
# Network
timeout=None,
# Output control
showreq=False,
showresp=False,
explain=False,
hexdump=False,
ignorecodes=(),
ignoretimeout=False,
showsummary=False,
fp=sys.stdout
):
"""
spec: A request specification
showreq: Print requests
showresp: Print responses
explain: Print request explanation
showssl: Print info on SSL connection
hexdump: When printing requests or responses, use hex dump output
showsummary: Show a summary of requests
ignorecodes: Sequence of return codes to ignore
"""
tcp.TCPClient.__init__(self, address)
self.ssl, self.sni = ssl, sni
self.clientcert = clientcert
self.ssl_version = ssl_version
self.ssl_options = ssl_options
self.ciphers = ciphers
self.sslinfo = None
self.use_http2 = use_http2
self.http2_skip_connection_preface = http2_skip_connection_preface
self.http2_framedump = http2_framedump
self.ws_read_limit = ws_read_limit
self.timeout = timeout
self.showreq = showreq
self.showresp = showresp
self.explain = explain
self.hexdump = hexdump
self.ignorecodes = ignorecodes
self.ignoretimeout = ignoretimeout
self.showsummary = showsummary
self.fp = fp
self.ws_framereader = None
if self.use_http2:
if not tcp.HAS_ALPN: # pragma: no cover
log.write_raw(
self.fp,
"HTTP/2 requires ALPN support. "
"Please use OpenSSL >= 1.0.2. "
"Pathoc might not be working as expected without ALPN.",
timestamp=False
)
self.protocol = http2.HTTP2StateProtocol(self, dump_frames=self.http2_framedump)
else:
self.protocol = net_http.http1
self.settings = language.Settings(
is_client=True,
staticdir=os.getcwd(),
unconstrained_file_access=True,
request_host=self.address[0],
protocol=self.protocol,
)
def http_connect(self, connect_to):
req = net_http.Request(
first_line_format='authority',
method='CONNECT',
scheme=None,
host=connect_to[0].encode("idna"),
port=connect_to[1],
path=None,
http_version='HTTP/1.1',
content=b'',
)
self.wfile.write(net_http.http1.assemble_request(req))
self.wfile.flush()
try:
resp = self.protocol.read_response(self.rfile, req)
if resp.status_code != 200:
raise exceptions.HttpException("Unexpected status code: %s" % resp.status_code)
except exceptions.HttpException as e:
raise PathocError(
"Proxy CONNECT failed: %s" % repr(e)
)
def socks_connect(self, connect_to):
try:
client_greet = socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
)
client_greet.to_file(self.wfile)
self.wfile.flush()
server_greet = socks.ServerGreeting.from_file(self.rfile)
server_greet.assert_socks5()
if server_greet.method != socks.METHOD.NO_AUTHENTICATION_REQUIRED:
raise socks.SocksError(
socks.METHOD.NO_ACCEPTABLE_METHODS,
"pathoc only supports SOCKS without authentication"
)
connect_request = socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.CONNECT,
socks.ATYP.DOMAINNAME,
connect_to,
)
connect_request.to_file(self.wfile)
self.wfile.flush()
connect_reply = socks.Message.from_file(self.rfile)
connect_reply.assert_socks5()
if connect_reply.msg != socks.REP.SUCCEEDED:
raise socks.SocksError(
connect_reply.msg,
"SOCKS server error"
)
except (socks.SocksError, exceptions.TcpDisconnect) as e:
raise PathocError(str(e))
def connect(self, connect_to=None, showssl=False, fp=sys.stdout):
"""
connect_to: A (host, port) tuple, which will be connected to with
an HTTP CONNECT request.
"""
if self.use_http2 and not self.ssl:
raise NotImplementedError("HTTP2 without SSL is not supported.")
with tcp.TCPClient.connect(self) as closer:
if connect_to:
self.http_connect(connect_to)
self.sslinfo = None
if self.ssl:
try:
alpn_protos = [b'http/1.1']
if self.use_http2:
alpn_protos.append(b'h2')
self.convert_to_ssl(
sni=self.sni,
cert=self.clientcert,
method=self.ssl_version,
options=self.ssl_options,
cipher_list=self.ciphers,
alpn_protos=alpn_protos
)
except exceptions.TlsException as v:
raise PathocError(str(v))
self.sslinfo = SSLInfo(
self.connection.get_peer_cert_chain(),
self.get_current_cipher(),
self.get_alpn_proto_negotiated()
)
if showssl:
print(str(self.sslinfo), file=fp)
if self.use_http2:
self.protocol.check_alpn()
if not self.http2_skip_connection_preface:
self.protocol.perform_client_connection_preface()
if self.timeout:
self.settimeout(self.timeout)
return closer.pop()
def stop(self):
if self.ws_framereader:
self.ws_framereader.terminate.put(None)
def wait(self, timeout=0.01, finish=True):
"""
A generator that yields frames until Pathoc terminates.
            timeout: If specified, None may be yielded instead when the
            timeout is reached. If timeout is None, wait forever. If timeout
            is 0, return immediately if nothing is on the queue.
finish: If true, consume messages until the reader shuts down.
Otherwise, return None on timeout.
"""
if self.ws_framereader:
while True:
try:
frm = self.ws_framereader.frames_queue.get(
timeout=timeout,
block=True if timeout != 0 else False
)
except queue.Empty:
if finish:
continue
else:
return
if frm is None:
self.ws_framereader.join()
self.ws_framereader = None
return
yield frm
def websocket_send_frame(self, r):
"""
Sends a single websocket frame.
"""
logger = log.ConnectionLogger(
self.fp,
self.hexdump,
False,
None,
self.wfile if self.showreq else None,
)
with logger.ctx() as lg:
lg(">> %s" % r)
language.serve(r, self.wfile, self.settings)
self.wfile.flush()
def websocket_start(self, r):
"""
Performs an HTTP request, and attempts to drop into websocket
connection.
"""
resp = self.http(r)
if resp.status_code == 101:
self.ws_framereader = WebsocketFrameReader(
self.rfile,
self.fp,
self.showresp,
self.hexdump,
self.ws_read_limit,
self.timeout
)
self.ws_framereader.start()
return resp
def http(self, r):
"""
Performs a single request.
r: A language.http.Request object, or a string representing one
request.
Returns Response if we have a non-ignored response.
May raise a exceptions.NetlibException
"""
logger = log.ConnectionLogger(
self.fp,
self.hexdump,
False,
self.rfile if self.showresp else None,
self.wfile if self.showreq else None,
)
with logger.ctx() as lg:
lg(">> %s" % r)
resp, req = None, None
try:
req = language.serve(r, self.wfile, self.settings)
self.wfile.flush()
                # build a dummy request to read the response
# ideally this would be returned directly from language.serve
dummy_req = net_http.Request(
first_line_format="relative",
method=req["method"],
scheme=b"http",
host=b"localhost",
port=80,
path=b"/",
http_version=b"HTTP/1.1",
content=b'',
)
resp = self.protocol.read_response(self.rfile, dummy_req)
resp.sslinfo = self.sslinfo
except exceptions.HttpException as v:
lg("Invalid server response: %s" % v)
raise
except exceptions.TcpTimeout:
if self.ignoretimeout:
lg("Timeout (ignored)")
return None
lg("Timeout")
raise
finally:
if resp:
lg("<< %s %s: %s bytes" % (
resp.status_code, strutils.escape_control_characters(resp.reason) if resp.reason else "", len(resp.content)
))
if resp.status_code in self.ignorecodes:
lg.suppress()
return resp
def request(self, r):
"""
Performs a single request.
            r: A language.message.Message object, or a string representing
one.
Returns Response if we have a non-ignored response.
May raise a exceptions.NetlibException
"""
if isinstance(r, str):
r = next(language.parse_pathoc(r, self.use_http2))
if isinstance(r, language.http.Request):
if r.ws:
return self.websocket_start(r)
else:
return self.http(r)
elif isinstance(r, language.websockets.WebsocketFrame):
self.websocket_send_frame(r)
elif isinstance(r, language.http2.Request):
return self.http(r)
# elif isinstance(r, language.http2.Frame):
# TODO: do something
def main(args): # pragma: no cover
memo = set()
p = None
if args.repeat == 1:
requests = args.requests
else:
# If we are replaying more than once, we must convert the request generators to lists
# or they will be exhausted after the first run.
# This is bad for the edge-case where get:/:x10000000 (see 0da3e51) is combined with -n 2,
# but does not matter otherwise.
requests = [list(x) for x in args.requests]
try:
requests_done = 0
while True:
if requests_done == args.repeat:
break
if args.wait and requests_done > 0:
time.sleep(args.wait)
requests_done += 1
if args.random:
playlist = random.choice(requests)
else:
playlist = itertools.chain.from_iterable(requests)
p = Pathoc(
(args.host, args.port),
ssl=args.ssl,
sni=args.sni,
ssl_version=args.ssl_version,
ssl_options=args.ssl_options,
clientcert=args.clientcert,
ciphers=args.ciphers,
use_http2=args.use_http2,
http2_skip_connection_preface=args.http2_skip_connection_preface,
http2_framedump=args.http2_framedump,
showreq=args.showreq,
showresp=args.showresp,
explain=args.explain,
hexdump=args.hexdump,
ignorecodes=args.ignorecodes,
timeout=args.timeout,
ignoretimeout=args.ignoretimeout,
showsummary=True
)
trycount = 0
try:
with p.connect(args.connect_to, args.showssl):
for spec in playlist:
if args.explain or args.memo:
spec = spec.freeze(p.settings)
if args.memo:
h = hashlib.sha256(spec.spec()).digest()
if h not in memo:
trycount = 0
memo.add(h)
else:
trycount += 1
if trycount > args.memolimit:
print("Memo limit exceeded...", file=sys.stderr)
return
else:
continue
try:
ret = p.request(spec)
if ret and args.oneshot:
return
# We consume the queue when we can, so it doesn't build up.
for _ in p.wait(timeout=0, finish=False):
pass
except exceptions.NetlibException:
break
for _ in p.wait(timeout=0.01, finish=True):
pass
except exceptions.TcpException as v:
print(str(v), file=sys.stderr)
continue
except PathocError as v:
print(str(v), file=sys.stderr)
sys.exit(1)
except KeyboardInterrupt:
pass
if p:
p.stop()
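# Usage sketch (assumptions: a pathod server listens on localhost:9999 and
# plain HTTP is acceptable, so ssl/http2 keep their defaults):
#
#     p = Pathoc(("localhost", 9999))
#     with p.connect():
#         resp = p.request("get:/")
#         print(resp.status_code if resp else "no response")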
|
{
"content_hash": "d27a2b3f4af568511d737ab41408c83a",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 131,
"avg_line_length": 33.835314091680814,
"alnum_prop": 0.4910432033719705,
"repo_name": "mosajjal/mitmproxy",
"id": "4a61334999f37e83ca3996c3c6122e99d6be7c93",
"size": "19929",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pathod/pathoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17457"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "149498"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1491468"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
}
|
"""Class for screen sizes and orientation."""
import objc_util
import ui
app = objc_util.UIApplication.sharedApplication()
UIScreen = objc_util.ObjCClass("UIScreen")
orientation_codes = {
1: "bottom",
2: "top",
3: "left",
4: "right"
}
class _ScreenOrientation(object):
"""Represents a device orientation state."""
def __init__(self, orientation):
self.code = orientation
def __str__(self):
return orientation_codes[self.code]
def __int__(self):
return self.code
def __float__(self):
return float(self.code)
def __repr__(self):
return repr(self.code)
@property
def portrait(self):
return self.code in (1, 3)
@property
def landscape(self):
return self.code in (2, 4)
class Screen(object):
"""An interface to access characteristics of the device's screen."""
@property
def size(self):
"""Get screen size."""
return ui.get_screen_size()
@property
def width(self):
"""The width of the screen."""
return self.size[0]
@property
def height(self):
"""The height of the screen."""
return self.size[1]
@property
def min(self):
return min(self.size)
@property
def max(self):
return max(self.size)
def __iter__(self):
"""This allows the min and max functions to work on this class."""
return iter(self.size)
@property
def orientation(self):
"""Get a numerical value representing the screen's orientation."""
# This doesn't use UIDevice because my approach is simpler, and
# accounts for rotation lock automatically.
return _ScreenOrientation(app.statusBarOrientation())
@property
def portrait(self):
return self.orientation.portrait
@property
def landscape(self):
return self.orientation.landscape
@property
def is_retina(self):
return UIScreen.mainScreen().scale() == 2.0
def __repr__(self):
a = "Retina screen" if self.is_retina else "Screen"
b = str(self.size)
c = "with the {} side down".format(self.orientation)
return " ".join((a, b, c))
screen = Screen()
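# Usage sketch (assumption: running inside Pythonista, where objc_util and ui
# are available):
#
#     print(screen.size, screen.width, screen.height)
#     if screen.landscape:
#         print("home button on the", screen.orientation)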
|
{
"content_hash": "c8b985c396a66e10f89affdfb99d2dc7",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 74,
"avg_line_length": 23.164948453608247,
"alnum_prop": 0.5990209167779261,
"repo_name": "controversial/ui2",
"id": "15ef5cde3ba8b8f4c84042d49efaaf5d1e034e70",
"size": "2247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui2/screen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66391"
}
],
"symlink_target": ""
}
|
from klein import Klein
app = Klein()
@app.route('/user/<username>')
def pg_user(request, username):
return 'Hi %s!' % (username,)
app.run("localhost", 8080)
|
{
"content_hash": "e2f21a770ebb9a579e726560ce6410f5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 33,
"avg_line_length": 20.5,
"alnum_prop": 0.6585365853658537,
"repo_name": "brighid/klein",
"id": "d72776eb61d69de04d6271bab3adb6d612bdd097",
"size": "164",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/introduction/codeexamples/variableRoutes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "67912"
},
{
"name": "Shell",
"bytes": "448"
}
],
"symlink_target": ""
}
|
import sqlalchemy as sql
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
request_token_table = sql.Table('request_token', meta, autoload=True)
request_token_table.c.requested_roles.alter(name="role_ids", nullable=True)
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.requested_roles.alter(name="role_ids")
def downgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
request_token_table = sql.Table('request_token', meta, autoload=True)
request_token_table.c.role_ids.alter(name="requested_roles",
nullable=False)
access_token_table = sql.Table('access_token', meta, autoload=True)
access_token_table.c.role_ids.alter(name="requested_roles")
|
{
"content_hash": "8de18410426a63add03ae17d28601d82",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 41.25,
"alnum_prop": 0.6836363636363636,
"repo_name": "cloudbau/keystone",
"id": "aec13b8d1f51ac1996d9c99fbf547f99d26cd0de",
"size": "1456",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "keystone/contrib/oauth1/migrate_repo/versions/004_request_token_roles_nullable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2383366"
},
{
"name": "Shell",
"bytes": "11206"
}
],
"symlink_target": ""
}
|
import kfp
from kfp import components
from kfp import dsl
robomaker_sim_job_op = components.load_component_from_file(
"../../simulation_job/component.yaml"
)
@dsl.pipeline(
name="Run RoboMaker Simulation Job",
description="RoboMaker Simulation Job test pipeline",
)
def robomaker_simulation_job_test(
region="",
role="",
output_bucket="",
output_path="",
max_run="",
failure_behavior="",
sim_app_arn="",
sim_app_launch_config="",
robot_app_arn="",
robot_app_launch_config="",
):
robomaker_sim_job_op(
region=region,
role=role,
output_bucket=output_bucket,
output_path=output_path,
max_run=max_run,
failure_behavior=failure_behavior,
sim_app_arn=sim_app_arn,
sim_app_launch_config=sim_app_launch_config,
robot_app_arn=robot_app_arn,
robot_app_launch_config=robot_app_launch_config,
)
if __name__ == "__main__":
kfp.compiler.Compiler().compile(robomaker_simulation_job_test, __file__ + ".yaml")
|
{
"content_hash": "7e6211c737cd6f6072d422023b5b0d92",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 86,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.6289549376797698,
"repo_name": "kubeflow/pipelines",
"id": "34fcea30fb327a9f172933d2468a561562406b9c",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/aws/sagemaker/tests/integration_tests/resources/definition/robomaker_simulation_job_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
}
|
wide_columns = [
gender, native_country, education, occupation, workclass, relationship, age_buckets,
tf.contrib.layers.crossed_column([education, occupation], hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([native_country, occupation], hash_bucket_size=int(1e4)),
tf.contrib.layers.crossed_column([age_buckets, education, occupation], hash_bucket_size=int(1e6))]
# In[ ]:
deep_columns = [
tf.contrib.layers.embedding_column(workclass, dimension=8),
tf.contrib.layers.embedding_column(education, dimension=8),
tf.contrib.layers.embedding_column(gender, dimension=8),
tf.contrib.layers.embedding_column(relationship, dimension=8),
tf.contrib.layers.embedding_column(native_country, dimension=8),
tf.contrib.layers.embedding_column(occupation, dimension=8),
age, education_num, capital_gain, capital_loss, hours_per_week]
# In[ ]:
import tempfile
model_dir = tempfile.mkdtemp()
m = tf.contrib.learn.DNNLinearCombinedClassifier(
model_dir=model_dir,
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=[100, 50])
# In[ ]:
import pandas as pd
import urllib
# Define the column names for the data sets.
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country", "income_bracket"]
LABEL_COLUMN = 'label'
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
# Download the training and test data to temporary files.
# Alternatively, you can download them yourself and change train_file and
# test_file to your own paths.
train_file = tempfile.NamedTemporaryFile()
test_file = tempfile.NamedTemporaryFile()
urllib.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.data", train_file.name)
urllib.urlretrieve("http://mlr.cs.umass.edu/ml/machine-learning-databases/adult/adult.test", test_file.name)
# Read the training and test data sets into Pandas dataframe.
df_train = pd.read_csv(train_file, names=COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(test_file, names=COLUMNS, skipinitialspace=True, skiprows=1)
df_train[LABEL_COLUMN] = (df_train['income_bracket'].apply(lambda x: '>50K' in x)).astype(int)
df_test[LABEL_COLUMN] = (df_test['income_bracket'].apply(lambda x: '>50K' in x)).astype(int)
def input_fn(df):
# Creates a dictionary mapping from each continuous feature column name (k) to
# the values of that column stored in a constant Tensor.
continuous_cols = {k: tf.constant(df[k].values)
for k in CONTINUOUS_COLUMNS}
# Creates a dictionary mapping from each categorical feature column name (k)
# to the values of that column stored in a tf.SparseTensor.
categorical_cols = {k: tf.SparseTensor(
indices=[[i, 0] for i in range(df[k].size)],
values=df[k].values,
dense_shape=[df[k].size, 1])
for k in CATEGORICAL_COLUMNS}
# Merges the two dictionaries into one.
feature_cols = dict(continuous_cols.items() + categorical_cols.items())
# Converts the label column into a constant Tensor.
label = tf.constant(df[LABEL_COLUMN].values)
# Returns the feature columns and the label.
return feature_cols, label
def train_input_fn():
return input_fn(df_train)
def eval_input_fn():
return input_fn(df_test)
# In[ ]:
m.fit(input_fn=train_input_fn, steps=200)
results = m.evaluate(input_fn=eval_input_fn, steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
|
{
"content_hash": "c71e0242182225caeeff77b471fd4779",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 109,
"avg_line_length": 39.354166666666664,
"alnum_prop": 0.7112228692429857,
"repo_name": "zlpmichelle/crackingtensorflow",
"id": "2f2d0ce4133be259b29968afeb39a1297827334b",
"size": "3923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crackingcode/day13/cc_tf_day13_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1109569"
},
{
"name": "Python",
"bytes": "583902"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import mock
from django import test
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from braces.views import AjaxResponseMixin
from .compat import force_text
from .factories import ArticleFactory, UserFactory
from .helpers import TestViewHelper
from .views import (SimpleJsonView, JsonRequestResponseView,
CustomJsonEncoderView)
from .compat import json
class TestAjaxResponseMixin(TestViewHelper, test.TestCase):
"""
Tests for AjaxResponseMixin.
"""
methods = ['get', 'post', 'put', 'delete']
def test_xhr(self):
"""
Checks if ajax_* method has been called for every http method.
"""
# AjaxResponseView returns 'AJAX_OK' when requested with XmlHttpRequest
for m in self.methods:
fn = getattr(self.client, m)
resp = fn('/ajax_response/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
assert force_text(resp.content) == 'AJAX_OK'
def test_not_xhr(self):
"""
Normal methods (get, post, etc) should be used when handling non-ajax
requests.
"""
for m in self.methods:
fn = getattr(self.client, m)
resp = fn('/ajax_response/')
assert force_text(resp.content) == 'OK'
def test_fallback_to_normal_methods(self):
"""
Ajax methods should fallback to normal methods by default.
"""
test_cases = [
('get', 'get'),
('post', 'post'),
('put', 'get'),
('delete', 'get'),
]
for ajax_method, fallback in test_cases:
m, mixin = mock.Mock(), AjaxResponseMixin()
m.return_value = HttpResponse()
req = self.build_request()
setattr(mixin, fallback, m)
fn = getattr(mixin, "{0}_ajax".format(ajax_method))
ret = fn(req, 1, 2, meth=ajax_method)
# check if appropriate method has been called
m.assert_called_once_with(req, 1, 2, meth=ajax_method)
# check if appropriate value has been returned
self.assertIs(m.return_value, ret)
class TestJSONResponseMixin(TestViewHelper, test.TestCase):
"""
Tests for JSONResponseMixin.
"""
view_class = SimpleJsonView
def assert_json_response(self, resp, status_code=200):
self.assertEqual(status_code, resp.status_code)
self.assertEqual('application/json',
resp['content-type'].split(';')[0])
def get_content(self, url):
"""
GET url and return content
"""
resp = self.client.get(url)
self.assert_json_response(resp)
content = force_text(resp.content)
return content
def test_simple_json(self):
"""
Tests render_json_response() method.
"""
user = UserFactory()
self.client.login(username=user.username, password='asdf1234')
data = json.loads(self.get_content('/simple_json/'))
self.assertEqual({'username': user.username}, data)
def test_serialization(self):
"""
Tests render_json_object_response() method which serializes objects
using django's serializer framework.
"""
a1, a2 = [ArticleFactory() for __ in range(2)]
data = json.loads(self.get_content('/article_list_json/'))
self.assertIsInstance(data, list)
self.assertEqual(2, len(data))
titles = []
for row in data:
# only title has been serialized
self.assertEqual(1, len(row['fields']))
titles.append(row['fields']['title'])
self.assertIn(a1.title, titles)
self.assertIn(a2.title, titles)
def test_bad_content_type(self):
"""
ImproperlyConfigured exception should be raised if content_type
attribute is not set correctly.
"""
with self.assertRaises(ImproperlyConfigured):
self.dispatch_view(self.build_request(), content_type=['a'])
def test_pretty_json(self):
"""
Success if JSON responses are the same, and the well-indented response
is longer than the normal one.
"""
user = UserFactory()
        self.client.login(username=user.username, password='asdf1234')
normal_content = self.get_content('/simple_json/')
self.view_class.json_dumps_kwargs = {'indent': 2}
pretty_content = self.get_content('/simple_json/')
normal_json = json.loads('{0}'.format(normal_content))
pretty_json = json.loads('{0}'.format(pretty_content))
self.assertEqual(normal_json, pretty_json)
self.assertTrue(len(pretty_content) > len(normal_content))
def test_json_encoder_class_atrribute(self):
"""
Tests setting custom `json_encoder_class` attribute.
"""
data = json.loads(self.get_content('/simple_json_custom_encoder/'))
self.assertEqual({'numbers': [1, 2, 3]}, data)
class TestJsonRequestResponseMixin(TestViewHelper, test.TestCase):
view_class = JsonRequestResponseView
request_dict = {'status': 'operational'}
def test_get_request_json_properly_formatted(self):
"""
Properly formatted JSON requests should result in a JSON object
"""
data = json.dumps(self.request_dict).encode('utf-8')
response = self.client.post(
'/json_request/',
content_type='application/json',
data=data
)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response_json, self.request_dict)
def test_get_request_json_improperly_formatted(self):
"""
Improperly formatted JSON requests should make request_json == None
"""
response = self.client.post(
'/json_request/',
data=self.request_dict
)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response_json, None)
def test_bad_request_response(self):
"""
If a view calls render_bad_request_response when request_json is empty
or None, the client should get a 400 error
"""
response = self.client.post(
'/json_bad_request/',
data=self.request_dict
)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(response_json, self.view_class.error_response_dict)
def test_bad_request_response_with_custom_error_message(self):
"""
If a view calls render_bad_request_response when request_json is empty
or None, the client should get a 400 error
"""
response = self.client.post(
'/json_custom_bad_request/',
data=self.request_dict
)
response_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(response_json, {'error': 'you messed up'})
|
{
"content_hash": "f9aeab80a54501811f6538df46294ea2",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 80,
"avg_line_length": 36,
"alnum_prop": 0.6091763405196241,
"repo_name": "hochanh/django-braces",
"id": "b5e95dad2ec439d8a1aee2c121c21ffb9b175f8d",
"size": "7236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ajax_mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "189"
},
{
"name": "Python",
"bytes": "121111"
}
],
"symlink_target": ""
}
|
from .base import logmmse, logmmse_from_file
|
{
"content_hash": "2ef64b9c78851316362fc0b8c9ed97ed",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 44,
"avg_line_length": 45,
"alnum_prop": 0.8,
"repo_name": "braindead/logmmse",
"id": "1569462445a69c905ae057e22de367a116dda67e",
"size": "45",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logmmse/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3910"
}
],
"symlink_target": ""
}
|
import logging
class Singleton:
"""
A non-thread-safe helper class to ease implementing singletons.
This should be used as a decorator -- not a metaclass -- to the
class that should be a singleton.
The decorated class can define one `__init__` function that
takes only the `self` argument. Other than that, there are
no restrictions that apply to the decorated class.
    To get the singleton instance, use the `instance` method. Trying
to use `__call__` will result in a `TypeError` being raised.
Limitations: The decorated class cannot be inherited from.
"""
def __init__(self, decorated):
self._decorated = decorated
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
try:
return self._instance
except AttributeError:
self._instance = self._decorated()
return self._instance
def __call__(self):
        logging.error('Singletons must be accessed through `instance()`.')
        raise TypeError('Singletons must be accessed through `instance()`.')
def __instancecheck__(self, inst):
return isinstance(inst, self._decorated)
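# Usage sketch (hypothetical decorated class; runnable as a quick self-check):
if __name__ == "__main__":
    @Singleton
    class AppConfig(object):
        def __init__(self):
            self.value = 42
    cfg = AppConfig.instance()          # lazily creates the single instance
    assert cfg is AppConfig.instance()  # later calls return the same object
    assert isinstance(cfg, AppConfig)   # __instancecheck__ delegates to the class
    try:
        AppConfig()                     # direct calls are rejected
    except TypeError:
        pass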
|
{
"content_hash": "7cf337ea106b110e19ca27b94d688092",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 72,
"avg_line_length": 29.976744186046513,
"alnum_prop": 0.6974398758727696,
"repo_name": "eusyar/4fun",
"id": "6dd3d2ec2aa6ea2084acc949d7b35f1b5ee73c07",
"size": "1289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/DesignPatterns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3869"
}
],
"symlink_target": ""
}
|
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print __doc__
# Code source: Gael Varoquaux
# Modified for Documentation merge by Jaques Grobler
# License: BSD
import numpy as np
import pylab as pl
from sklearn import linear_model, decomposition, datasets
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
from sklearn.pipeline import Pipeline
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
pl.figure(1, figsize=(4, 3))
pl.clf()
pl.axes([.2, .2, .7, .7])
pl.plot(pca.explained_variance_, linewidth=2)
pl.axis('tight')
pl.xlabel('n_components')
pl.ylabel('explained_variance_')
###############################################################################
# Prediction
from sklearn.grid_search import GridSearchCV
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
pl.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
pl.legend(prop=dict(size=12))
pl.show()
|
{
"content_hash": "bdd1770f90e6e22b43602d09ccab098a",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 26.676923076923078,
"alnum_prop": 0.5951557093425606,
"repo_name": "lucidfrontier45/scikit-learn",
"id": "8c9f061720122a67b9233538a3cbc06d10d19aa0",
"size": "1781",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/plot_digits_pipe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10562935"
},
{
"name": "C++",
"bytes": "496247"
},
{
"name": "JavaScript",
"bytes": "4775"
},
{
"name": "Python",
"bytes": "3594152"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
}
|
"""
Django settings for cs490 project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import secrets
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's1h=i5$z89!gi1&4046h58c9&*g)6!@&y&b^ch2m=nzt=#m5sd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'0.0.0.0'
]
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
THIRD_PARTY_APPS = (
'taggit',
'guardian',
'crispy_forms',
'haystack',
'randomslugfield',
)
LOCAL_APPS = (
'accounts',
'core',
'qa',
'registration',
)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
TEMPLATED_EMAIL_BACKEND = 'templated_email.backends.vanilla_django'
CRISPY_TEMPLATE_PACK = 'bootstrap3'
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'django_cas_ng.backends.CASBackend',
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
ROOT_URLCONF = 'cs490.urls'
WSGI_APPLICATION = 'cs490.wsgi.application'
#######
####CAS SETTINGS#######
CAS_SERVER_URL = secrets.CAS_URL
CAS_REDIRECT_URL = 'http://localhost:8000/'
CAS_LOGOUT_COMPLETELY = True
#######END CAS SETTINGS##############
LOGIN_REDIRECT_URL = '/accounts/new'
ORGANIZATION_EMAIL_DOMAIN = 'masonlive.gmu.edu'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_PORT = 587
EMAIL_HOST_USER = secrets.EMAIL_HOST_USER
EMAIL_HOST_PASSWORD = secrets.EMAIL_HOST_PASSWORD
EMAIL_USE_TLS = True
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
MEDIA_ROOT = '/tmp/'
MEDIA_URL = '/file/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
STATICFILES_DIRS = (
'bower_components',
'css',
)
TEMPLATE_DIRS = (
'templates',
'core/templates',
'registration/templates',
'accounts/templates',
)
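# Usage sketch (assumptions: manage.py sits at the project root and this module
# is importable as cs490.settings.development):
#
#     python manage.py runserver --settings=cs490.settings.development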
|
{
"content_hash": "9cf939b097782ca86f44fe86887a743b",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 121,
"avg_line_length": 22.801242236024844,
"alnum_prop": 0.705802233723781,
"repo_name": "thebenwaters/openclickio",
"id": "edf1e533465ea8e6b4eb5cb659ea88080d170003",
"size": "3671",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs490/settings/development.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "161671"
},
{
"name": "HTML",
"bytes": "225844"
},
{
"name": "JavaScript",
"bytes": "74674"
},
{
"name": "Python",
"bytes": "43633"
},
{
"name": "Shell",
"bytes": "518"
},
{
"name": "TeX",
"bytes": "202658"
}
],
"symlink_target": ""
}
|
import awscli.clidriver
import sys
import logging
import copy
LOG = logging.getLogger(__name__)
class Completer(object):
def __init__(self):
self.driver = awscli.clidriver.create_clidriver()
self.main_hc = self.driver.create_help_command()
self.main_options = self._documented(self.main_hc.arg_table)
self.cmdline = None
self.point = None
self.command_hc = None
self.subcommand_hc = None
self.command_name = None
self.subcommand_name = None
self.current_word = None
self.previous_word = None
self.non_options = None
def _complete_option(self, option_name):
if option_name == '--endpoint-url':
return []
if option_name == '--output':
cli_data = self.driver.session.get_data('cli')
return cli_data['options']['output']['choices']
if option_name == '--profile':
return self.driver.session.available_profiles
return []
def _complete_provider(self):
retval = []
if self.current_word.startswith('-'):
cw = self.current_word.lstrip('-')
l = ['--' + n for n in self.main_options
if n.startswith(cw)]
retval = l
elif self.current_word == 'aws':
retval = self._documented(self.main_hc.command_table)
else:
# Otherwise, see if they have entered a partial command name
retval = self._documented(self.main_hc.command_table,
startswith=self.current_word)
return retval
def _complete_command(self):
retval = []
if self.current_word == self.command_name:
if self.command_hc:
retval = self._documented(self.command_hc.command_table)
elif self.current_word.startswith('-'):
retval = self._find_possible_options()
else:
# See if they have entered a partial command name
if self.command_hc:
retval = self._documented(self.command_hc.command_table,
startswith=self.current_word)
return retval
def _documented(self, table, startswith=None):
names = []
for key, command in table.items():
if getattr(command, '_UNDOCUMENTED', False):
# Don't tab complete undocumented commands/params
continue
if startswith is not None and not key.startswith(startswith):
continue
if getattr(command, 'positional_arg', False):
continue
names.append(key)
return names
def _complete_subcommand(self):
retval = []
if self.current_word == self.subcommand_name:
retval = []
elif self.current_word.startswith('-'):
retval = self._find_possible_options()
return retval
def _find_possible_options(self):
all_options = copy.copy(self.main_options)
if self.subcommand_hc:
all_options = all_options + self._documented(self.subcommand_hc.arg_table)
for opt in self.options:
# Look thru list of options on cmdline. If there are
# options that have already been specified and they are
# not the current word, remove them from list of possibles.
if opt != self.current_word:
stripped_opt = opt.lstrip('-')
if stripped_opt in all_options:
all_options.remove(stripped_opt)
cw = self.current_word.lstrip('-')
possibles = ['--' + n for n in all_options if n.startswith(cw)]
if len(possibles) == 1 and possibles[0] == self.current_word:
return self._complete_option(possibles[0])
return possibles
def _process_command_line(self):
# Process the command line and try to find:
# - command_name
# - subcommand_name
# - words
# - current_word
# - previous_word
# - non_options
# - options
self.command_name = None
self.subcommand_name = None
self.words = self.cmdline[0:self.point].split()
self.current_word = self.words[-1]
if len(self.words) >= 2:
self.previous_word = self.words[-2]
else:
self.previous_word = None
self.non_options = [w for w in self.words if not w.startswith('-')]
self.options = [w for w in self.words if w.startswith('-')]
# Look for a command name in the non_options
for w in self.non_options:
if w in self.main_hc.command_table:
self.command_name = w
cmd_obj = self.main_hc.command_table[self.command_name]
self.command_hc = cmd_obj.create_help_command()
if self.command_hc and self.command_hc.command_table:
# Look for subcommand name
for w in self.non_options:
if w in self.command_hc.command_table:
self.subcommand_name = w
cmd_obj = self.command_hc.command_table[self.subcommand_name]
self.subcommand_hc = cmd_obj.create_help_command()
break
break
def complete(self, cmdline, point):
self.cmdline = cmdline
self.command_name = None
if point is None:
point = len(cmdline)
self.point = point
self._process_command_line()
if not self.command_name:
# If we didn't find any command names in the cmdline
# lets try to complete provider options
return self._complete_provider()
if self.command_name and not self.subcommand_name:
return self._complete_command()
return self._complete_subcommand()
def complete(cmdline, point):
choices = Completer().complete(cmdline, point)
print(' \n'.join(choices))
if __name__ == '__main__':
if len(sys.argv) == 3:
cmdline = sys.argv[1]
point = int(sys.argv[2])
elif len(sys.argv) == 2:
cmdline = sys.argv[1]
point = None
else:
print('usage: %s <cmdline> <point>' % sys.argv[0])
sys.exit(1)
print(complete(cmdline, point))
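# Minimal manual invocation sketch (hypothetical shell session): running
#     python completer.py 'aws e' 5
# prints the documented top-level commands starting with 'e', one per line,
# which is the kind of output a shell completion hook would consume.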
|
{
"content_hash": "f09908fdff763864a5f92de7cac9393c",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 89,
"avg_line_length": 37.98809523809524,
"alnum_prop": 0.55531181447822,
"repo_name": "LockScreen/Backend",
"id": "e457260277c889097049d7d5a86db88868ca9a26",
"size": "6948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/awscli/completer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1411"
},
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "59046"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "21987"
},
{
"name": "Python",
"bytes": "14239313"
},
{
"name": "Shell",
"bytes": "5692"
},
{
"name": "TeX",
"bytes": "1527"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(name='nyan_logger',
version='0.2',
description='A nyan cat log formatter for the python logging module.',
url='https://github.com/tomhennigan/nyan_logger',
author='Tom Hennigan, Louise Deason',
author_email='tomhennigan@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='nyan cat logging',
packages=['nyan_logger'],
zip_safe=False)
|
{
"content_hash": "d04664739418afb96067bf05e43265c3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 37.375,
"alnum_prop": 0.6147157190635452,
"repo_name": "0x90/nyan_logger",
"id": "c69813fdc86ad67b6f6bc8485d7377ce659fa402",
"size": "1495",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19481"
}
],
"symlink_target": ""
}
|
import wx
import generic_class
from .constants import control, dtype, substitution_map
import os
import yaml
import modelDesign_window
ID_RUN = 11
class ModelConfig(wx.Frame):
# this creates the wx.Frame mentioned above in the class declaration
def __init__(self, parent, gpa_settings=None):
wx.Frame.__init__(
self, parent=parent, title="CPAC - Create New FSL Model", size=(900, 650))
if gpa_settings == None:
self.gpa_settings = {}
self.gpa_settings['subject_list'] = ''
self.gpa_settings['pheno_file'] = ''
self.gpa_settings['subject_id_label'] = ''
self.gpa_settings['design_formula'] = ''
self.gpa_settings['mean_mask'] = ''
self.gpa_settings['custom_roi_mask'] = 'None'
self.gpa_settings['coding_scheme'] = ''
self.gpa_settings['use_zscore'] = True
self.gpa_settings['derivative_list'] = ''
self.gpa_settings['repeated_measures'] = ''
self.gpa_settings['group_sep'] = ''
self.gpa_settings['grouping_var'] = 'None'
self.gpa_settings['z_threshold'] = ''
self.gpa_settings['p_threshold'] = ''
else:
self.gpa_settings = gpa_settings
self.parent = parent
mainSizer = wx.BoxSizer(wx.VERTICAL)
vertSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = wx.Panel(self)
self.window = wx.ScrolledWindow(self.panel, size=(-1,300))
self.page = generic_class.GenericClass(self.window, " FSL Model Setup")
self.page.add(label="Subject List ",
control=control.COMBO_BOX,
name="subject_list",
type=dtype.STR,
comment="Full path to a list of subjects to be included in the model.\n\nThis should be a text file with one subject per line.\n\nTip 1: A list in this format contaning all subjects run through CPAC was generated along with the main CPAC subject list (see subject_list_group_analysis.txt).\n\nTIp 2: An easy way to manually create this file is to copy the subjects column from your Regressor/EV spreadsheet.",
values=self.gpa_settings['subject_list'])
self.page.add(label="Phenotype/EV File ",
control=control.COMBO_BOX,
name="pheno_file",
type=dtype.STR,
comment="Full path to a .csv file containing EV information for each subject.\n\nTip: A file in this format (containing a single column listing all subjects run through CPAC) was generated along with the main CPAC subject list (see template_phenotypic.csv).",
values=self.gpa_settings['pheno_file'])
self.page.add(label="Subjects Column Name ",
control=control.TEXT_BOX,
name="subject_id_label",
type=dtype.STR,
comment="Name of the subjects column in your EV file.",
values=self.gpa_settings['subject_id_label'],
style=wx.EXPAND | wx.ALL,
size=(160, -1))
load_panel_sizer = wx.BoxSizer(wx.HORIZONTAL)
load_pheno_btn = wx.Button(self.window, 2, 'Load Phenotype File', (220,10), wx.DefaultSize, 0)
load_panel_sizer.Add(load_pheno_btn)
self.Bind(wx.EVT_BUTTON, self.populateEVs, id=2)
self.page.add_pheno_load_panel(load_panel_sizer)
# experimental checkbox row stuff
self.page.add(label = "Model Setup ",
control = control.CHECKBOX_GRID,
name = "model_setup",
type = 9,#dtype.LBOOL,
values = '',
comment="A list of EVs from your phenotype file will populate in this window. From here, you can select whether the EVs should be treated as categorical or if they should be demeaned (continuous/non-categorical EVs only). 'MeanFD', 'MeanFD_Jenkinson', 'Measure Mean', and 'Custom_ROI_Mean' will also appear in this window automatically as options to be used as regressors that can be included in your model design. Note that the MeanFD and mean of measure values are automatically calculated and supplied by C-PAC via individual-level analysis.",
size = (450, -1))
self.page.add(label="Design Matrix Formula ",
control=control.TEXT_BOX,
name="design_formula",
type=dtype.STR,
comment="Specify the formula to describe your model design. Essentially, including EVs in this formula inserts them into the model. The most basic format to include each EV you select would be 'EV + EV + EV + ..', etc. You can also select to include MeanFD, MeanFD_Jenkinson, Measure_Mean, and Custom_ROI_Mean here. See the C-PAC User Guide for more detailed information regarding formatting your design formula.",
values= self.gpa_settings['design_formula'],
size=(450, -1))
self.page.add(label="Measure Mean Generation ",
control=control.CHOICE_BOX,
name='mean_mask',
type=dtype.LSTR,
comment = "Choose whether to use a group mask or individual-specific mask when calculating the output means to be used as a regressor.\n\nThis only takes effect if you include the 'Measure_Mean' regressor in your Design Matrix Formula.",
values=["Group Mask","Individual Mask"])
self.page.add(label="Custom ROI Mean Mask ",
control=control.COMBO_BOX,
name="custom_roi_mask",
type=dtype.STR,
comment="Optional: Full path to a NIFTI file containing one or more ROI masks. The means of the masked regions will then be computed for each subject's output and will be included in the model as regressors (one for each ROI in the mask file) if you include 'Custom_ROI_Mean' in the Design Matrix Formula.",
values=self.gpa_settings['custom_roi_mask'])
self.page.add(label="Use z-score Standardized Derivatives ",
control=control.CHOICE_BOX,
name='use_zscore',
type=dtype.BOOL,
comment="Run the group analysis model on the z-score " \
"standardized version of the derivatives you " \
"choose in the list below.",
values=["True","False"])
self.page.add(label = "Select Derivatives ",
control = control.CHECKLIST_BOX,
name = "derivative_list",
type = dtype.LSTR,
values = ['ALFF',
'ALFF (smoothed)',
'f/ALFF',
'f/ALFF (smoothed)',
'ReHo',
'ReHo (smoothed)',
'ROI Average SCA',
'ROI Average SCA (smoothed)',
'Voxelwise SCA',
'Voxelwise SCA (smoothed)',
'Dual Regression',
'Dual Regression (smoothed)',
'Multiple Regression SCA',
'Multiple Regression SCA (smoothed)',
'Network Centrality',
'Network Centrality (smoothed)',
'VMHC (z-score std only)',
'VMHC z-stat (z-score std only)'],
comment = "Select which derivatives you would like to include when running group analysis.\n\nWhen including Dual Regression, make sure to correct your P-value for the number of maps you are comparing.\n\nWhen including Multiple Regression SCA, you must have more degrees of freedom (subjects) than there were time series.",
size = (350,160))
self.page.add(label="Coding Scheme ",
control=control.CHOICE_BOX,
name="coding_scheme",
type=dtype.LSTR,
comment="Choose the coding scheme to use when generating your model. 'Treatment' encoding is generally considered the typical scheme. Consult the User Guide for more information.",
values=["Treatment", "Sum"])
self.page.add(label="Model Group Variances Separately ",
control=control.CHOICE_BOX,
name='group_sep',
type=dtype.NUM,
comment="Specify whether FSL should model the variance for each group separately.\n\nIf this option is enabled, you must specify a grouping variable below.",
values=['Off', 'On'])
self.page.add(label="Grouping Variable ",
control=control.TEXT_BOX,
name="grouping_var",
type=dtype.STR,
comment="The name of the EV that should be used to group subjects when modeling variances.\n\nIf you do not wish to model group variances separately, set this value to None.",
values=self.gpa_settings['grouping_var'],
size=(160, -1))
self.page.add(label="Run Repeated Measures ",
control=control.CHOICE_BOX,
name='repeated_measures',
type=dtype.BOOL,
comment="Run repeated measures to compare different " \
"scans (must use the group analysis subject " \
"list and phenotypic file formatted for " \
"repeated measures.",
values=["False","True"])
self.page.add(label="Z threshold ",
control=control.FLOAT_CTRL,
name='z_threshold',
type=dtype.NUM,
comment="Only voxels with a Z-score higher than this value will be considered significant.",
values=2.3)
self.page.add(label="Cluster Significance Threshold ",
control=control.FLOAT_CTRL,
name='p_threshold',
type=dtype.NUM,
comment="Significance threshold (P-value) to use when doing cluster correction for multiple comparisons.",
values=0.05)
self.page.set_sizer()
if 'group_sep' in self.gpa_settings.keys():
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
if name == 'group_sep':
if self.gpa_settings['group_sep'] == True:
ctrl.set_value('On')
elif self.gpa_settings['group_sep'] == False:
ctrl.set_value('Off')
mainSizer.Add(self.window, 1, wx.EXPAND)
btnPanel = wx.Panel(self.panel, -1)
hbox = wx.BoxSizer(wx.HORIZONTAL)
buffer = wx.StaticText(btnPanel, label="\t\t\t\t\t\t")
hbox.Add(buffer)
cancel = wx.Button(btnPanel, wx.ID_CANCEL, "Cancel", (
220, 10), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.cancel, id=wx.ID_CANCEL)
hbox.Add(cancel, 0, flag=wx.LEFT | wx.BOTTOM, border=5)
load = wx.Button(btnPanel, wx.ID_ADD, "Load Settings", (
200, -1), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.load, id=wx.ID_ADD)
hbox.Add(load, 0.6, flag=wx.LEFT | wx.BOTTOM, border=5)
next = wx.Button(btnPanel, 3, "Next >", (200, -1), wx.DefaultSize, 0)
self.Bind(wx.EVT_BUTTON, self.load_next_stage, id=3)
hbox.Add(next, 0.6, flag=wx.LEFT | wx.BOTTOM, border=5)
# reminder: functions bound to buttons require arguments
# (self, event)
btnPanel.SetSizer(hbox)
#text_sizer = wx.BoxSizer(wx.HORIZONTAL)
#measure_text = wx.StaticText(self.window, label='Note: Regressor options \'MeanFD\' and \'Measure_Mean\' are automatically demeaned prior to being inserted into the model.')
#text_sizer.Add(measure_text)
#mainSizer.Add(text_sizer)
mainSizer.Add(
btnPanel, 0.5, flag=wx.ALIGN_RIGHT | wx.RIGHT, border=20)
self.panel.SetSizer(mainSizer)
self.Show()
# this fires only if we're coming BACK to this page from the second
# page, and these parameters are already pre-loaded. this is to
# automatically repopulate the 'Model Setup' checkbox grid and other
# settings under it
if self.gpa_settings['pheno_file'] != '':
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
phenoHeaderItems = phenoHeaderString.split(',')
phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
if name == 'model_setup':
ctrl.set_value(phenoHeaderItems)
ctrl.set_selection(self.gpa_settings['ev_selections'])
if name == 'coding_scheme':
ctrl.set_value(self.gpa_settings['coding_scheme'])
if name == 'mean_mask':
ctrl.set_value(self.gpa_settings['mean_mask'])
if name == 'repeated_measures':
ctrl.set_value(self.gpa_settings['repeated_measures'])
if name == 'z_threshold':
ctrl.set_value(self.gpa_settings['z_threshold'][0])
if name == 'p_threshold':
ctrl.set_value(self.gpa_settings['p_threshold'])
if name == 'use_zscore':
ctrl.set_value(self.gpa_settings['use_zscore'])
if name == 'group_sep':
ctrl.set_value(self.gpa_settings['group_sep'])
if name == 'grouping_var':
ctrl.set_value(self.gpa_settings['grouping_var'])
if name == 'derivative_list':
value = self.gpa_settings['derivative_list']
if isinstance(value, str):
value = value.replace("['","").replace("']","").split("', '")
new_derlist = []
# remove the _z if they are there, just so it can
# repopulate the listbox through the substitution map
for val in value:
if "_z" in val:
val = val.replace("_z","")
new_derlist.append(val)
else:
new_derlist.append(val)
ctrl.set_value(new_derlist)
def cancel(self, event):
self.Close()
def display(self, win, msg):
wx.MessageBox(msg, "Error")
win.SetBackgroundColour("pink")
win.SetFocus()
win.Refresh()
raise ValueError
def load_pheno(self,event):
pass
''' button: LOAD SETTINGS '''
def load(self, event):
# when the user clicks 'Load Settings', which loads the
# self.gpa_settings dictionary - it populates the values for both
# windows, so when they hit Next, the next window is also populated
dlg = wx.FileDialog(
self, message="Choose the config fsl yaml file",
defaultDir=os.getcwd(),
defaultFile="",
wildcard="YAML files(*.yaml, *.yml)|*.yaml;*.yml",
style=wx.OPEN | wx.CHANGE_DIR)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
config_map = yaml.load(open(path, 'r'))
s_map = dict((v, k) for k, v in substitution_map.iteritems())
# load the group analysis .yml config file (in dictionary form)
# into the self.gpa_settings dictionary which holds all settings
self.gpa_settings = config_map
if self.gpa_settings is None:
errDlgFileTest = wx.MessageDialog(
self, "Error reading file - group analysis " \
"configuration file appears to be blank.",
"File Read Error",
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
# repopulate the model setup checkbox grid, since this has to be
# done specially
if 'pheno_file' in self.gpa_settings.keys():
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
phenoHeaderItems = phenoHeaderString.split(',')
phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
for ctrl in self.page.get_ctrl_list():
if ctrl.get_name() == 'model_setup':
ctrl.set_value(phenoHeaderItems)
ctrl.set_selection(self.gpa_settings['ev_selections'])
# populate the rest of the controls
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
value = config_map.get(name)
dtype = ctrl.get_datatype()
# the model setup checkbox grid is the only one that doesn't
# get repopulated the standard way. instead it is repopulated
# by the code directly above
if name == 'derivative_list':
value = [s_map.get(item)
for item in value if s_map.get(item) != None]
if not value:
value = [str(item) for item in config_map.get(name)]
new_derlist = []
for val in value:
if "_z" in val:
val = val.replace("_z","")
new_derlist.append(val)
else:
new_derlist.append(val)
ctrl.set_value(new_derlist)
elif name == 'repeated_measures' or name == 'use_zscore':
ctrl.set_value(str(value))
elif name == 'z_threshold' or name == 'p_threshold':
value = value[0]
ctrl.set_value(value)
elif name == 'group_sep':
value = s_map.get(value)
ctrl.set_value(value)
elif name != 'model_setup' and name != 'derivative_list':
ctrl.set_value(value)
dlg.Destroy()
def read_phenotypic(self, pheno_file, ev_selections):
import csv
ph = pheno_file
# Read in the phenotypic CSV file into a dictionary named pheno_dict
# while preserving the header fields as they correspond to the data
p_reader = csv.DictReader(open(os.path.abspath(ph), 'rU'), skipinitialspace=True)
#pheno_dict_list = []
# dictionary to store the data in a format Patsy can use
# i.e. a dictionary where each header is a key, and the value is a
# list of all of that header's values
pheno_data_dict = {}
for line in p_reader:
for key in line.keys():
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for that
# EV if it is categorical; formats this list into a form
# Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd2', 'adhd1'] }
# instead of just [1, 1, 2, 1], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
else:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(line[key])
#pheno_dict_list.append(line)
# pheno_dict_list is a list of dictionaries of phenotype header items
# matched to their values, which also includes subject IDs
# i.e. [{'header1': 'value', 'header2': 'value'}, {'header1': 'value', 'header2': 'value'}, ..]
# these dictionaries are UNORDERED, i.e. header items ARE NOT ORDERED
return pheno_data_dict
''' button: LOAD PHENOTYPE FILE '''
def populateEVs(self, event):
# this runs when the user clicks 'Load Phenotype File'
if self.gpa_settings is None:
self.gpa_settings = {}
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
self.gpa_settings[name] = str(ctrl.get_selection())
### CHECK PHENOFILE if can open etc.
# function for file path checking
def testFile(filepath, paramName):
try:
fileTest = open(filepath)
fileTest.close()
except:
errDlgFileTest = wx.MessageDialog(
self, 'Error reading file - either it does not exist or you' \
' do not have read access. \n\n' \
'Parameter: %s' % paramName,
'File Access Error',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
testFile(self.gpa_settings['subject_list'], 'Subject List')
testFile(self.gpa_settings['pheno_file'], 'Phenotype/EV File')
subFile = open(os.path.abspath(self.gpa_settings['subject_list']))
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']),"rU")
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
self.phenoHeaderItems = phenoHeaderString.split(',')
if self.gpa_settings['subject_id_label'] in self.phenoHeaderItems:
self.phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
else:
errSubID = wx.MessageDialog(
self, 'Please enter the name of the subject ID column' \
' as it is labeled in the phenotype file.',
'Blank/Incorrect Subject Header Input',
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# some more checks
sub_IDs = subFile.readlines()
self.subs = []
for sub in sub_IDs:
self.subs.append(sub.rstrip("\n"))
pheno_rows = phenoFile.readlines()
for row in pheno_rows:
# check if the pheno file produces any rows such as ",,,,," due
# to odd file formatting issues. if so, ignore this row. if there
# are values present in the row, continue as normal
if ",," not in row:
# if it finds a sub from the subject list in the current row
# taken from the pheno, move on. if it goes through the entire
# subject list and never finds a match, kick off the "else"
# clause below containing the error message
for sub in self.subs:
# for repeated measures-formatted files
if "," in sub:
# make the comma separator an underscore to match the
# repeated measures-formatted pheno file
if sub.replace(",","_") in row:
break
# for normal
else:
if sub in row:
break
else:
errSubID = wx.MessageDialog(
self, "Your phenotype file contains a subject ID " \
"that is not present in your group analysis " \
"subject list.\n\nPhenotype file row with subject " \
"ID not in subject list:\n%s" \
% row,
"Subject Not In List",
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
for ctrl in self.page.get_ctrl_list():
# update the 'Model Setup' box and populate it with the EVs and
# their associated checkboxes for categorical and demean
if ctrl.get_name() == 'model_setup':
ctrl.set_value(self.phenoHeaderItems)
# populate the design formula text box with a formula which
# includes all of the EVs, and two of the measures (MeanFD and
# the measure/derivative mean) - the user can edit this if they
# need to, obviously
if ctrl.get_name() == 'design_formula':
formula_string = ''
for EV in self.phenoHeaderItems:
if formula_string == '':
formula_string = EV
else:
formula_string = formula_string + ' + ' + EV
formula_string = formula_string + ' + MeanFD_Jenkinson'
ctrl.set_value(formula_string)
''' button: NEXT '''
def load_next_stage(self, event):
import patsy
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
self.gpa_settings[name] = str(ctrl.get_selection())
### CHECK PHENOFILE if can open etc.
# function for file path checking
def testFile(filepath, paramName):
try:
fileTest = open(filepath)
fileTest.close()
except:
errDlgFileTest = wx.MessageDialog(
self, 'Error reading file - either it does not exist ' \
'or you do not have read access. \n\n' \
'Parameter: %s' % paramName,
'File Access Error',
wx.OK | wx.ICON_ERROR)
errDlgFileTest.ShowModal()
errDlgFileTest.Destroy()
raise Exception
testFile(self.gpa_settings['subject_list'], 'Subject List')
testFile(self.gpa_settings['pheno_file'], 'Phenotype/EV File')
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']),"rU")
phenoHeaderString = phenoFile.readline().rstrip('\r\n')
self.phenoHeaderItems = phenoHeaderString.split(',')
if self.gpa_settings['subject_id_label'] in self.phenoHeaderItems:
self.phenoHeaderItems.remove(self.gpa_settings['subject_id_label'])
else:
errSubID = wx.MessageDialog(
self, 'Please enter the name of the subject ID column' \
' as it is labeled in the phenotype file.',
'Blank/Incorrect Subject Header Input',
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
for ctrl in self.page.get_ctrl_list():
name = ctrl.get_name()
# get the design matrix formula
if name == 'design_formula':
self.gpa_settings['design_formula'] = str(ctrl.get_selection())
# get the EV categorical + demean grid selections
elif name == 'model_setup':
# basically, ctrl is checkbox_grid in this case, and
# get_selection goes to generic_class.py first, which links
# it to the custom GetGridSelection() function in the
# checkbox_grid class in custom_control.py
self.gpa_settings['ev_selections'] = ctrl.get_selection()
elif name == 'group_sep':
self.gpa_settings['group_sep'] = ctrl.get_selection()
elif name == 'grouping_var':
self.gpa_settings['grouping_var'] = ctrl.get_selection()
if name == 'derivative_list':
# grab this for below
derlist_ctrl = ctrl
else:
self.gpa_settings[name] = str(ctrl.get_selection())
self.gpa_settings['derivative_list'] = []
for derivative in list(derlist_ctrl.get_selection()):
if self.gpa_settings['use_zscore'] == "True":
self.gpa_settings['derivative_list'].append(derivative + "_z")
else:
self.gpa_settings['derivative_list'].append(derivative)
self.pheno_data_dict = self.read_phenotypic(self.gpa_settings['pheno_file'], self.gpa_settings['ev_selections'])
try:
phenoFile = open(os.path.abspath(self.gpa_settings['pheno_file']))
except:
print '\n\n[!] CPAC says: The phenotype file path provided ' \
'couldn\'t be opened - either it does not exist or ' \
'there are access restrictions.\n'
print 'Phenotype file provided: '
print self.gpa_settings['pheno_file'], '\n\n'
raise IOError
# validate design formula and build Available Contrasts list
var_list_for_contrasts = []
EVs_to_test = []
EVs_to_include = []
# take the user-provided design formula and break down the included
# terms into a list, and use this to create the list of available
# contrasts
formula = self.gpa_settings['design_formula']
# need to cycle through the EVs inside parentheses just to make
# sure they are valid
# THEN you have to treat the entire parentheses thing as one EV when
# it comes to including it in the list for contrasts
formula_strip = formula.replace('+',' ')
formula_strip = formula_strip.replace('-',' ')
formula_strip = formula_strip.replace('**(', '**')
formula_strip = formula_strip.replace(')**', '**')
formula_strip = formula_strip.replace('(',' ')
formula_strip = formula_strip.replace(')',' ')
EVs_to_test = formula_strip.split()
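# Rough illustration (hypothetical formula): a design formula such as
# '(age + sex)**2 + MeanFD' is flattened by the replacements above into
# tokens like ['age', 'sex**2', 'MeanFD'], which are then validated below.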
# ensure the design formula only has valid EVs in it
for EV in EVs_to_test:
# ensure ** interactions have a valid EV on one side and a number
# on the other
if '**' in EV:
both_sides = EV.split('**')
int_check = 0
for side in both_sides:
if side.isdigit():
int_check = 1
else:
if (side not in self.pheno_data_dict.keys()) and \
side != 'MeanFD' and side != 'MeanFD_Jenkinson' \
and side != 'Measure_Mean' and \
side != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula as ' \
'part of the interaction \'%s\' is not ' \
'a valid EV option.\n\nPlease enter ' \
'only the EVs in your phenotype file ' \
'or the MeanFD, MeanFD_Jenkinson, ' \
'Custom_ROI_Mean, or Measure_Mean ' \
'options.' \
% (side,EV)
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if int_check != 1:
errmsg = 'CPAC says: The interaction \'%s\' you ' \
'entered within the design formula requires ' \
'a number on one side.\n\nExample: ' \
'(EV1 + EV2 + EV3)**3\n\nNote: This would be ' \
'equivalent to\n(EV1 + EV2 + EV3) * ' \
'(EV1 + EV2 + EV3) * (EV1 + EV2 + EV3)' % EV
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
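# Hypothetical example of the check above: '(EV1 + EV2)**3' produces the
# token 'EV2**3'; the digit 3 satisfies int_check (and 'EV2' must be a pheno
# column or one of the named regressors), while 'EV1**EV2' would raise the
# interaction error because neither side is a number.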
# ensure these interactions are input correctly
elif (':' in EV) or ('/' in EV) or ('*' in EV):
if ':' in EV:
both_EVs_in_interaction = EV.split(':')
if '/' in EV:
both_EVs_in_interaction = EV.split('/')
if '*' in EV:
both_EVs_in_interaction = EV.split('*')
for interaction_EV in both_EVs_in_interaction:
if (interaction_EV not in self.pheno_data_dict.keys()) and \
interaction_EV != 'MeanFD' and \
interaction_EV != 'MeanFD_Jenkinson' and \
interaction_EV != 'Measure_Mean' and \
interaction_EV != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula as ' \
'part of the interaction \'%s\' is not a ' \
'valid EV option.\n\nPlease enter only ' \
'the EVs in your phenotype file or the ' \
'MeanFD, MeanFD_Jenkinson, Custom_ROI_' \
'Mean, or Measure_Mean options.' \
% (interaction_EV,EV)
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
else:
if (EV not in self.pheno_data_dict.keys()) and EV != 'MeanFD' \
and EV != 'MeanFD_Jenkinson' and EV != 'Measure_Mean' \
and EV != 'Custom_ROI_Mean':
errmsg = 'CPAC says: The regressor \'%s\' you ' \
'entered within the design formula is not ' \
'a valid EV option.' \
'\n\nPlease enter only the EVs in your phenotype ' \
'file or the MeanFD, MeanFD_Jenkinson, ' \
'Custom_ROI_Mean, or Measure_Mean options.' \
% EV
errSubID = wx.MessageDialog(self, errmsg,
'Invalid EV', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
''' design formula/input parameters checks '''
if "Custom_ROI_Mean" in formula and \
(self.gpa_settings['custom_roi_mask'] == None or \
self.gpa_settings['custom_roi_mask'] == ""):
err_string = "You included 'Custom_ROI_Mean' as a regressor " \
"in your Design Matrix Formula, but you did not " \
"specify a Custom ROI Mean Mask file.\n\nPlease " \
"either specify a mask file, or remove " \
"'Custom_ROI_Mean' from your model."
errSubID = wx.MessageDialog(self, err_string,
'No Custom ROI Mean Mask File', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if "Custom_ROI_Mean" not in formula and \
(self.gpa_settings['custom_roi_mask'] != None and \
self.gpa_settings['custom_roi_mask'] != "" and \
self.gpa_settings['custom_roi_mask'] != "None" and \
self.gpa_settings['custom_roi_mask'] != "none"):
warn_string = "Note: You specified a Custom ROI Mean Mask file, " \
"but you did not include 'Custom_ROI_Mean' as a " \
"regressor in your Design Matrix Formula.\n\nThe " \
"means of the ROIs specified in the file will not " \
"be included as regressors unless you include " \
"'Custom_ROI_Mean' in your model."
errSubID = wx.MessageDialog(self, warn_string,
'No Custom_ROI_Mean Regressor', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if str(self.gpa_settings["use_zscore"]) == "True":
if "Measure_Mean" in formula:
warn_string = "Note: You have included Measure_Mean as a " \
"regressor in your model, but you have selected to run " \
"the group-level analysis with the z-score standardized "\
"version of the outputs.\n\nThe mean of any z-score " \
"standardized output will always be zero."
errSubID = wx.MessageDialog(self, warn_string,
'Measure_Mean Included With z-scored Outputs', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
else:
for deriv in self.gpa_settings["derivative_list"]:
if "VMHC" in deriv:
warn_string = "Note: You have selected to run group-" \
"level analysis using raw outputs (non-z-score " \
"standardized), but you have also included VMHC " \
"as one of the outputs to include in your model."
errSubID = wx.MessageDialog(self, warn_string,
'VMHC Cannot Be Included As Raw Output', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# if there is a custom ROI mean mask file provided, and the user
# includes it as a regressor in their design matrix formula, calculate
# the number of ROIs in the file and generate the column names so that
# they can be passed as possible contrast labels
if "Custom_ROI_Mean" in formula and \
(self.gpa_settings['custom_roi_mask'] != None and \
self.gpa_settings['custom_roi_mask'] != "" and \
self.gpa_settings['custom_roi_mask'] != "None" and \
self.gpa_settings['custom_roi_mask'] != "none"):
import commands
try:
ROIstats_output = commands.getoutput("3dROIstats -mask %s %s" \
% (self.gpa_settings['custom_roi_mask'], \
self.gpa_settings['custom_roi_mask']))
except Exception as e:
print "[!] CPAC says: AFNI 3dROIstats failed for custom ROI" \
"Mean Mask file validation. Please ensure you either " \
"have AFNI installed and that you created the mask " \
"file properly. Consult the User Guide for more " \
"information.\n\n"
print "Error details: %s\n\n" % e
raise
ROIstats_list = ROIstats_output.split("\t")
# calculate the number of ROIs - 3dROIstats output can be split
# into a list, and the actual ROI means begin at a certain point
num_rois = (len(ROIstats_list)-3)/2
custom_roi_labels = []
for num in range(0,num_rois):
custom_roi_labels.append("Custom_ROI_Mean_%d" % int(num+1))
if str(self.gpa_settings["group_sep"]) == "On":
if (self.gpa_settings["grouping_var"] == "None") or \
(self.gpa_settings["grouping_var"] is None) or \
(self.gpa_settings["grouping_var"] == "none"):
warn_string = "Note: You have selected to model group " \
"variances separately, but you have not specified a " \
"grouping variable."
errSubID = wx.MessageDialog(self, warn_string,
'No Grouping Variable Specified', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
if self.gpa_settings["grouping_var"] not in formula:
warn_string = "Note: You have specified '%s' as your " \
"grouping variable for modeling the group variances " \
"separately, but you have not included this variable " \
"in your design formula.\n\nPlease include this " \
"variable in your design, or choose a different " \
"grouping variable." % self.gpa_settings["grouping_var"]
errSubID = wx.MessageDialog(self, warn_string,
'Grouping Variable not in Design', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
def read_phenotypic(pheno_file, ev_selections, subject_id_label):
import csv
import numpy as np
ph = pheno_file
# Read in the phenotypic CSV file into a dictionary named pheno_dict
# while preserving the header fields as they correspond to the data
p_reader = csv.DictReader(open(os.path.abspath(ph), 'rU'), skipinitialspace=True)
# dictionary to store the data in a format Patsy can use
# i.e. a dictionary where each header is a key, and the value is a
# list of all of that header's values
pheno_data_dict = {}
for line in p_reader:
# here, each instance of 'line' is really a dictionary where the
# keys are the pheno headers, and their values are the values of
# each EV for that one subject - each iteration of this loop is
# one subject
for key in line.keys():
if key not in pheno_data_dict.keys():
pheno_data_dict[key] = []
# create a list within one of the dictionary values for that
# EV if it is categorical; formats this list into a form
# Patsy can understand regarding categoricals:
# example: { ADHD: ['adhd1', 'adhd1', 'adhd0', 'adhd1'] }
# instead of just [1, 1, 0, 1], etc.
if 'categorical' in ev_selections.keys():
if key in ev_selections['categorical']:
pheno_data_dict[key].append(key + str(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
elif key == subject_id_label:
pheno_data_dict[key].append(line[key])
else:
pheno_data_dict[key].append(float(line[key]))
# this needs to run after each list in each key has been fully
# populated above
for key in pheno_data_dict.keys():
# demean the EVs marked for demeaning
if 'demean' in ev_selections.keys():
if key in ev_selections['demean']:
new_demeaned_evs = []
mean_evs = 0.0
# populate a dictionary, a key for each demeanable EV, with
# the value being the sum of all the values (which need to be
# converted to float first)
for val in pheno_data_dict[key]:
mean_evs += float(val)
# calculate the mean of the current EV in this loop
mean_evs = mean_evs / len(pheno_data_dict[key])
# remove the EV's mean from each value of this EV
# (demean it!)
for val in pheno_data_dict[key]:
new_demeaned_evs.append(float(val) - mean_evs)
# replace
pheno_data_dict[key] = new_demeaned_evs
# converts non-categorical EV lists into NumPy arrays
# so that Patsy may read them in properly
if 'categorical' in ev_selections.keys():
if key not in ev_selections['categorical']:
pheno_data_dict[key] = np.array(pheno_data_dict[key])
return pheno_data_dict
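# Rough numeric sketch (hypothetical values): an EV marked for demeaning with
# values [2.0, 4.0, 6.0] has mean 4.0, so the demeaning loop above rewrites it
# as [-2.0, 0.0, 2.0] before the non-categorical lists become NumPy arrays.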
patsy_formatted_pheno = read_phenotypic(self.gpa_settings['pheno_file'], self.gpa_settings['ev_selections'], self.gpa_settings['subject_id_label'])
# let's create dummy columns for MeanFD, Measure_Mean, and
# Custom_ROI_Mask (if included in the Design Matrix Formula) just so we
# can get an accurate list of EVs Patsy will generate
def create_regressor_column(regressor):
# regressor should be a string of the name of the regressor
import numpy as np
regressor_list = []
for key in patsy_formatted_pheno.keys():
for val in patsy_formatted_pheno[key]:
regressor_list.append(0.0)
break
regressor_list = np.array(regressor_list)
patsy_formatted_pheno[regressor] = regressor_list
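# e.g. (hypothetical): with 10 subjects in the phenotype file, the calls below
# insert a length-10 array of zeros for 'MeanFD' etc., purely so Patsy can
# resolve the term names in the formula; the real values are supplied by the
# individual-level analysis elsewhere.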
if 'MeanFD' in formula:
create_regressor_column('MeanFD')
if 'MeanFD_Jenkinson' in formula:
create_regressor_column('MeanFD_Jenkinson')
if 'Measure_Mean' in formula:
create_regressor_column('Measure_Mean')
if 'Custom_ROI_Mean' in formula:
add_formula_string = ""
for col_label in custom_roi_labels:
create_regressor_column(col_label)
# create a string of all the new custom ROI regressor column
# names to be inserted into the design formula, so that Patsy
# will accept the phenotypic data dictionary that now has these
# columns
if add_formula_string == "":
add_formula_string = add_formula_string + col_label
else:
add_formula_string = add_formula_string + " + " + col_label
formula = formula.replace("Custom_ROI_Mean",add_formula_string)
if 'categorical' in self.gpa_settings['ev_selections']:
for EV_name in self.gpa_settings['ev_selections']['categorical']:
if self.gpa_settings['coding_scheme'] == 'Treatment':
formula = formula.replace(EV_name, 'C(' + EV_name + ')')
elif self.gpa_settings['coding_scheme'] == 'Sum':
formula = formula.replace(EV_name, 'C(' + EV_name + ', Sum)')
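# Sketch (hypothetical EVs): with 'sex' marked categorical, a formula of
# 'age + sex' becomes 'age + C(sex)' under Treatment coding or
# 'age + C(sex, Sum)' under Sum coding before being handed to Patsy below.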
# create the dmatrix in Patsy just to see what the design matrix
# columns are going to be
try:
dmatrix = patsy.dmatrix(formula, patsy_formatted_pheno)
except:
print '\n\n[!] CPAC says: Design matrix creation wasn\'t ' \
'successful - do the terms in your formula correctly ' \
'correspond to the EVs listed in your phenotype file?\n'
print 'Phenotype file provided: '
print self.gpa_settings['pheno_file'], '\n\n'
raise Exception
column_names = dmatrix.design_info.column_names
subFile = open(os.path.abspath(self.gpa_settings['subject_list']))
sub_IDs = subFile.readlines()
self.subs = []
for sub in sub_IDs:
self.subs.append(sub.rstrip("\n"))
# check to make sure there are more subjects than EVs!!
if len(column_names) >= len(self.subs):
err = "There are more (or an equal amount of) EVs currently " \
"included in the model than there are subjects in the " \
"group analysis subject list. There must be more " \
"subjects than EVs in the design.\n\nNumber of subjects: " \
"%d\nNumber of EVs: %d\n\nNote: An 'Intercept' " \
"column gets added to the design as an EV, so there will " \
"be one more EV than you may have specified in your " \
"design." % (len(self.subs),len(column_names))
errSubID = wx.MessageDialog(self, err,
"Too Many EVs or Too Few Subjects",
wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
raw_column_strings = []
# remove the header formatting Patsy creates for categorical variables
# because we are going to use var_list_for_contrasts as a label for
# users to know what contrasts are available to them
for column in column_names:
# if using Sum encoding, a column name may look like this:
# C(adhd, Sum)[S.adhd0]
# this loop leaves it with only "adhd0" in this case, for the
# contrasts list for the next GUI page
column_string = column
string_for_removal = ''
for char in column_string:
string_for_removal = string_for_removal + char
if char == '.':
column_string = column_string.replace(string_for_removal, '')
string_for_removal = ''
column_string = column_string.replace(']', '')
if ":" in column_string:
try:
column_string = column_string.split("[")[1]
except:
pass
raw_column_strings.append(column_string)
if str(self.gpa_settings["group_sep"]) == "On":
grouping_options = []
idx = 0
for column_string in raw_column_strings:
if self.gpa_settings["grouping_var"] in column_string:
grouping_variable_info = []
grouping_variable_info.append(column_string)
grouping_variable_info.append(idx)
grouping_options.append(grouping_variable_info)
# grouping_var_idx is the column numbers in the design matrix
# which holds the grouping variable (and its possible levels)
idx += 1
# all the categorical values/levels of the grouping variable
grouping_var_levels = []
for gv_idx in grouping_options:
for subject in dmatrix:
if self.gpa_settings["grouping_var"] in self.gpa_settings["ev_selections"]["categorical"]:
level_num = str(int(subject[gv_idx[1]]))
else:
level_num = str(subject[gv_idx[1]])
level_label = '__' + self.gpa_settings["grouping_var"] + level_num
if level_label not in grouping_var_levels:
grouping_var_levels.append(level_label)
# make the new header for the reorganized data
for column_string in raw_column_strings:
if column_string != "Intercept":
if self.gpa_settings["grouping_var"] not in column_string:
for level in grouping_var_levels:
var_list_for_contrasts.append(column_string + level)
elif self.gpa_settings["grouping_var"] in column_string:
var_list_for_contrasts.append(column_string)
else:
for column_string in raw_column_strings:
if column_string != 'Intercept':
var_list_for_contrasts.append(column_string)
# check for repeated measures file formatting!
group_sublist_file = open(self.gpa_settings['subject_list'], 'r')
group_sublist_items = group_sublist_file.readlines()
group_sublist = [line.rstrip('\n') for line in group_sublist_items \
if not (line == '\n') and not line.startswith('#')]
for ga_sub in group_sublist:
# ga_sub = subject ID taken off the group analysis subject list
# let's check to make sure the subject list is formatted for
# repeated measures properly if repeated measures is enabled
# and vice versa
if (self.gpa_settings['repeated_measures'] == "True") and \
(',' not in ga_sub):
errmsg = "The group analysis subject list is not in the " \
"appropriate format for repeated measures. Please " \
"use the appropriate format as described in the " \
"CPAC User Guide, or turn off Repeated Measures." \
"\n\nNote: CPAC generates a properly-formatted " \
"group analysis subject list meant for running " \
"repeated measures when you create your original " \
"subject list. Look for 'subject_list_group_" \
"analysis_repeated_measures.txt' in the directory " \
"where you created your subject list."
errSubID = wx.MessageDialog(self, errmsg,
'Subject List Format', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
elif (self.gpa_settings['repeated_measures'] == "False") and \
(',' in ga_sub):
errmsg = "It looks like your group analysis subject list is " \
"formatted for running repeated measures, but " \
"'Run Repeated Measures' is not enabled."
errSubID = wx.MessageDialog(self, errmsg,
'Subject List Format', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# make sure the sub IDs in the sublist and pheno files match!
group_pheno_file = open(self.gpa_settings['pheno_file'], 'r')
group_pheno_lines = group_pheno_file.readlines()
# gather the subject IDs from the phenotype file
def get_pheno_subjects(delimiter):
for item in group_pheno_lines[0].split(delimiter):
if item == self.gpa_settings['subject_id_label']:
index = group_pheno_lines[0].split(delimiter).index(item)
group_pheno_subs = group_pheno_lines[1:len(group_pheno_lines)]
pheno_subs = []
for pheno_sub_line in group_pheno_subs:
pheno_subs.append(pheno_sub_line.split(delimiter)[index])
return pheno_subs
pheno_subs = []
if "," in group_pheno_lines[0]:
pheno_subs = get_pheno_subjects(",")
# now make sure the group sublist and pheno subject IDs match, at least
# for the ones that exist (i.e. may be less sub IDs in the sublist)
for sublist_subID, pheno_subID in zip(group_sublist, pheno_subs):
# if group sublist is formatted for repeated measures
if "," in sublist_subID:
sublist_subID = sublist_subID.replace(",","_")
if sublist_subID != pheno_subID:
if self.gpa_settings['repeated_measures'] == "False":
errmsg = "The subject IDs in your group subject list " \
"and your phenotype file do not match. Please " \
"make sure these have been set up correctly."
else:
errmsg = "The subject IDs in your group subject list " \
"and your phenotype file do not match. Please " \
"make sure these have been set up correctly." \
"\n\nNote: Repeated measures is enabled - does "\
"your phenotype file have properly-formatted " \
"subject IDs matching your repeated measures " \
"group analysis subject list?"
errSubID = wx.MessageDialog(self, errmsg,
'Subject ID Mismatch', wx.OK | wx.ICON_ERROR)
errSubID.ShowModal()
errSubID.Destroy()
raise Exception
# open the next window!
modelDesign_window.ModelDesign(self.parent, self.gpa_settings, var_list_for_contrasts) # !!! may need to pass the actual dmatrix as well
self.Close()
|
{
"content_hash": "aab519d9d8efa60f4898375521dd19dd",
"timestamp": "",
"source": "github",
"line_count": 1502,
"max_line_length": 568,
"avg_line_length": 39.826897470039945,
"alnum_prop": 0.5083584085590104,
"repo_name": "erramuzpe/C-PAC",
"id": "66a5e9d86e835936b55df5b31b6c86883318ce97",
"size": "59820",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "CPAC/GUI/interface/utils/modelconfig_window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import abc
import functools
import os
from oslo.utils import importutils
import six
import webob.dec
import webob.exc
import nova.api.openstack
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import log as logging
import nova.policy
LOG = logging.getLogger(__name__)
class ExtensionDescriptor(object):
"""Base class that defines the contract for extensions.
Note that you don't have to derive from this class to have a valid
extension; it is purely a convenience.
"""
# The name of the extension, e.g., 'Fox In Socks'
name = None
# The alias for the extension, e.g., 'FOXNSOX'
alias = None
# Description comes from the docstring for the class
# The XML namespace for the extension, e.g.,
# 'http://www.fox.in.socks/api/ext/pie/v1.0'
namespace = None
# The timestamp when the extension was last updated, e.g.,
# '2011-01-22T19:25:27Z'
updated = None
def __init__(self, ext_mgr):
"""Register extension with the extension manager."""
ext_mgr.register(self)
self.ext_mgr = ext_mgr
def get_resources(self):
"""List of extensions.ResourceExtension extension objects.
Resources define new nouns, and are accessible through URLs.
"""
resources = []
return resources
def get_controller_extensions(self):
"""List of extensions.ControllerExtension extension objects.
Controller extensions are used to extend existing controllers.
"""
controller_exts = []
return controller_exts
@classmethod
def nsmap(cls):
"""Synthesize a namespace map from extension."""
# Start with a base nsmap
nsmap = ext_nsmap.copy()
# Add the namespace for the extension
nsmap[cls.alias] = cls.namespace
return nsmap
@classmethod
def xmlname(cls, name):
"""Synthesize element and attribute names."""
return '{%s}%s' % (cls.namespace, name)
def make_ext(elem):
elem.set('name')
elem.set('namespace')
elem.set('alias')
elem.set('updated')
desc = xmlutil.SubTemplateElement(elem, 'description')
desc.text = 'description'
xmlutil.make_links(elem, 'links')
ext_nsmap = {None: xmlutil.XMLNS_COMMON_V10, 'atom': xmlutil.XMLNS_ATOM}
class ExtensionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extension', selector='extension')
make_ext(root)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('extensions')
elem = xmlutil.SubTemplateElement(root, 'extension',
selector='extensions')
make_ext(elem)
return xmlutil.MasterTemplate(root, 1, nsmap=ext_nsmap)
class ExtensionsController(wsgi.Resource):
def __init__(self, extension_manager):
self.extension_manager = extension_manager
super(ExtensionsController, self).__init__(None)
def _translate(self, ext):
ext_data = {}
ext_data['name'] = ext.name
ext_data['alias'] = ext.alias
ext_data['description'] = ext.__doc__
ext_data['namespace'] = ext.namespace
ext_data['updated'] = ext.updated
ext_data['links'] = [] # TODO(dprince): implement extension links
return ext_data
@wsgi.serializers(xml=ExtensionsTemplate)
def index(self, req):
extensions = []
for ext in self.extension_manager.sorted_extensions():
extensions.append(self._translate(ext))
return dict(extensions=extensions)
@wsgi.serializers(xml=ExtensionTemplate)
def show(self, req, id):
try:
# NOTE(dprince): the extensions alias is used as the 'id' for show
ext = self.extension_manager.extensions[id]
except KeyError:
raise webob.exc.HTTPNotFound()
return dict(extension=self._translate(ext))
def delete(self, req, id):
raise webob.exc.HTTPNotFound()
def create(self, req, body):
raise webob.exc.HTTPNotFound()
class ExtensionManager(object):
"""Load extensions from the configured extension path.
See nova/tests/api/openstack/compute/extensions/foxinsocks.py for an
example extension implementation.
"""
def sorted_extensions(self):
if self.sorted_ext_list is None:
self.sorted_ext_list = sorted(self.extensions.iteritems())
for _alias, ext in self.sorted_ext_list:
yield ext
def is_loaded(self, alias):
return alias in self.extensions
def register(self, ext):
# Do nothing if the extension doesn't check out
if not self._check_extension(ext):
return
alias = ext.alias
if alias in self.extensions:
raise exception.NovaException("Found duplicate extension: %s"
% alias)
self.extensions[alias] = ext
self.sorted_ext_list = None
def get_resources(self):
"""Returns a list of ResourceExtension objects."""
resources = []
resources.append(ResourceExtension('extensions',
ExtensionsController(self)))
for ext in self.sorted_extensions():
try:
resources.extend(ext.get_resources())
except AttributeError:
# NOTE(dprince): Extensions aren't required to have resource
# extensions
pass
return resources
def get_controller_extensions(self):
"""Returns a list of ControllerExtension objects."""
controller_exts = []
for ext in self.sorted_extensions():
try:
get_ext_method = ext.get_controller_extensions
except AttributeError:
# NOTE(Vek): Extensions aren't required to have
# controller extensions
continue
controller_exts.extend(get_ext_method())
return controller_exts
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug('Ext name: %s', extension.name)
LOG.debug('Ext alias: %s', extension.alias)
LOG.debug('Ext description: %s',
' '.join(extension.__doc__.strip().split()))
LOG.debug('Ext namespace: %s', extension.namespace)
LOG.debug('Ext updated: %s', extension.updated)
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), unicode(ex))
return False
return True
def load_extension(self, ext_factory):
"""Execute an extension factory.
Loads an extension. The 'ext_factory' is the name of a
callable that will be imported and called with one
argument--the extension manager. The factory callable is
expected to call the register() method at least once.
"""
LOG.debug("Loading extension %s", ext_factory)
if isinstance(ext_factory, six.string_types):
# Load the factory
factory = importutils.import_class(ext_factory)
else:
factory = ext_factory
# Call it
LOG.debug("Calling extension factory %s", ext_factory)
factory(self)
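# Usage sketch (hypothetical dotted path): callers can pass either a string
# such as 'mypackage.contrib.foo.Foo', which is imported here, or the factory
# callable itself; either way the factory is invoked with this manager and is
# expected to call register() on it.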
def _load_extensions(self):
"""Load extensions specified on the command line."""
extensions = list(self.cls_list)
for ext_factory in extensions:
try:
self.load_extension(ext_factory)
except Exception as exc:
LOG.warn(_LW('Failed to load extension %(ext_factory)s: '
'%(exc)s'),
{'ext_factory': ext_factory, 'exc': exc})
class ControllerExtension(object):
"""Extend core controllers of nova OpenStack API.
Provide a way to extend existing nova OpenStack API core
controllers.
"""
def __init__(self, extension, collection, controller):
self.extension = extension
self.collection = collection
self.controller = controller
class ResourceExtension(object):
"""Add top level resources to the OpenStack API in nova."""
def __init__(self, collection, controller=None, parent=None,
collection_actions=None, member_actions=None,
custom_routes_fn=None, inherits=None, member_name=None):
if not collection_actions:
collection_actions = {}
if not member_actions:
member_actions = {}
self.collection = collection
self.controller = controller
self.parent = parent
self.collection_actions = collection_actions
self.member_actions = member_actions
self.custom_routes_fn = custom_routes_fn
self.inherits = inherits
self.member_name = member_name
def load_standard_extensions(ext_mgr, logger, path, package, ext_list=None):
"""Registers all standard API extensions."""
# Walk through all the modules in our directory...
our_dir = path[0]
for dirpath, dirnames, filenames in os.walk(our_dir):
# Compute the relative package name from the dirpath
relpath = os.path.relpath(dirpath, our_dir)
if relpath == '.':
relpkg = ''
else:
relpkg = '.%s' % '.'.join(relpath.split(os.sep))
# Now, consider each file in turn, only considering .py files
for fname in filenames:
root, ext = os.path.splitext(fname)
# Skip __init__ and anything that's not .py
if ext != '.py' or root == '__init__':
continue
# Try loading it
classname = "%s%s" % (root[0].upper(), root[1:])
classpath = ("%s%s.%s.%s" %
(package, relpkg, root, classname))
if ext_list is not None and classname not in ext_list:
logger.debug("Skipping extension: %s" % classpath)
continue
try:
ext_mgr.load_extension(classpath)
except Exception as exc:
logger.warn(_('Failed to load extension %(classpath)s: '
'%(exc)s'),
{'classpath': classpath, 'exc': exc})
# Now, let's consider any subdirectories we may have...
subdirs = []
for dname in dirnames:
# Skip it if it does not have __init__.py
if not os.path.exists(os.path.join(dirpath, dname, '__init__.py')):
continue
# If it has extension(), delegate...
ext_name = "%s%s.%s.extension" % (package, relpkg, dname)
try:
ext = importutils.import_class(ext_name)
except ImportError:
# extension() doesn't exist on it, so we'll explore
# the directory for ourselves
subdirs.append(dname)
else:
try:
ext(ext_mgr)
except Exception as exc:
logger.warn(_('Failed to load extension %(ext_name)s:'
'%(exc)s'),
{'ext_name': ext_name, 'exc': exc})
# Update the list of directories we'll explore...
dirnames[:] = subdirs
def core_authorizer(api_name, extension_name):
def authorize(context, target=None, action=None):
if target is None:
target = {'project_id': context.project_id,
'user_id': context.user_id}
if action is None:
act = '%s:%s' % (api_name, extension_name)
else:
act = '%s:%s:%s' % (api_name, extension_name, action)
nova.policy.enforce(context, act, target)
return authorize
def extension_authorizer(api_name, extension_name):
return core_authorizer('%s_extension' % api_name, extension_name)
def soft_authorizer(hard_authorizer, api_name, extension_name):
hard_authorize = hard_authorizer(api_name, extension_name)
def authorize(context, action=None):
try:
hard_authorize(context, action=action)
return True
except exception.Forbidden:
return False
return authorize
def soft_extension_authorizer(api_name, extension_name):
return soft_authorizer(extension_authorizer, api_name, extension_name)
def soft_core_authorizer(api_name, extension_name):
return soft_authorizer(core_authorizer, api_name, extension_name)
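# Usage sketch (hypothetical extension alias): an extension module would
# typically build its check once, e.g.
#     authorize = extension_authorizer('compute', 'my_extension')
# and call authorize(context) in its handlers; the soft_* variants wrap the
# same check but return True/False instead of raising Forbidden.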
def check_compute_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
@six.add_metaclass(abc.ABCMeta)
class V3APIExtensionBase(object):
"""Abstract base class for all V3 API extensions.
All V3 API extensions must derive from this class and implement
the abstract methods get_resources and get_controller_extensions
even if they just return an empty list. The extensions must also
define the abstract properties.
"""
def __init__(self, extension_info):
self.extension_info = extension_info
@abc.abstractmethod
def get_resources(self):
"""Return a list of resources extensions.
The extensions should return a list of ResourceExtension
objects. This list may be empty.
"""
pass
@abc.abstractmethod
def get_controller_extensions(self):
"""Return a list of controller extensions.
The extensions should return a list of ControllerExtension
objects. This list may be empty.
"""
pass
@abc.abstractproperty
def name(self):
"""Name of the extension."""
pass
@abc.abstractproperty
def alias(self):
"""Alias for the extension."""
pass
@abc.abstractproperty
def version(self):
"""Version of the extension."""
pass
def expected_errors(errors):
"""Decorator for v3 API methods which specifies expected exceptions.
Specify which exceptions may occur when an API method is called. If an
unexpected exception occurs then return a 500 instead and ask the user
of the API to file a bug report.
"""
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as exc:
if isinstance(exc, webob.exc.WSGIHTTPException):
if isinstance(errors, int):
t_errors = (errors,)
else:
t_errors = errors
if exc.code in t_errors:
raise
elif isinstance(exc, exception.PolicyNotAuthorized):
# Note(cyeoh): Special case to handle
# PolicyNotAuthorized exceptions so every
# extension method does not need to wrap authorize
# calls. ResourceExceptionHandler silently
# converts NotAuthorized to HTTPForbidden
raise
elif isinstance(exc, exception.ValidationError):
# Note(oomichi): Handle a validation error, which
# happens due to invalid API parameters, as an
# expected error.
raise
LOG.exception(_("Unexpected exception in API method"))
msg = _('Unexpected API Error. Please report this at '
'http://bugs.launchpad.net/nova/ and attach the Nova '
'API log if possible.\n%s') % type(exc)
raise webob.exc.HTTPInternalServerError(explanation=msg)
return wrapped
return decorator
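# Illustrative use of the decorator above (a sketch, not Nova code; the
# controller method is a made-up example):
def _example_expected_errors_usage():
    @expected_errors((404, 409))
    def show(req, id):
        # A 404 or 409 raised here propagates unchanged; any other exception
        # is converted into a generic HTTP 500 by the decorator.
        raise webob.exc.HTTPNotFound()
    return show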
|
{
"content_hash": "91fe0d2b7eb80a981feb575d3183c4df",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 79,
"avg_line_length": 32.66125760649087,
"alnum_prop": 0.5945845236616569,
"repo_name": "vmthunder/nova",
"id": "8bb8d38befd5c73f6da1c6e542de66fe25ec3ba0",
"size": "16776",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/openstack/extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class ParentConstraintTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
plane1 = GafferScene.Plane()
plane1["transform"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["scale"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["rotate"].setValue( imath.V3f( 1000, 20, 39 ) )
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
self.assertSceneValid( group["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertSceneValid( constraint["out"] )
self.assertEqual( constraint["out"].fullTransform( "/group/constrained" ), group["out"].fullTransform( "/group/target" ) )
def testRelativeTransform( self ) :
plane1 = GafferScene.Plane()
plane1["transform"]["translate"].setValue( imath.V3f( 1, 2, 3 ) )
plane1["transform"]["rotate"].setValue( imath.V3f( 0, 90, 0 ) )
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
self.assertSceneValid( group["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
constraint["relativeTransform"]["translate"].setValue( imath.V3f( 1, 0, 0 ) )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertSceneValid( constraint["out"] )
self.assertEqual( constraint["out"].fullTransform( "/group/constrained" ), imath.M44f().translate( imath.V3f( 1, 0, 0 ) ) * group["out"].fullTransform( "/group/target" ) )
def testDirtyPropagation( self ) :
plane1 = GafferScene.Plane()
plane2 = GafferScene.Plane()
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
constraint = GafferScene.ParentConstraint()
constraint["target"].setValue( "/group/target" )
constraint["in"].setInput( group["out"] )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
cs = GafferTest.CapturingSlot( constraint.plugDirtiedSignal() )
constraint["relativeTransform"]["translate"]["x"].setValue( 10 )
plugs = { x[0] for x in cs if not x[0].getName().startswith( "__" ) }
self.assertEqual(
plugs,
{
constraint["relativeTransform"]["translate"]["x"],
constraint["relativeTransform"]["translate"],
constraint["relativeTransform"],
constraint["out"]["bound"],
constraint["out"]["childBounds"],
constraint["out"]["transform"],
constraint["out"]
}
)
def testParentNodeEquivalence( self ) :
plane1 = GafferScene.Plane()
plane1["name"].setValue( "target" )
plane2 = GafferScene.Plane()
plane2["name"].setValue( "constrained" )
plane1["transform"]["rotate"]["y"].setValue( 45 )
plane2["transform"]["translate"]["x"].setValue( 1 )
parent = GafferScene.Parent()
parent["in"].setInput( plane1["out"] )
parent["parent"].setValue( "/target" )
parent["children"][0].setInput( plane2["out"] )
group = GafferScene.Group()
group["in"][0].setInput( plane1["out"] )
group["in"][1].setInput( plane2["out"] )
constraint = GafferScene.ParentConstraint()
constraint["in"].setInput( group["out"] )
constraint["target"].setValue( "/group/target" )
filter = GafferScene.PathFilter()
filter["paths"].setValue( IECore.StringVectorData( [ "/group/constrained" ] ) )
constraint["filter"].setInput( filter["out"] )
self.assertEqual( parent["out"].fullTransform( "/target/constrained" ), constraint["out"].fullTransform( "/group/constrained" ) )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "fbebd521c37a82ae6c4ee8a313297910",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 173,
"avg_line_length": 32.03649635036496,
"alnum_prop": 0.6746411483253588,
"repo_name": "boberfly/gaffer",
"id": "277192b58c2138c001e7515e3dfb5aa28476bdbe",
"size": "6192",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/GafferSceneTest/ParentConstraintTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41979"
},
{
"name": "C++",
"bytes": "7646009"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "8002810"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import urwid
import urwid.util
import os
from netlib.http import CONTENT_MISSING
import netlib.utils
from .. import utils
from ..models import decoded
from . import signals
try:
import pyperclip
except:
pyperclip = False
VIEW_FLOW_REQUEST = 0
VIEW_FLOW_RESPONSE = 1
METHOD_OPTIONS = [
("get", "g"),
("post", "p"),
("put", "u"),
("head", "h"),
("trace", "t"),
("delete", "d"),
("options", "o"),
("edit raw", "e"),
]
def is_keypress(k):
"""
Is this input event a keypress?
"""
if isinstance(k, basestring):
return True
def highlight_key(str, key, textattr="text", keyattr="key"):
l = []
parts = str.split(key, 1)
if parts[0]:
l.append((textattr, parts[0]))
l.append((keyattr, key))
if parts[1]:
l.append((textattr, parts[1]))
return l
KEY_MAX = 30
def format_keyvals(lst, key="key", val="text", indent=0):
"""
Format a list of (key, value) tuples.
If key is None, it's treated specially:
- We assume a sub-value, and add an extra indent.
- The value is treated as a pre-formatted list of directives.
"""
ret = []
if lst:
maxk = min(max(len(i[0]) for i in lst if i and i[0]), KEY_MAX)
for i, kv in enumerate(lst):
if kv is None:
ret.append(urwid.Text(""))
else:
if isinstance(kv[1], urwid.Widget):
v = kv[1]
elif kv[1] is None:
v = urwid.Text("")
else:
v = urwid.Text([(val, kv[1])])
ret.append(
urwid.Columns(
[
("fixed", indent, urwid.Text("")),
(
"fixed",
maxk,
urwid.Text([(key, kv[0] or "")])
),
v
],
dividechars = 2
)
)
return ret
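# Illustrative call (a sketch, not part of mitmproxy; the header names and
# values are made-up data): format_keyvals renders one aligned urwid row per
# (key, value) pair, with keys padded to the longest key, capped at KEY_MAX.
def _example_format_keyvals():
    return format_keyvals(
        [
            ("Host", "example.com"),
            ("Accept", "*/*"),
            None,  # rendered as an empty spacer row
        ],
        indent=4
    )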
def shortcuts(k):
if k == " ":
k = "page down"
elif k == "ctrl f":
k = "page down"
elif k == "ctrl b":
k = "page up"
elif k == "j":
k = "down"
elif k == "k":
k = "up"
return k
def fcol(s, attr):
s = unicode(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = u"\u21ba"
SYMBOL_RETURN = u"\u2190"
SYMBOL_MARK = u"\u25cf"
else:
SYMBOL_REPLAY = u"[r]"
SYMBOL_RETURN = u"<-"
SYMBOL_MARK = "[m]"
def raw_format_flow(f, focus, extended, padding):
f = dict(f)
pile = []
req = []
if extended:
req.append(
fcol(
utils.format_timestamp(f["req_timestamp"]),
"highlight"
)
)
else:
req.append(fcol(">>" if focus else " ", "focus"))
if f["marked"]:
req.append(fcol(SYMBOL_MARK, "mark"))
if f["req_is_replay"]:
req.append(fcol(SYMBOL_REPLAY, "replay"))
req.append(fcol(f["req_method"], "method"))
preamble = sum(i[1] for i in req) + len(req) - 1
if f["intercepted"] and not f["acked"]:
uc = "intercept"
elif f["resp_code"] or f["err_msg"]:
uc = "text"
else:
uc = "title"
req.append(
urwid.Text([(uc, f["req_url"])])
)
pile.append(urwid.Columns(req, dividechars=1))
resp = []
resp.append(
("fixed", preamble, urwid.Text(""))
)
if f["resp_code"]:
codes = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
ccol = codes.get(f["resp_code"] / 100, "code_other")
resp.append(fcol(SYMBOL_RETURN, ccol))
if f["resp_is_replay"]:
resp.append(fcol(SYMBOL_REPLAY, "replay"))
resp.append(fcol(f["resp_code"], ccol))
if f["intercepted"] and f["resp_code"] and not f["acked"]:
rc = "intercept"
else:
rc = "text"
if f["resp_ctype"]:
resp.append(fcol(f["resp_ctype"], rc))
resp.append(fcol(f["resp_clen"], rc))
resp.append(fcol(f["roundtrip"], rc))
elif f["err_msg"]:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(
urwid.Text([
(
"error",
f["err_msg"]
)
])
)
pile.append(urwid.Columns(resp, dividechars=1))
return urwid.Pile(pile)
# Save file to disk
def save_data(path, data, master, state):
if not path:
return
try:
with file(path, "wb") as f:
f.write(data)
except IOError as v:
signals.status_message.send(message=v.strerror)
def ask_save_overwite(path, data, master, state):
if not path:
return
path = os.path.expanduser(path)
if os.path.exists(path):
def save_overwite(k):
if k == "y":
save_data(path, data, master, state)
signals.status_prompt_onekey.send(
            prompt = "'" + path + "' already exists. Overwrite?",
keys = (
("yes", "y"),
("no", "n"),
),
callback = save_overwite
)
else:
save_data(path, data, master, state)
def ask_save_path(prompt, data, master, state):
signals.status_prompt_path.send(
prompt = prompt,
callback = ask_save_overwite,
args = (data, master, state)
)
def copy_flow_format_data(part, scope, flow):
if part == "u":
data = flow.request.url
else:
data = ""
if scope in ("q", "a"):
if flow.request.content is None or flow.request.content == CONTENT_MISSING:
return None, "Request content is missing"
with decoded(flow.request):
if part == "h":
data += netlib.http.http1.assemble_request(flow.request)
elif part == "c":
data += flow.request.content
else:
raise ValueError("Unknown part: {}".format(part))
if scope == "a" and flow.request.content and flow.response:
# Add padding between request and response
data += "\r\n" * 2
if scope in ("s", "a") and flow.response:
if flow.response.content is None or flow.response.content == CONTENT_MISSING:
return None, "Response content is missing"
with decoded(flow.response):
if part == "h":
data += netlib.http.http1.assemble_response(flow.response)
elif part == "c":
data += flow.response.content
else:
raise ValueError("Unknown part: {}".format(part))
return data, False
def copy_flow(part, scope, flow, master, state):
"""
part: _c_ontent, _h_eaders+content, _u_rl
scope: _a_ll, re_q_uest, re_s_ponse
"""
data, err = copy_flow_format_data(part, scope, flow)
if err:
signals.status_message.send(message=err)
return
if not data:
if scope == "q":
signals.status_message.send(message="No request content to copy.")
elif scope == "s":
signals.status_message.send(message="No response content to copy.")
else:
signals.status_message.send(message="No contents to copy.")
return
# pyperclip calls encode('utf-8') on data to be copied without checking.
# if data are already encoded that way UnicodeDecodeError is thrown.
toclip = ""
try:
toclip = data.decode('utf-8')
except (UnicodeDecodeError):
toclip = data
try:
pyperclip.copy(toclip)
except (RuntimeError, UnicodeDecodeError, AttributeError):
def save(k):
if k == "y":
ask_save_path("Save data", data, master, state)
signals.status_prompt_onekey.send(
prompt = "Cannot copy data to clipboard. Save as file?",
keys = (
("yes", "y"),
("no", "n"),
),
callback = save
)
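# Illustrative calls (a sketch, not part of mitmproxy) showing the single
# letter part/scope codes documented above; ``flow`` is assumed to be a
# complete HTTP flow object.
def _example_copy_flow_format_data(flow):
    url, err = copy_flow_format_data("u", "q", flow)    # request URL only
    text, err = copy_flow_format_data("h", "s", flow)   # response headers + body
    return url, text, err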
def ask_copy_part(scope, flow, master, state):
choices = [
("content", "c"),
("headers+content", "h")
]
if scope != "s":
choices.append(("url", "u"))
signals.status_prompt_onekey.send(
prompt = "Copy",
keys = choices,
callback = copy_flow,
args = (scope, flow, master, state)
)
def ask_save_body(part, master, state, flow):
"""
Save either the request or the response body to disk. part can either be
"q" (request), "s" (response) or None (ask user if necessary).
"""
request_has_content = flow.request and flow.request.content
response_has_content = flow.response and flow.response.content
if part is None:
# We first need to determine whether we want to save the request or the
# response content.
if request_has_content and response_has_content:
signals.status_prompt_onekey.send(
prompt = "Save",
keys = (
("request", "q"),
("response", "s"),
),
callback = ask_save_body,
args = (master, state, flow)
)
elif response_has_content:
ask_save_body("s", master, state, flow)
else:
ask_save_body("q", master, state, flow)
elif part == "q" and request_has_content:
ask_save_path(
"Save request content",
flow.request.get_decoded_content(),
master,
state
)
elif part == "s" and response_has_content:
ask_save_path(
"Save response content",
flow.response.get_decoded_content(),
master,
state
)
else:
signals.status_message.send(message="No content to save.")
flowcache = utils.LRUCache(800)
def format_flow(f, focus, extended=False, hostheader=False, padding=2,
marked=False):
d = dict(
intercepted = f.intercepted,
acked = f.reply.acked,
req_timestamp = f.request.timestamp_start,
req_is_replay = f.request.is_replay,
req_method = f.request.method,
req_url = f.request.pretty_url if hostheader else f.request.url,
err_msg = f.error.msg if f.error else None,
resp_code = f.response.status_code if f.response else None,
marked = marked,
)
if f.response:
if f.response.content:
contentdesc = netlib.utils.pretty_size(len(f.response.content))
elif f.response.content == CONTENT_MISSING:
contentdesc = "[content missing]"
else:
contentdesc = "[no content]"
duration = 0
if f.response.timestamp_end and f.request.timestamp_start:
duration = f.response.timestamp_end - f.request.timestamp_start
roundtrip = utils.pretty_duration(duration)
d.update(dict(
resp_code = f.response.status_code,
resp_is_replay = f.response.is_replay,
resp_clen = contentdesc,
roundtrip = roundtrip,
))
t = f.response.headers.get("content-type")
if t:
d["resp_ctype"] = t.split(";")[0]
else:
d["resp_ctype"] = ""
return flowcache.get(
raw_format_flow,
tuple(sorted(d.items())), focus, extended, padding
)
|
{
"content_hash": "0a6a523bf732f4fe5c11aa9fa529392a",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 89,
"avg_line_length": 27.490697674418605,
"alnum_prop": 0.5048642246848828,
"repo_name": "dweinstein/mitmproxy",
"id": "12fdfe27ec2dac0745abfccb661a23bdba1d0161",
"size": "11821",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libmproxy/console/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "CSS",
"bytes": "194068"
},
{
"name": "HTML",
"bytes": "2824"
},
{
"name": "JavaScript",
"bytes": "1755960"
},
{
"name": "Python",
"bytes": "666510"
},
{
"name": "Shell",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import base64
import json
import logging
import mimetools
import re
from django.conf.urls import include, patterns, url
from django.dispatch import receiver
from django.utils import six
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.six.moves.urllib.request import (Request as BaseURLRequest,
HTTPBasicAuthHandler,
urlopen)
from django.utils.translation import ugettext_lazy as _
from djblets.registries.errors import ItemLookupError
from djblets.registries.registry import (ALREADY_REGISTERED, LOAD_ENTRY_POINT,
NOT_REGISTERED)
import reviewboard.hostingsvcs.urls as hostingsvcs_urls
from reviewboard.registries.registry import EntryPointRegistry
from reviewboard.signals import initializing
class URLRequest(BaseURLRequest):
def __init__(self, url, body='', headers={}, method='GET'):
BaseURLRequest.__init__(self, url, body, headers)
self.method = method
def get_method(self):
return self.method
class HostingServiceClient(object):
"""Client for communicating with a hosting service's API.
This implementation includes abstractions for performing HTTP operations,
and wrappers for those to interpret responses as JSON data.
HostingService subclasses can also include an override of this class to add
additional checking (such as GitHub's checking of rate limit headers), or
add higher-level API functionality.
"""
def __init__(self, hosting_service):
pass
#
# HTTP utility methods
#
def http_delete(self, url, headers={}, *args, **kwargs):
"""Perform an HTTP DELETE on the given URL."""
return self.http_request(url, headers=headers, method='DELETE',
**kwargs)
def http_get(self, url, *args, **kwargs):
"""Perform an HTTP GET on the given URL."""
return self.http_request(url, method='GET', **kwargs)
def http_post(self, url, body=None, fields={}, files={}, content_type=None,
headers={}, *args, **kwargs):
"""Perform an HTTP POST on the given URL."""
headers = headers.copy()
if body is None:
if fields is not None:
body, content_type = self._build_form_data(fields, files)
else:
body = ''
if content_type:
headers['Content-Type'] = content_type
headers['Content-Length'] = '%d' % len(body)
return self.http_request(url, body=body, headers=headers,
method='POST', **kwargs)
def http_request(self, url, body=None, headers={}, method='GET', **kwargs):
"""Perform some HTTP operation on a given URL."""
r = self._build_request(url, body, headers, method=method, **kwargs)
u = urlopen(r)
return u.read(), u.headers
#
# JSON utility methods
#
def json_delete(self, *args, **kwargs):
"""Perform an HTTP DELETE and interpret the results as JSON."""
return self._do_json_method(self.http_delete, *args, **kwargs)
def json_get(self, *args, **kwargs):
"""Perform an HTTP GET and interpret the results as JSON."""
return self._do_json_method(self.http_get, *args, **kwargs)
def json_post(self, *args, **kwargs):
"""Perform an HTTP POST and interpret the results as JSON."""
return self._do_json_method(self.http_post, *args, **kwargs)
def _do_json_method(self, method, *args, **kwargs):
"""Internal helper for JSON operations."""
data, headers = method(*args, **kwargs)
if data:
data = json.loads(data)
return data, headers
#
# Internal utilities
#
def _build_request(self, url, body=None, headers={}, username=None,
password=None, method='GET'):
"""Build a URLRequest object, including HTTP Basic auth"""
r = URLRequest(url, body, headers, method=method)
if username is not None and password is not None:
if isinstance(username, six.text_type):
username = username.encode('utf-8')
if isinstance(password, six.text_type):
password = password.encode('utf-8')
auth_key = username + b':' + password
r.add_header(HTTPBasicAuthHandler.auth_header,
b'Basic %s' %
base64.b64encode(auth_key))
return r
def _build_form_data(self, fields, files):
"""Encodes data for use in an HTTP POST."""
BOUNDARY = mimetools.choose_boundary()
content = ""
for key in fields:
content += "--" + BOUNDARY + "\r\n"
content += "Content-Disposition: form-data; name=\"%s\"\r\n" % key
content += "\r\n"
content += six.text_type(fields[key]) + "\r\n"
for key in files:
filename = files[key]['filename']
value = files[key]['content']
content += "--" + BOUNDARY + "\r\n"
content += "Content-Disposition: form-data; name=\"%s\"; " % key
content += "filename=\"%s\"\r\n" % filename
content += "\r\n"
content += value + "\r\n"
content += "--" + BOUNDARY + "--\r\n"
content += "\r\n"
content_type = "multipart/form-data; boundary=%s" % BOUNDARY
return content, content_type
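# Illustrative sketch (not Review Board code): how the HTTP/JSON helpers above
# are typically called from a HostingService implementation. The URL and
# credentials are made-up example values.
def _example_client_usage(client):
    # GET a JSON document, authenticating with HTTP Basic auth.
    data, headers = client.json_get(
        'https://api.example.com/repos',
        username='user',
        password='secret')
    return data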
class HostingService(object):
"""An interface to a hosting service for repositories and bug trackers.
HostingService subclasses are used to more easily configure repositories
and to make use of third party APIs to perform special operations not
otherwise usable by generic repositories.
A HostingService can specify forms for repository and bug tracker
configuration.
It can also provide a list of repository "plans" (such as public
repositories, private repositories, or other types available to the hosting
service), along with configuration specific to the plan. These plans will
be available when configuring the repository.
"""
name = None
plans = None
supports_bug_trackers = False
supports_post_commit = False
supports_repositories = False
supports_ssh_key_association = False
supports_two_factor_auth = False
supports_list_remote_repositories = False
has_repository_hook_instructions = False
self_hosted = False
repository_url_patterns = None
client_class = HostingServiceClient
#: Optional form used to configure authentication settings for an account.
auth_form = None
# These values are defaults that can be overridden in repository_plans
# above.
needs_authorization = False
supported_scmtools = []
form = None
fields = []
repository_fields = {}
bug_tracker_field = None
def __init__(self, account):
assert account
self.account = account
self.client = self.client_class(self)
def is_authorized(self):
"""Returns whether or not the account is currently authorized.
An account may no longer be authorized if the hosting service
switches to a new API that doesn't match the current authorization
records. This function will determine whether the account is still
considered authorized.
"""
return False
def get_password(self):
"""Returns the raw password for this hosting service.
Not all hosting services provide this, and not all would need it.
It's primarily used when building a Subversion client, or other
SCMTools that still need direct access to the repository itself.
"""
return None
def is_ssh_key_associated(self, repository, key):
"""Returns whether or not the key is associated with the repository.
If the ``key`` (an instance of :py:mod:`paramiko.PKey`) is present
among the hosting service's deploy keys for a given ``repository`` or
account, then it is considered associated. If there is a problem
checking with the hosting service, an :py:exc:`SSHKeyAssociationError`
will be raised.
"""
raise NotImplementedError
def associate_ssh_key(self, repository, key):
"""Associates an SSH key with a given repository
The ``key`` (an instance of :py:mod:`paramiko.PKey`) will be added to
the hosting service's list of deploy keys (if possible). If there
is a problem uploading the key to the hosting service, a
:py:exc:`SSHKeyAssociationError` will be raised.
"""
raise NotImplementedError
def authorize(self, username, password, hosting_url, credentials,
two_factor_auth_code=None, local_site_name=None,
*args, **kwargs):
"""Authorize an account for the hosting service.
Args:
username (unicode):
The username for the account.
password (unicode):
The password for the account.
hosting_url (unicode):
The hosting URL for the service, if self-hosted.
credentials (dict):
All credentials provided by the authentication form. This
will contain the username, password, and anything else
provided by that form.
two_factor_auth_code (unicode, optional):
The two-factor authentication code provided by the user.
local_site_name (unicode, optional):
The Local Site name, if any, that the account should be
bound to.
*args (tuple):
Extra unused positional arguments.
**kwargs (dict):
Extra keyword arguments containing values from the
repository's configuration.
Raises:
reviewboard.hostingsvcs.errors.AuthorizationError:
The credentials provided were not valid.
reviewboard.hostingsvcs.errors.TwoFactorAuthCodeRequiredError:
A two-factor authentication code is required to authorize
this account. The request must be retried with the same
credentials and with the ``two_factor_auth_code`` parameter
provided.
"""
raise NotImplementedError
def check_repository(self, path, username, password, scmtool_class,
local_site_name, *args, **kwargs):
"""Checks the validity of a repository configuration.
This performs a check against the hosting service or repository
to ensure that the information provided by the user represents
a valid repository.
This is passed in the repository details, such as the path and
raw credentials, as well as the SCMTool class being used, the
LocalSite's name (if any), and all field data from the
HostingServiceForm as keyword arguments.
"""
return scmtool_class.check_repository(path, username, password,
local_site_name)
def get_file(self, repository, path, revision, *args, **kwargs):
if not self.supports_repositories:
raise NotImplementedError
return repository.get_scmtool().get_file(path, revision, **kwargs)
def get_file_exists(self, repository, path, revision, *args, **kwargs):
if not self.supports_repositories:
raise NotImplementedError
return repository.get_scmtool().file_exists(path, revision, **kwargs)
def get_branches(self, repository):
"""Get a list of all branches in the repositories.
This should be implemented by subclasses, and is expected to return a
list of Branch objects. One (and only one) of those objects should have
the "default" field set to True.
"""
raise NotImplementedError
def get_commits(self, repository, branch=None, start=None):
"""Get a list of commits backward in history from a given point.
This should be implemented by subclasses, and is expected to return a
list of Commit objects (usually 30, but this is flexible depending on
        the limitations of the APIs provided).
This can be called multiple times in succession using the "parent"
field of the last entry as the start parameter in order to paginate
through the history of commits in the repository.
"""
raise NotImplementedError
def get_change(self, repository, revision):
"""Get an individual change.
This should be implemented by subclasses, and is expected to return a
tuple of (commit message, diff), both strings.
"""
raise NotImplementedError
def get_remote_repositories(self, owner=None, owner_type=None,
filter_type=None, start=None, per_page=None):
"""Get a list of remote repositories for the owner.
This should be implemented by subclasses, and is expected to return an
APIPaginator providing pages of RemoteRepository objects.
The ``start`` and ``per_page`` parameters can be used to control
where pagination begins and how many results are returned per page,
if the subclass supports it.
``owner`` is expected to default to a reasonable value (typically
the linked account's username). The hosting service may also require
an ``owner_type`` value that identifies what the ``owner`` means.
This value is specific to the hosting service backend.
Likewise, ``filter_type`` is specific to the hosting service backend.
        If supported, it may be used to filter the types of remote
        repositories returned.
"""
raise NotImplementedError
def get_remote_repository(self, repository_id):
"""Get the remote repository for the ID.
This should be implemented by subclasses, and is expected to return
a RemoteRepository if found, or raise ObjectDoesNotExist if not found.
"""
raise NotImplementedError
@classmethod
def get_repository_fields(cls, username, hosting_url, plan, tool_name,
field_vars):
if not cls.supports_repositories:
raise NotImplementedError
# Grab the list of fields for population below. We have to do this
# differently depending on whether or not this hosting service has
# different repository plans.
fields = cls._get_field(plan, 'repository_fields')
new_vars = field_vars.copy()
new_vars['hosting_account_username'] = username
if cls.self_hosted:
new_vars['hosting_url'] = hosting_url
new_vars['hosting_domain'] = urlparse(hosting_url)[1]
results = {}
assert tool_name in fields
for field, value in six.iteritems(fields[tool_name]):
try:
results[field] = value % new_vars
except KeyError as e:
logging.error('Failed to generate %s field for hosting '
'service %s using %s and %r: Missing key %s'
% (field, six.text_type(cls.name), value,
new_vars, e),
exc_info=1)
raise KeyError(
_('Internal error when generating %(field)s field '
'(Missing key "%(key)s"). Please report this.') % {
'field': field,
'key': e,
})
return results
def get_repository_hook_instructions(self, request, repository):
"""Returns instructions for setting up incoming webhooks.
Subclasses can override this (and set
`has_repository_hook_instructions = True` on the subclass) to provide
instructions that administrators can see when trying to configure an
incoming webhook for the hosting service.
This is expected to return HTML for the instructions. The function
is responsible for escaping any content.
"""
raise NotImplementedError
@classmethod
def get_bug_tracker_requires_username(cls, plan=None):
if not cls.supports_bug_trackers:
raise NotImplementedError
return ('%(hosting_account_username)s' in
cls._get_field(plan, 'bug_tracker_field', ''))
@classmethod
def get_bug_tracker_field(cls, plan, field_vars):
if not cls.supports_bug_trackers:
raise NotImplementedError
bug_tracker_field = cls._get_field(plan, 'bug_tracker_field')
if not bug_tracker_field:
return ''
try:
return bug_tracker_field % field_vars
except KeyError as e:
logging.error('Failed to generate %s field for hosting '
'service %s using %r: Missing key %s'
% (bug_tracker_field, six.text_type(cls.name),
field_vars, e),
exc_info=1)
raise KeyError(
_('Internal error when generating %(field)s field '
'(Missing key "%(key)s"). Please report this.') % {
'field': bug_tracker_field,
'key': e,
})
@classmethod
def _get_field(cls, plan, name, default=None):
if cls.plans:
assert plan
for plan_name, info in cls.plans:
if plan_name == plan and name in info:
return info[name]
return getattr(cls, name, default)
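# Minimal illustrative subclass (a sketch, not a real Review Board hosting
# service; every name and field value below is invented) showing the class
# attributes a typical service overrides:
class _ExampleHostingService(HostingService):
    name = 'Example Hosting'
    supports_repositories = True
    supports_bug_trackers = True
    supported_scmtools = ['Git']
    repository_fields = {
        'Git': {
            'path': 'git@example.com:%(example_repo_name)s.git',
        },
    }
    bug_tracker_field = 'https://example.com/%(example_repo_name)s/issues/%%s'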
_hostingsvcs_urlpatterns = {}
class HostingServiceRegistry(EntryPointRegistry):
"""A registry for managing hosting services."""
entry_point = 'reviewboard.hosting_services'
lookup_attrs = ['hosting_service_id']
errors = {
ALREADY_REGISTERED: _(
'"%(item)s" is already a registered hosting service.'
),
LOAD_ENTRY_POINT: _(
'Unable to load repository hosting service %(entry_point)s: '
'%(error)s.'
),
NOT_REGISTERED: _(
'"%(attr_value)s" is not a registered hosting service.'
),
}
def __init__(self):
super(HostingServiceRegistry, self).__init__()
self._url_patterns = {}
def unregister(self, service):
"""Unregister a hosting service.
This will remove all registered URLs that the hosting service has
defined.
Args:
service (type):
The
:py:class:`~reviewboard.hostingsvcs.service.HostingService`
subclass.
"""
super(HostingServiceRegistry, self).unregister(service)
if service.hosting_service_id in self._url_patterns:
cls_urlpatterns = self._url_patterns[service.hosting_service_id]
hostingsvcs_urls.dynamic_urls.remove_patterns(cls_urlpatterns)
del self._url_patterns[service.hosting_service_id]
def process_value_from_entry_point(self, entry_point):
"""Load the class from the entry point.
The ``id`` attribute will be set on the class from the entry point's
name.
Args:
entry_point (pkg_resources.EntryPoint):
The entry point.
Returns:
type:
The :py:class:`HostingService` subclass.
"""
cls = entry_point.load()
cls.hosting_service_id = entry_point.name
return cls
def register(self, service):
"""Register a hosting service.
This also adds the URL patterns defined by the hosting service. If the
hosting service has a :py:attr:`HostingService.repository_url_patterns`
attribute that is non-``None``, they will be automatically added.
Args:
service (type):
The :py:class:`HostingService` subclass.
"""
super(HostingServiceRegistry, self).register(service)
if service.repository_url_patterns:
cls_urlpatterns = patterns(
'',
url(r'^(?P<hosting_service_id>%s)/'
% re.escape(service.hosting_service_id),
include(service.repository_url_patterns)))
self._url_patterns[service.hosting_service_id] = cls_urlpatterns
hostingsvcs_urls.dynamic_urls.add_patterns(cls_urlpatterns)
_hosting_service_registry = HostingServiceRegistry()
def get_hosting_services():
"""Return the list of hosting services.
Returns:
list:
The :py:class:`~reviewboard.hostingsvcs.service.HostingService`
subclasses.
"""
return list(_hosting_service_registry)
def get_hosting_service(name):
"""Return the hosting service with the given name.
If the hosting service is not found, None will be returned.
"""
try:
return _hosting_service_registry.get('hosting_service_id', name)
except ItemLookupError:
return None
def register_hosting_service(name, cls):
"""Register a custom hosting service class.
A name can only be registered once. A KeyError will be thrown if attempting
to register a second time.
Args:
name (unicode):
The name of the hosting service.
cls (type):
The hosting service class. This should be a subclass of
:py:class:`~reviewboard.hostingsvcs.service.HostingService`.
"""
cls.hosting_service_id = name
_hosting_service_registry.register(cls)
def unregister_hosting_service(name):
"""Unregister a previously registered hosting service.
Args:
name (unicode):
The name of the hosting service.
"""
try:
_hosting_service_registry.unregister_by_attr('hosting_service_id',
name)
except ItemLookupError as e:
logging.error('Failed to unregister unknown hosting service "%s"'
% name)
raise e
@receiver(initializing, dispatch_uid='populate_hosting_services')
def _on_initializing(**kwargs):
_hosting_service_registry.populate()
|
{
"content_hash": "e7aefcc9758163dfd2ffdeecaaf19d5f",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 79,
"avg_line_length": 35.63333333333333,
"alnum_prop": 0.6100939908236447,
"repo_name": "sgallagher/reviewboard",
"id": "2eb1efcc6eae71fd99ee4efcc819ae19e81017f9",
"size": "22449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "reviewboard/hostingsvcs/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "225650"
},
{
"name": "HTML",
"bytes": "185770"
},
{
"name": "JavaScript",
"bytes": "2121168"
},
{
"name": "Python",
"bytes": "4153859"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
"""xla is an experimental library that provides XLA support APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.compiler.jit.ops import xla_ops
from tensorflow.compiler.jit.ops import xla_ops_grad # pylint: disable=unused-import
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.distribute import summary_op_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
_XLA_COMPILE_ATTR = '_xla_compile_id'
_MAX_WARNING_LINES = 5
# Operations that indicate some error in the user's graph. For example, XLA
# computation should not have any Placeholder op.
_BLACKLISTED_OPS = set([
'Placeholder',
])
# XLA doesn't currently support reading of intermediate tensors, thus some ops
# are not supported.
_UNSUPPORTED_OPS = set([
'AudioSummary',
'AudioSummaryV2',
'HistogramSummary',
'ImageSummary',
'MergeSummary',
'Print',
'ScalarSummary',
'TensorSummary',
'TensorSummaryV2',
])
@tf_export('xla.experimental.compile')
def compile(computation, inputs=None): # pylint: disable=redefined-builtin
"""Builds an operator that compiles and runs `computation` with XLA.
NOTE: In eager mode, `computation` will have `@tf.function` semantics.
Args:
computation: A Python function that builds a computation to apply to the
input. If the function takes n inputs, 'inputs' should be a list of n
tensors.
`computation` may return a list of operations and tensors. Tensors must
come before operations in the returned list. The return value of
`compile` is a list of tensors corresponding to the tensors from the
output of `computation`.
All `Operation`s returned from `computation` will be executed when
evaluating any of the returned output tensors.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
tensors. Note that passing an N-dimension list of compatible values will
      result in an N-dimension list of scalar tensors rather than a single Rank-N
      tensor. If you need different behavior, convert part of inputs to tensors
with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include:
1) None output: a NoOp would be returned which control-depends on
computation.
2) Single value output: A tuple containing the value would be returned.
3) Operation-only outputs: a NoOp would be returned which
control-depends on computation.
TODO(b/121383831): Investigate into removing these special cases.
Raises:
RuntimeError: if called when eager execution is enabled.
"""
if context.executing_eagerly():
@def_function.function
def xla_compile_wrapper():
return _compile_internal(computation, inputs)
return xla_compile_wrapper()
return _compile_internal(computation, inputs)
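# Illustrative use (a sketch, not library code; the model function and input
# tensors are made-up examples):
#
#   def model(x, y):
#     return tf.reduce_sum(x * y)
#
#   a = tf.constant([1.0, 2.0])
#   b = tf.constant([3.0, 4.0])
#   (result,) = tf.xla.experimental.compile(model, inputs=[a, b])
#
# A single-value output is wrapped in a tuple, as noted in the docstring above.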
class XLACompileContext(control_flow_ops.XLAControlFlowContext):
"""A `ControlFlowContext` for nodes inside an XLA computation cluster.
  THIS IS ONLY FOR TENSORFLOW INTERNAL IMPLEMENTATION, DO NOT USE DIRECTLY.
The primary role of `XLACompileContext` is to mark operators inside a
xla.compile() computation with attribute "_xla_compile_id=XYZ", where XYZ is
a unique name.
`ControlFlowContext` is used to perform the annotation since it integrates
with Tensorflow constructs like ResourceVariables. For example, if a
`ResourceVariable` is constructed inside a xla.compile() block, the
`ResourceVariable` implementation can use
`with ops.control_dependencies(None)` to build the variable's definition
outside the compiled computation.
"""
def __init__(self, name, pivot):
"""Builds a new XLACompileContext.
Args:
name: a unique name for the context, used to populate the
`_xla_compile_id` attribute.
pivot: a pivot node. Nodes in the XLACompileContext that do not have any
inputs will have a control dependency on the pivot node. This ensures
that nodes are correctly included in any enclosing control flow
contexts.
"""
super(XLACompileContext, self).__init__()
self._name = name
self._name_as_bytes = compat.as_bytes(name)
self._unsupported_ops = []
self._pivot = pivot
def report_unsupported_operations(self):
if self._unsupported_ops:
op_str = '\n'.join([
' %s (%s)' % (op.type, op.name)
for op in self._unsupported_ops[:_MAX_WARNING_LINES]
])
logging.warning('%d unsupported operations found: \n%s',
len(self._unsupported_ops), op_str)
if len(self._unsupported_ops) > _MAX_WARNING_LINES:
logging.warning('... and %d more',
len(self._unsupported_ops) - _MAX_WARNING_LINES)
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
internal_control_inputs = []
external_control_inputs = []
for x in op.control_inputs:
# pylint: disable=protected-access
is_internal_op = False
ctxt = x._get_control_flow_context()
while ctxt is not None:
if ctxt == self:
is_internal_op = True
break
ctxt = ctxt._outer_context
if is_internal_op:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
# pylint: enable=protected-access
# pylint: disable=protected-access
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
# pylint: enable=protected-access
return internal_control_inputs, external_control_inputs
def AddOp(self, op):
"""Create op in XLACompileContext and notifies outer context recursively."""
# pylint: disable=protected-access
if op.type in _BLACKLISTED_OPS:
logging.error(
'Operation of type %s (%s) is not supported in XLA. Execution will '
'fail if this op is used in the graph. ', op.type, op.name)
# TODO(ycao): Automatically disable summaries instead of reporting them.
if op.type in _UNSUPPORTED_OPS:
self._unsupported_ops.append(op)
if any(x.dtype._is_ref_dtype for x in op.inputs):
raise NotImplementedError(
'Non-resource Variables are not supported inside XLA computations '
'(operator name: %s)' % op.name)
if _XLA_COMPILE_ATTR in op.node_def.attr:
raise ValueError('XLA compiled computations cannot be nested, (operator '
'name: %s)' % op.name)
op._set_attr(
_XLA_COMPILE_ATTR, attr_value_pb2.AttrValue(s=self._name_as_bytes))
op.graph.prevent_feeding(op)
op.graph.prevent_fetching(op)
# Remove any control edges from outer control flow contexts. These may cause
# mismatched frame errors. An example is when one of op's inputs is
# generated in a different While control flow context.
(internal_control_inputs,
external_control_inputs) = self._RemoveExternalControlEdges(op)
if not op.inputs:
# Add a control edge from the control pivot to this op.
if not internal_control_inputs:
# pylint: disable=protected-access
op._add_control_input(self._pivot)
# pylint: enable=protected-access
else:
for index in xrange(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x is not x:
op._update_input(index, real_x) # pylint: disable=protected-access
if external_control_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(phawkins): fix that.
with ops.control_dependencies(None):
self.Enter()
external_control_inputs = [
array_ops.identity(x.outputs[0]).op
for x in external_control_inputs
if x.outputs
]
self.Exit()
# pylint: disable=protected-access
op._add_control_inputs(external_control_inputs)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
context = self
while context is not None:
# pylint: disable=protected-access
context._values.update(output_names)
context = context._outer_context
# pylint: enable=protected-access
if self._outer_context:
self._outer_context.AddInnerOp(op)
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context.
result = self._external_values.get(val.name)
return val if result is None else result
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddInnerOp(self, op):
self.AddOp(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
@property
def grad_state(self):
# Define the gradient loop state associated with the XLACompileContext to
# be None as the XLACompileContext does not get nested nor does the
# grad_state outside the XLACompileContext affect the graph inside so the
# grad_state should be as if this is the top-level gradient state.
return None
@property
def back_prop(self):
"""Forwards to the enclosing while context, if any."""
if self.GetWhileContext():
return self.GetWhileContext().back_prop
return False
def _compile_internal(computation, inputs=None):
  """Builds graph operators that compile and symbolically execute computation.
Args:
computation: A Python function that builds the computation to compile and
execute.
inputs: A list of inputs or `None` (equivalent to an empty list). Each input
can be a nested structure containing values that are convertible to
tensors. Note that passing an N-dimension list of compatible values will
result in a N-dimension list of scalar tensors rather than a single Rank-N
tensors. If you need different behavior, convert part of inputs to tensors
with `tf.convert_to_tensor`.
Returns:
Same data structure as if computation(*inputs) is called directly with some
exceptions for correctness. Exceptions include: 1) None output 2) Single
value output 3) Operation-only outputs
Raises:
ValueError: If any element in computation outputs is neither an operations
or a value that can be converted to tensor.
ValueError: If computation outputs is non-flat and contains any Operations.
TypeError: If `inputs` is not a list or tuple.
"""
if inputs is None:
inputs = []
if not isinstance(inputs, collections.Sequence):
raise TypeError('inputs must be a list')
# Flatten inputs.
flat_inputs = nest.flatten(inputs)
# Converts inputs to Tensors.
flat_inputs = [ops.convert_to_tensor(x) for x in flat_inputs]
cluster_name = ops.get_default_graph().unique_name('cluster')
pivot = control_flow_ops.no_op(name=cluster_name + '/pivot')
context = XLACompileContext(name=cluster_name, pivot=pivot)
try:
context.Enter()
# Add identity ops so even unused inputs are 'consumed' by the
# computation.
flat_inputs = [
array_ops.identity(x, name='input_{}'.format(i))
for i, x in enumerate(flat_inputs)
]
# Re-pack flat_inputs in same structure as 'inputs'.
computation_inputs = nest.pack_sequence_as(
structure=inputs, flat_sequence=flat_inputs)
# Only resource variables work inside an XLA computation, so turn on
# resource variables for the computation.
vscope = variable_scope.get_variable_scope()
saved_use_resource = vscope.use_resource
vscope.set_use_resource(True)
with _disable_summary_context():
outputs = computation(*computation_inputs)
# Restore variable scope after computation.
vscope.set_use_resource(saved_use_resource)
outputs_is_flat = is_flat(outputs)
if outputs_is_flat:
output_tensors, control_deps = _postprocess_flat_outputs(outputs)
else:
output_tensors, control_deps = _postprocess_non_flat_outputs(outputs)
context.ExitResult(output_tensors)
finally:
context.report_unsupported_operations()
context.Exit()
# When XLA computation returns only operations and no tensors, a NoOp
# dependent on the operations in outputs is returned. Otherwise final
# outputs would be empty and there is no way to trigger returned
# operations.
if not output_tensors:
return control_flow_ops.group(control_deps, name='output_0')
output_tensors = [
xla_ops.xla_cluster_output(o, name='output{}'.format(i))
for i, o in enumerate(output_tensors)
]
with ops.control_dependencies(control_deps):
# Wraps the outputs in identity operators that carries control
# dependencies.
output_tensors = [
array_ops.identity(o, name='output_%d' % i)
for i, o in enumerate(output_tensors)
]
# If `computation` returned non-flat output structure, pack output tensors
# back into same structure.
if not outputs_is_flat:
output_tensors = nest.pack_sequence_as(
structure=outputs, flat_sequence=output_tensors)
return output_tensors
def is_flat(outputs):
"""Checks if outputs is a flat structure.
Following structures and values are considered flat:
1) None
2) A single object
3) A list or tuple of Tensors/Operations
The only structures that this function understands are sequences and
dictionaries. E.g. this means that if outputs contains a single
user-defined Object, it is considered to be flat. Errors are raised later on
if that Object cannot be converted to a Tensor.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
A boolean indicates whether outputs is flat.
"""
# If outputs is a list or tuple, check if it has any nested structure. If
# there is, then outputs is non-flat.
if isinstance(outputs, collections.Sequence):
for o in outputs:
if isinstance(o, collections.Sequence) or isinstance(o, dict):
return False
# If outputs is a dict, it is non-flat.
if isinstance(outputs, dict):
return False
# Getting here means either outputs itself is a single non-structured value
# or it is a flat list of single non-structured values.
return True
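# Illustrative classification (a sketch, not library code) for is_flat(),
# where t1/t2 are Tensors and op is an Operation:
#   is_flat(None)            -> True
#   is_flat(t1)              -> True
#   is_flat([t1, t2, op])    -> True
#   is_flat([t1, [t2]])      -> False  (nested sequence)
#   is_flat({'out': t1})     -> False  (dict output)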
def _postprocess_flat_outputs(outputs):
"""Validates flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
Tensors and Operations extracted from outputs.
"""
  # The following code segment preserves legacy behavior. Previously we only
  # supported flat outputs, so for consistency it was nice to convert even a
  # single element into a tuple. But now that we support arbitrary output
# structure, this is no longer necessary.
# TODO(b/121383831): Migrate all legacy use cases and delete this special
# case.
# If the computation returns `None`, make it an empty tuple.
if outputs is None:
outputs = tuple()
# If the computation only returned one value, make it a tuple.
if not isinstance(outputs, collections.Sequence):
outputs = (outputs,)
# Append `no_op` here so that return value of this function always contains
# at least one op that can trigger XlaLaunch node.
outputs += (control_flow_ops.no_op(),)
try:
outputs = [
o if isinstance(o, ops.Operation) else ops.convert_to_tensor(o)
for o in outputs
]
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be Operations'
' or convertible to Tensors. Got error: "%s"' % str(e))
# Separates the returned Operations and Tensors.
output_operations = [o for o in outputs if isinstance(o, ops.Operation)]
output_tensors = [o for o in outputs if not isinstance(o, ops.Operation)]
if outputs != output_tensors + output_operations:
raise ValueError(
'XLA computation function must return zero or more Tensor values '
'followed by zero or more Operations.')
new_output_tensors = []
for t in output_tensors:
with ops.device(t.device if t.device else ''):
new_output_tensors.append(array_ops.identity(t))
return new_output_tensors, output_operations
def _postprocess_non_flat_outputs(outputs):
"""Validates non-flat outputs and adds back device assignments.
Args:
outputs: Output from `computation` inside `xla.compile`.
Returns:
Tensors extracted from outputs and an empty list because Operations are not
    allowed in non-flat outputs.
"""
# Convert all non-Operation outputs to Tensors.
new_output_tensors = []
for o in nest.flatten(outputs):
if isinstance(o, ops.Operation):
raise ValueError(
'xla.compile does not support Operation as return value in non-flat '
'output structure. You can set returned Operations as control '
'dependencies of returned Tensors so Operations are triggered when '
'Tensors are evaluated. Operation found: "%s"' % o.name)
try:
o = ops.convert_to_tensor(o)
except Exception as e:
raise ValueError(
'XLA computation function return values must all either be '
'Operations or convertible to Tensors. Got error: "%s"' % str(e))
# Makes sure even pass-through inputs/outputs are touched in compile
# context by creating an Identity node inside compile context.
with ops.device(o.device if o.device else ''):
new_output_tensors.append(array_ops.identity(o))
return new_output_tensors, []
@contextlib.contextmanager
def _disable_summary_context():
"""Enters a context where all summary ops are skipped.
Summaries are not yet supported in xla.compile(). So we provide this context
manager that can skip creating summary ops. This is a temporary workaround due
to XLA not supporting summary ops.
Yields:
None.
"""
original_skip_summary_func = summary_op_util.skip_summary
summary_op_util.skip_summary = lambda: True
try:
yield
finally:
summary_op_util.skip_summary = original_skip_summary_func
class _CapturedObject(object):
"""A placeholder to capture an object."""
def __init__(self):
self._object = None
def capture(self, o):
if self._object:
raise RuntimeError(
'InternalError: _CapturedObject can capture only once. Please file '
'bug.')
self._object = o
def get(self):
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
scaffold_fn = captured_scaffold_fn.get()
if not scaffold_fn:
return None
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
return scaffold
def check_function_argument_count(func, input_arity, infeed_queue):
"""Validate the number of input arguments to an XLA function.
Args:
func: the Python function that will be called to generate the body of an XLA
computation graph.
input_arity: the number of explicit arguments supplied by the caller.
infeed_queue: if not None, the infeed queue that will supply
additional arguments to the function.
Returns:
None if function can be called with the supplied number of
arguments, or an error string if it cannot.
"""
def format_error(complaint, quantity):
return '%s %d argument%s' % (complaint, quantity, ''
if quantity == 1 else 's')
num_args_supplied = input_arity
if infeed_queue is not None:
num_args_supplied += infeed_queue.number_of_tuple_elements
arg_spec = tf_inspect.getargspec(func)
num_func_args = len(arg_spec.args)
if arg_spec.defaults is None:
num_func_defaults = 0
else:
num_func_defaults = len(arg_spec.defaults)
min_func_args = num_func_args - num_func_defaults
if num_args_supplied < min_func_args:
# The required number of arguments is not enough to call the function.
if num_func_defaults == 0 and arg_spec.varargs is None:
return format_error('exactly', num_func_args)
else:
return format_error('at least', min_func_args)
if arg_spec.varargs is None and num_args_supplied > num_func_args:
# The required number of arguments is too many to call the function.
if num_func_defaults == 0:
return format_error('exactly', num_func_args)
else:
return format_error('at most', num_func_args)
# Reaching here means either
# 1) There are varargs, func can accept any number of arguments greater than
# the minimum.
# 2) Number of supplied arguments falls in range of acceptable argument count
# of func.
return None
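# Illustrative behaviour (a sketch, not library code) of the arity check above:
#   def body(a, b, c=1): ...
#   check_function_argument_count(body, 3, None)  -> None (callable as supplied)
#   check_function_argument_count(body, 1, None)  -> 'at least 2 arguments'
#   check_function_argument_count(body, 5, None)  -> 'at most 3 arguments'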
|
{
"content_hash": "211bfada010909138cc1e4027bc39fb7",
"timestamp": "",
"source": "github",
"line_count": 603,
"max_line_length": 85,
"avg_line_length": 35.966832504145934,
"alnum_prop": 0.6984046477314644,
"repo_name": "chemelnucfin/tensorflow",
"id": "55bfaeb39312c950d4adbaa5f3cf0ecddb4faca6",
"size": "22376",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/compiler/xla/xla.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "16146"
},
{
"name": "C",
"bytes": "825231"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "75313939"
},
{
"name": "CMake",
"bytes": "207856"
},
{
"name": "Dockerfile",
"bytes": "80130"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "881711"
},
{
"name": "Jupyter Notebook",
"bytes": "1113647"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "853297"
},
{
"name": "Makefile",
"bytes": "109340"
},
{
"name": "Objective-C",
"bytes": "105235"
},
{
"name": "Objective-C++",
"bytes": "258793"
},
{
"name": "PHP",
"bytes": "38007"
},
{
"name": "Pascal",
"bytes": "3741"
},
{
"name": "Pawn",
"bytes": "14380"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "50825074"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "532610"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
""" ChassisSlot sets up a structure for tracking position within a chassis. """
from sqlalchemy import Column, Integer, ForeignKey, PrimaryKeyConstraint
from sqlalchemy.orm import relation, backref
from aquilon.aqdb.model import Base, Machine, Chassis
_TN = 'chassis_slot'
class ChassisSlot(Base):
""" ChassisSlot allows a Machine to be assigned to each unique position
within a Chassis. """
__tablename__ = _TN
chassis_id = Column(Integer, ForeignKey('chassis.hardware_entity_id',
name='%s_chassis_fk' % _TN,
ondelete='CASCADE'),
nullable=False)
slot_number = Column(Integer, nullable=False, autoincrement=False)
# TODO: Code constraint that these are Blades...
machine_id = Column(Integer, ForeignKey('machine.machine_id',
name='%s_machine_fk' % _TN),
nullable=True)
# TODO: need a unique key against this, but what if it takes 2 slots?
# TODO: remove delete-orphan?
chassis = relation(Chassis, innerjoin=True,
backref=backref('slots', cascade='delete, delete-orphan',
order_by=[slot_number]))
# No delete-orphan here, it's fine to leave the slot in place even if the
# machine is removed
machine = relation(Machine,
backref=backref('chassis_slot', cascade='all'))
__table_args__ = (PrimaryKeyConstraint(chassis_id, slot_number),)
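# Illustrative use (a sketch, not Aquilon code; the session, chassis and
# machine objects are assumed to already exist) of placing a machine into
# slot 3 of a chassis:
#   slot = ChassisSlot(chassis=chassis, slot_number=3, machine=machine)
#   session.add(slot)
#   session.flush()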
|
{
"content_hash": "bf972b09c52441721238680fd8ad26f4",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 80,
"avg_line_length": 40.23076923076923,
"alnum_prop": 0.5927342256214149,
"repo_name": "stdweird/aquilon",
"id": "d1c7e3e30c022fcbb7664fafd495f999aacb469a",
"size": "2277",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/aqdb/model/chassis_slot.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "3791"
},
{
"name": "Makefile",
"bytes": "5024"
},
{
"name": "Mako",
"bytes": "3996"
},
{
"name": "PLSQL",
"bytes": "69088"
},
{
"name": "Perl",
"bytes": "5030"
},
{
"name": "Python",
"bytes": "4257490"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "22083"
}
],
"symlink_target": ""
}
|
"""Common utilities for the tests
"""
import time
import unittest
import random
random.seed()
from antevents.base import IterableAsPublisher, DefaultSubscriber, FatalError,\
SensorEvent
class RandomSensor:
def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
self.sensor_id = sensor_id
self.mean = mean
self.stddev = stddev
self.stop_after_events = stop_after_events
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield random.gauss(mean, stddev)
else: # go on forever
def generator():
while True:
yield random.gauss(mean, stddev)
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
if self.stop_after_events is None:
return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
(self.sensor_id, self.mean, self.stddev)
else:
return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
(self.sensor_id, self.mean, self.stddev, self.stop_after_events)
class ValueListSensor:
def __init__(self, sensor_id, values):
self.sensor_id = sensor_id
def generator():
for v in values:
yield v
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
return 'ValueListSensor(%s)' % self.sensor_id
def make_test_publisher(sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
"""Here is an exmple test publisher that generates a random value"""
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield SensorEvent(sensor_id, time.time(),
random.gauss(mean, stddev))
else: # go on forever
def generator():
while True:
yield SensorEvent(sensor_id, time.time(),
random.gauss(mean, stddev))
g = generator()
o = IterableAsPublisher(g, name='Sensor(%s)' % sensor_id)
return o
def make_test_publisher_from_vallist(sensor_id, values):
"""Create a publisher that generates the list of values when sampled, but uses
real timestamps.
"""
def generator():
for val in values:
yield SensorEvent(sensor_id, time.time(), val)
o = IterableAsPublisher(generator(), name='Sensor(%s)' % sensor_id)
return o
class ValidationSubscriber(DefaultSubscriber):
"""Compare the values in a event stream to the expected values.
Use the test_case for the assertions (for proper error reporting in a unit
test).
"""
def __init__(self, expected_stream, test_case,
extract_value_fn=lambda event:event.val):
self.expected_stream = expected_stream
self.next_idx = 0
self.test_case = test_case # this can be either a method or a class
self.extract_value_fn = extract_value_fn
self.completed = False
self.name = "ValidationSubscriber(%s)" % \
test_case.__class__.__name__ \
if isinstance(test_case, unittest.TestCase) \
else "ValidationSubscriber(%s.%s)" % \
(test_case.__self__.__class__.__name__,
test_case.__name__)
def on_next(self, x):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertLess(self.next_idx, len(self.expected_stream),
"Got an event after reaching the end of the expected stream")
expected = self.expected_stream[self.next_idx]
actual = self.extract_value_fn(x)
tcls.assertEqual(actual, expected,
"Values for element %d of event stream mismatch" %
self.next_idx)
self.next_idx += 1
def on_completed(self):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertEqual(self.next_idx, len(self.expected_stream),
"Got on_completed() before end of stream")
self.completed = True
def on_error(self, exc):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertTrue(False,
"Got an unexpected on_error call with parameter: %s" %
exc)
def __repr__(self):
return self.name
class SensorEventValidationSubscriber(DefaultSubscriber):
"""Compare the full events in a sensor event stream to the expected events.
Use the test_case for the assertions (for proper error reporting in a unit
test).
"""
def __init__(self, expected_sensor_events, test_case):
self.expected_sensor_events = expected_sensor_events
self.next_idx = 0
self.test_case = test_case
self.completed = False
def on_next(self, x):
tc = self.test_case
tc.assertLess(self.next_idx, len(self.expected_sensor_events),
"Got an event after reaching the end of the expected stream")
expected = self.expected_sensor_events[self.next_idx]
actual = x
tc.assertEqual(actual.val, expected.val,
"Values for element %d of event stream mismatch" % self.next_idx)
tc.assertEqual(actual.sensor_id, expected.sensor_id,
"sensor ids for element %d of event stream mismatch" % self.next_idx)
# since the timestamp is a floating point number, we only check that
# the timestamps are "close enough"
tc.assertAlmostEqual(actual.ts, expected.ts, places=5,
msg="Timestamps for element %d of event stream mismatch" % self.next_idx)
self.next_idx += 1
def on_completed(self):
tc = self.test_case
tc.assertEqual(self.next_idx, len(self.expected_sensor_events),
"Got on_completed() before end of stream")
self.completed = True
def on_error(self, exc):
tc = self.test_case
tc.assertTrue(False,
"Got an unexpected on_error call with parameter: %s" % exc)
class ValidateAndStopSubscriber(ValidationSubscriber):
"""A version of ValidationSubscriber that calls a stop
function after the specified events have been received.
"""
def __init__(self, expected_stream, test_case, stop_fn,
extract_value_fn=lambda event:event.val):
super().__init__(expected_stream, test_case,
extract_value_fn=extract_value_fn)
self.stop_fn = stop_fn
def on_next(self, x):
super().on_next(x)
if self.next_idx==len(self.expected_stream):
print("ValidateAndStopSubscriber: stopping")
self.stop_fn()
class CaptureSubscriber(DefaultSubscriber):
"""Capture the sequence of events in a list for later use.
"""
def __init__(self):
self.events = []
self.completed = False
def on_next(self, x):
self.events.append(x)
def on_completed(self):
self.completed = True
def on_error(self, e):
raise FatalError("Should not get on_error, got on_error(%s)" % e)
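# Hypothetical usage sketch: drive a ValidationSubscriber by hand with values
# produced by a ValueListSensor inside a unittest.TestCase. It relies only on
# the classes defined above, so nothing is assumed about the antevents
# scheduler API.
class SketchRoundTripTest(unittest.TestCase):
    def test_value_list_sensor_roundtrip(self):
        values = [1, 2, 3]
        sensor = ValueListSensor('sensor-1', values)
        validator = ValidationSubscriber(values, self,
                                         extract_value_fn=lambda v: v)
        for _ in values:
            validator.on_next(sensor.sample())
        validator.on_completed()
        self.assertTrue(validator.completed)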
|
{
"content_hash": "8ad852a79c9242f2b8832ecee6bad72a",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 102,
"avg_line_length": 37.79,
"alnum_prop": 0.5903678221751786,
"repo_name": "mpi-sws-rse/antevents-python",
"id": "bf2a7fc17edf725d9591443a41905a92f4d9eb54",
"size": "7650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "351"
},
{
"name": "Python",
"bytes": "242029"
},
{
"name": "Shell",
"bytes": "6508"
}
],
"symlink_target": ""
}
|
import os
from functions_framework import _function_registry
def test_get_function_signature():
test_cases = [
{
"name": "get decorator type",
"function": "my_func",
"registered_type": "http",
"flag_type": "event",
"env_type": "event",
"want_type": "http",
},
{
"name": "get flag type",
"function": "my_func_1",
"registered_type": "",
"flag_type": "event",
"env_type": "http",
"want_type": "event",
},
{
"name": "get env var",
"function": "my_func_2",
"registered_type": "",
"flag_type": "",
"env_type": "event",
"want_type": "event",
},
]
for case in test_cases:
_function_registry.REGISTRY_MAP[case["function"]] = case["registered_type"]
os.environ[_function_registry.FUNCTION_SIGNATURE_TYPE] = case["env_type"]
signature_type = _function_registry.get_func_signature_type(
case["function"], case["flag_type"]
)
assert signature_type == case["want_type"], case["name"]
def test_get_function_signature_default():
_function_registry.REGISTRY_MAP["my_func"] = ""
if _function_registry.FUNCTION_SIGNATURE_TYPE in os.environ:
del os.environ[_function_registry.FUNCTION_SIGNATURE_TYPE]
signature_type = _function_registry.get_func_signature_type("my_func", None)
assert signature_type == "http"
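# Annotation (not part of the test suite): the cases above exercise the
# precedence decorator registration > --signature-type flag >
# FUNCTION_SIGNATURE_TYPE environment variable, with "http" as the final
# default when none of them is set.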
|
{
"content_hash": "3d0d7446d7ca3f164368465782054105",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 83,
"avg_line_length": 31.612244897959183,
"alnum_prop": 0.5326016785022595,
"repo_name": "GoogleCloudPlatform/functions-framework-python",
"id": "e3ae3c7eea9427a0cd7152cc35bef94d29317b5f",
"size": "2124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_function_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "85"
},
{
"name": "Python",
"bytes": "148932"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
}
|
"""
Support for functionality to interact with FireTV devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.firetv/
"""
|
{
"content_hash": "484e186da5c8d74380b16f6d3fa3390d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 74,
"avg_line_length": 33.5,
"alnum_prop": 0.7910447761194029,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "68f556313328dee9dfaa86c3e50eba4c20d660ec",
"size": "201",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/firetv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
import queue
import threading
from .http_common import *
import simple_http_client
import utils
def pack_headers(headers):
out_list = []
for k, v in headers.items():
if isinstance(v, int):
out_list.append(b'%s: %d\r\n' % (utils.to_bytes(k), v))
else:
out_list.append(b'%s: %s\r\n' % (utils.to_bytes(k), utils.to_bytes(v)))
return b''.join(out_list)
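# Worked example of pack_headers (illustrative comment only; it assumes
# utils.to_bytes encodes str to bytes and passes bytes through unchanged):
#
#     pack_headers({b'Host': b'example.com', b'Content-Length': 0})
#     -> b'Host: example.com\r\nContent-Length: 0\r\n'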
class Http1Worker(HttpWorker):
version = "1.1"
def __init__(self, logger, ip_manager, config, ssl_sock, close_cb, retry_task_cb, idle_cb, log_debug_data):
super(Http1Worker, self).__init__(logger, ip_manager, config, ssl_sock,
close_cb, retry_task_cb, idle_cb, log_debug_data)
self.task = None
self.request_onway = False
self.transfered_size = 0
self.trace_time = []
self.trace_time.append([ssl_sock.create_time, "connect"])
self.record_active("init")
self.task_queue = queue.Queue()
threading.Thread(target=self.work_loop).start()
self.idle_cb()
if self.config.http1_first_ping_wait or \
self.config.http1_ping_interval or \
self.config.http1_idle_time:
threading.Thread(target=self.keep_alive_thread).start()
def record_active(self, active=""):
self.trace_time.append([time.time(), active])
# self.logger.debug("%s stat:%s", self.ip, active)
def get_trace(self):
out_list = []
last_time = self.trace_time[0][0]
for t, stat in self.trace_time:
time_diff = int((t - last_time) * 1000)
last_time = t
out_list.append(" %d:%s" % (time_diff, stat))
out_list.append(":%d" % ((time.time() - last_time) * 1000))
out_list.append(" processed:%d" % self.processed_tasks)
out_list.append(" transfered:%d" % self.transfered_size)
out_list.append(" sni:%s" % self.ssl_sock.sni)
return ",".join(out_list)
def get_rtt_rate(self):
return self.rtt + 100
def request(self, task):
self.accept_task = False
self.task = task
self.task_queue.put(task)
def keep_alive_thread(self):
while time.time() - self.ssl_sock.create_time < self.config.http1_first_ping_wait:
if not self.keep_running:
self.close("exit ")
return
time.sleep(1)
if self.config.http1_first_ping_wait and self.processed_tasks == 0:
self.task_queue.put("ping")
if self.config.http1_ping_interval:
while self.keep_running:
time_to_ping = max(self.config.http1_ping_interval - (time.time() - self.last_recv_time), 0.2)
time.sleep(time_to_ping)
if not self.request_onway and \
time.time() - self.last_recv_time > self.config.http1_ping_interval - 1:
self.task_queue.put("ping")
time.sleep(1)
elif self.config.http1_idle_time:
while self.keep_running:
time_to_sleep = max(self.config.http1_idle_time - (time.time() - self.last_recv_time), 0.2)
time.sleep(time_to_sleep)
if not self.request_onway and time.time() - self.last_recv_time > self.config.http1_idle_time:
self.close("idle timeout")
return
def work_loop(self):
while self.keep_running:
task = self.task_queue.get(True)
if not task:
# None task means exit
self.accept_task = False
self.keep_running = False
return
if task == "ping":
if not self.head_request():
self.ip_manager.recheck_ip(self.ssl_sock.ip_str)
self.close("keep alive")
return
self.last_recv_time = time.time()
continue
# self.logger.debug("http1 get task")
time_now = time.time()
if time_now - self.last_recv_time > self.config.http1_idle_time:
self.logger.warn("get task but inactive time:%d", time_now - self.last_recv_time)
self.task = task
self.close("inactive timeout %d" % (time_now - self.last_recv_time))
return
self.request_task(task)
self.request_onway = False
self.last_send_time = time_now
self.last_recv_time = time_now
if self.processed_tasks > self.config.http1_max_process_tasks:
self.close("lift end.")
return
def request_task(self, task):
timeout = task.timeout
self.request_onway = True
start_time = time.time()
self.record_active("request")
task.set_state("h1_req")
task.headers[b'Host'] = self.get_host(task.host)
task.headers[b"Content-Length"] = len(task.body)
request_data = b'%s %s HTTP/1.1\r\n' % (task.method, task.path)
request_data += pack_headers(task.headers)
request_data += b'\r\n'
try:
self.ssl_sock.send(request_data)
payload_len = len(task.body)
start = 0
while start < payload_len:
send_size = min(payload_len - start, 65535)
sended = self.ssl_sock.send(task.body[start:start+send_size])
start += sended
task.set_state("h1_req_sended")
response = simple_http_client.Response(self.ssl_sock)
response.begin(timeout=timeout)
task.set_state("response_begin")
except Exception as e:
self.logger.warn("%s h1_request:%r inactive_time:%d task.timeout:%d",
self.ip_str, e, time.time() - self.last_recv_time, task.timeout)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.retry_task_cb(task)
self.task = None
self.close("down fail")
return
task.set_state("h1_get_head")
time_left = timeout - (time.time() - start_time)
if task.method == b"HEAD" or response.status in [204, 304]:
response.content_length = 0
response.ssl_sock = self.ssl_sock
response.task = task
response.worker = self
task.content_length = response.content_length
task.responsed = True
task.queue.put(response)
try:
read_target = int(response.content_length)
except:
read_target = 0
data_len = 0
while True:
try:
data = response.read(timeout=time_left)
if not data:
break
except Exception as e:
self.logger.warn("read fail, ip:%s, chunk:%d url:%s task.timeout:%d e:%r",
self.ip_str, response.chunked, task.url, task.timeout, e)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.close("down fail")
return
task.put_data(data)
length = len(data)
data_len += length
if read_target and data_len >= read_target:
break
if read_target > data_len:
self.logger.warn("read fail, ip:%s, chunk:%d url:%s task.timeout:%d ",
self.ip_str, response.chunked, task.url, task.timeout)
self.ip_manager.recheck_ip(self.ssl_sock.ip_str)
self.close("down fail")
task.finish()
self.ssl_sock.received_size += data_len
time_cost = (time.time() - start_time)
if time_cost != 0:
speed = data_len / time_cost
task.set_state("h1_finish[SP:%d]" % speed)
self.transfered_size += len(request_data) + data_len
self.task = None
self.accept_task = True
self.idle_cb()
self.processed_tasks += 1
self.last_recv_time = time.time()
self.record_active("Res")
def head_request(self):
if not self.ssl_sock.host:
# self.logger.warn("try head but no host set")
return True
        # For keep-alive; not working right now.
self.request_onway = True
self.record_active("head")
start_time = time.time()
# self.logger.debug("head request %s", self.ip)
request_data = b'GET / HTTP/1.1\r\nHost: %s\r\n\r\n' % utils.to_bytes(self.ssl_sock.host)
try:
data = request_data
ret = self.ssl_sock.send(data)
if ret != len(data):
self.logger.warn("h1 head send len:%r %d %s", ret, len(data), self.ip_str)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
return False
response = simple_http_client.Response(self.ssl_sock)
response.begin(timeout=5)
status = response.status
if status != 200:
self.logger.warn("%s host:%s head fail status:%d", self.ip_str, self.ssl_sock.host, status)
return False
content = response.readall(timeout=5)
self.record_active("head end")
self.rtt = (time.time() - start_time) * 1000
#self.ip_manager.update_ip(self.ip, self.rtt)
return True
except Exception as e:
self.logger.warn("h1 %s HEAD keep alive request fail:%r", self.ssl_sock.ip_str, e)
self.logger.warn('%s trace:%s', self.ip_str, self.get_trace())
self.close("down fail")
finally:
self.request_onway = False
def close(self, reason=""):
        # Notify the work loop to exit.
        # This function may be called from outside (e.g. by the http2 layer)
        # when gae_proxy finds that the appid or ip is wrong.
self.accept_task = False
self.keep_running = False
self.task_queue.put(None)
if self.task is not None:
if self.task.responsed:
self.task.finish()
else:
self.retry_task_cb(self.task)
self.task = None
super(Http1Worker, self).close(reason)
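# Minimal, generic sketch of the queue-sentinel pattern used by work_loop()
# and close() above: the worker blocks on a queue and a None item tells it to
# exit. Everything below is illustrative and unused by the module itself.
def _sentinel_pattern_demo():
    import queue as _queue
    import threading as _threading

    q = _queue.Queue()

    def worker():
        while True:
            item = q.get(True)
            if item is None:  # sentinel: stop the loop, as close() does
                return
            print("processing", item)

    t = _threading.Thread(target=worker)
    t.start()
    q.put("task-1")
    q.put(None)  # ask the worker to exit
    t.join()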
|
{
"content_hash": "ff2d234b5005972ebd89562f4b8b9de0",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 111,
"avg_line_length": 35.76655052264808,
"alnum_prop": 0.5379444715051145,
"repo_name": "xyuanmu/XX-Net",
"id": "d68ca6c8bd096ab6e207d569f2e7d65e851f3954",
"size": "10266",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/default/lib/noarch/front_base/http1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4145"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "94951"
},
{
"name": "HTML",
"bytes": "252022"
},
{
"name": "JavaScript",
"bytes": "22405"
},
{
"name": "Python",
"bytes": "15474534"
},
{
"name": "Shell",
"bytes": "10208"
},
{
"name": "Visual Basic",
"bytes": "1795"
}
],
"symlink_target": ""
}
|
from flask import request
from netman.api.api_utils import BadRequest, to_response
from netman.api.objects import bond, interface, vlan
from netman.api.switch_api_base import SwitchApiBase
from netman.api.validators import Switch, is_boolean, is_vlan_number, Interface, Vlan, resource, content, is_ip_network, \
IPNetworkResource, is_access_group_name, Direction, is_vlan, is_bond, Bond, \
is_bond_link_speed, is_bond_number, is_description, is_vrf_name, \
is_vrrp_group, VrrpGroup, is_dict_with, optional, is_type, is_int, is_unincast_rpf_mode
from netman.core.objects.interface_states import OFF, ON
from netman.core.validator import is_valid_mpls_state
class SwitchApi(SwitchApiBase):
def hook_to(self, server):
server.add_url_rule('/switches/<hostname>/versions', view_func=self.get_versions, methods=['GET'])
server.add_url_rule('/switches/<hostname>/vlans', view_func=self.get_vlans, methods=['GET'])
server.add_url_rule('/switches/<hostname>/vlans', view_func=self.add_vlan, methods=['POST'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>', view_func=self.get_vlan, methods=['GET'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>', view_func=self.remove_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/interfaces', view_func=self.get_vlan_interfaces, methods=['GET'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/ips', view_func=self.add_ip, methods=['POST'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/ips/<path:ip_network>', view_func=self.remove_ip, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/vrrp-groups', view_func=self.add_vrrp_group, methods=['POST'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/vrrp-groups/<vrrp_group_id>', view_func=self.remove_vrrp_group, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/varp-ips', view_func=self.add_varp_ip, methods=['POST'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/varp-ips/<path:ip_network>', view_func=self.remove_varp_ip, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/access-groups/<direction>', view_func=self.set_vlan_access_group, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/access-groups/<direction>', view_func=self.unset_vlan_access_group, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/vrf-forwarding', view_func=self.set_vlan_vrf, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/vrf-forwarding', view_func=self.unset_vlan_vrf, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/dhcp-relay-server', view_func=self.add_dhcp_relay_server, methods=['POST'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/dhcp-relay-server/<ip_network>', view_func=self.remove_dhcp_relay_server, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/arp-routing', view_func=self.set_vlan_arp_routing_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/icmp-redirects', view_func=self.set_vlan_icmp_redirects_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/ntp', view_func=self.set_vlan_ntp_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/unicast-rpf-mode', view_func=self.set_vlan_unicast_rpf_mode, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/unicast-rpf-mode', view_func=self.unset_vlan_unicast_rpf_mode, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/load-interval', view_func=self.set_vlan_load_interval, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/load-interval', view_func=self.unset_vlan_load_interval, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/vlans/<vlan_number>/mpls-ip', view_func=self.set_vlan_mpls_ip_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces', view_func=self.get_interfaces, methods=['GET'])
server.add_url_rule('/switches/<hostname>/mac-addresses', view_func=self.get_mac_addresses, methods=['GET'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>', view_func=self.reset_interface, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>', view_func=self.get_interface, methods=['GET'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/shutdown', view_func=self.set_shutdown_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/shutdown', view_func=self.unset_shutdown_state, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/port-mode', view_func=self.set_port_mode, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/access-vlan', view_func=self.set_access_vlan, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/access-vlan', view_func=self.unset_interface_access_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/trunk-vlans', view_func=self.add_trunk_vlan, methods=['POST'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/trunk-vlans/<vlan_number>', view_func=self.remove_trunk_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/trunk-native-vlan', view_func=self.set_interface_native_vlan, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/trunk-native-vlan', view_func=self.unset_interface_native_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/bond-master', view_func=self.add_interface_to_bond, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/bond-master', view_func=self.remove_interface_from_bond, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/description', view_func=self.set_interface_description, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/description', view_func=self.unset_interface_description, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/spanning-tree', view_func=self.edit_interface_spanning_tree, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/lldp', view_func=self.set_interface_lldp_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/auto-negotiation', view_func=self.set_interface_auto_negotiation_state, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/auto-negotiation', view_func=self.unset_interface_auto_negotiation_state, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/mtu', view_func=self.set_interface_mtu, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/interfaces/<path:interface_id>/mtu', view_func=self.unset_interface_mtu, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds', view_func=self.get_bonds, methods=['GET'])
server.add_url_rule('/switches/<hostname>/bonds', view_func=self.add_bond, methods=['POST'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>', view_func=self.get_bond, methods=['GET'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>', view_func=self.remove_bond, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/link-speed', view_func=self.set_bond_link_speed, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/port-mode', view_func=self.set_bond_port_mode, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/access-vlan', view_func=self.set_bond_access_vlan, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/access-vlan', view_func=self.remove_bond_access_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/trunk-vlans', view_func=self.add_bond_trunk_vlan, methods=['POST'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/trunk-vlans/<vlan_number>', view_func=self.remove_bond_trunk_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/trunk-native-vlan', view_func=self.set_bond_native_vlan, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/trunk-native-vlan', view_func=self.unset_bond_native_vlan, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/description', view_func=self.set_bond_description, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/description', view_func=self.unset_bond_description, methods=['DELETE'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/spanning-tree', view_func=self.edit_bond_spanning_tree, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/mtu', view_func=self.set_bond_mtu, methods=['PUT'])
server.add_url_rule('/switches/<hostname>/bonds/<bond_number>/mtu', view_func=self.unset_bond_mtu, methods=['DELETE'])
return self
@to_response
@resource(Switch)
def get_versions(self, switch):
"""
        Displays various hardware and software versions of the switch
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_versions.json
:language: json
"""
return 200, switch.get_versions()
@to_response
@resource(Switch)
def get_vlans(self, switch):
"""
        Displays information about all VLANs
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_vlans.json
:language: json
"""
vlans = sorted(switch.get_vlans(), key=lambda x: x.number)
return 200, [vlan.to_api(v) for v in vlans]
@to_response
@resource(Switch, Vlan)
def get_vlan_interfaces(self, switch, vlan_number):
"""
        Displays interfaces used in a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_vlans_vlan_interfaces.json
:language: json
"""
return 200, switch.get_vlan_interfaces(vlan_number)
@to_response
@resource(Switch, Vlan)
def get_vlan(self, switch, vlan_number):
"""
        Displays information about one VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_vlans_vlan.json
:language: json
"""
return 200, vlan.to_api(switch.get_vlan(vlan_number))
@to_response
@content(is_vlan)
@resource(Switch)
def add_vlan(self, switch, number, name):
"""
        Create a new VLAN
:arg str hostname: Hostname or IP of the switch
:body:
Highlighted fields are mandatory
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans.json
:language: json
:emphasize-lines: 2
"""
switch.add_vlan(number, name)
return 201, None
@to_response
@resource(Switch, Vlan)
def remove_vlan(self, switch, vlan_number):
"""
Deletes a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.remove_vlan(vlan_number)
return 204, None
@to_response
@content(is_ip_network)
@resource(Switch, Vlan)
def add_ip(self, switch, vlan_number, validated_ip_network):
"""
Adds an IP/Subnet to a vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
Highlighted fields are mandatory
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans_vlanid_ips.json
:language: json
:emphasize-lines: 2-3
or
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans_vlanid_ips.txt
"""
switch.add_ip_to_vlan(vlan_number, validated_ip_network)
return 201, None
@to_response
@resource(Switch, Vlan, IPNetworkResource)
def remove_ip(self, switch, vlan_number, ip_network):
"""
Removes an IP/Subnet from a vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:arg str ip_network: IP/Subnet in the "x.x.x.x/xx" format
"""
switch.remove_ip_from_vlan(vlan_number, ip_network)
return 204, None
@to_response
@content(is_vrrp_group)
@resource(Switch, Vlan)
def add_vrrp_group(self, switch, vlan_number, group_id, ips=None, priority=None, hello_interval=None,
dead_interval=None, track_id=None, track_decrement=None):
"""
Adds a VRRP group to a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: VLAN number, between 1 and 4096
:body:
Highlighted fields are mandatory
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans_vlanid_vrrp_groups.json
:language: json
:emphasize-lines: 2-3
"""
switch.add_vrrp_group(vlan_number=vlan_number,
group_id=group_id,
ips=ips,
priority=priority,
hello_interval=hello_interval,
dead_interval=dead_interval,
track_id=track_id,
track_decrement=track_decrement)
return 201, None
@to_response
@resource(Switch, Vlan, VrrpGroup)
def remove_vrrp_group(self, switch, vlan_number, vrrp_group_id):
"""
Removes a VRRP group from a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: VLAN number, between 1 and 4096
:arg str vrrp_group_id: VRRP group number, between 1 and 255
"""
switch.remove_vrrp_group(vlan_number, vrrp_group_id)
return 204, None
@to_response
@content(is_ip_network)
@resource(Switch, Vlan)
def add_varp_ip(self, switch, vlan_number, validated_ip_network):
"""
Adds a VARP IP Network to a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: VLAN number, between 1 and 4096
:body:
Highlighted fields are mandatory
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans_vlanid_varp_ips.json
:language: json
:emphasize-lines: 2-3
or
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_vlans_vlanid_varp_ips.txt
"""
switch.add_vlan_varp_ip(vlan_number=vlan_number, ip_network=validated_ip_network)
return 201, None
@to_response
@resource(Switch, Vlan, IPNetworkResource)
def remove_varp_ip(self, switch, vlan_number, ip_network):
"""
Removes a VARP ip from a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: VLAN number, between 1 and 4096
:arg str ip_network: IP/Subnet in the "x.x.x.x/xx" format
"""
switch.remove_vlan_varp_ip(vlan_number=vlan_number, ip_network=ip_network)
return 204, None
@to_response
@content(is_access_group_name)
@resource(Switch, Vlan, Direction)
def set_vlan_access_group(self, switch, vlan_number, direction, access_group_name):
"""
        Sets the inbound or outbound ip access-group on a Vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:arg int direction: ``in`` or ``out``
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_vlans_vlanid_accessgroups_in.txt
"""
switch.set_vlan_access_group(vlan_number, direction, access_group_name)
return 204, None
@to_response
@resource(Switch, Vlan, Direction)
def unset_vlan_access_group(self, switch, vlan_number, direction):
"""
        Removes the inbound or outbound ip access-group of a Vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:arg int direction: ``in`` or ``out``
"""
switch.unset_vlan_access_group(vlan_number, direction)
return 204, None
@to_response
@content(is_int)
@resource(Switch, Vlan)
def set_vlan_load_interval(self, switch, vlan_number, value):
"""
Sets the load interval of a vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_vlans_vlanid_load_interval.txt
"""
switch.set_vlan_load_interval(vlan_number, value)
return 204, None
@to_response
@resource(Switch, Vlan)
def unset_vlan_load_interval(self, switch, vlan_number):
"""
Unsets the load interval of a vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.unset_vlan_load_interval(vlan_number)
return 204, None
@to_response
@content(is_valid_mpls_state)
@resource(Switch, Vlan)
def set_vlan_mpls_ip_state(self, switch, vlan_number, state):
"""
Sets the mpls ip state of a vlan
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
``true`` or ``false``
"""
switch.set_vlan_mpls_ip_state(vlan_number, state)
return 204, None
@to_response
@resource(Switch)
def get_interface(self, switch, interface_id):
"""
        Displays information about a physical interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: name of the interface
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_interface.json
:language: json
"""
return 200, interface.to_api(switch.get_interface(interface_id))
@to_response
@resource(Switch)
def get_interfaces(self, switch):
"""
        Displays information about all physical interfaces
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_interfaces.json
:language: json
"""
interfaces = sorted(switch.get_interfaces(), key=lambda x: x.name.lower())
return 200, [interface.to_api(i) for i in interfaces]
@to_response
@content(is_boolean)
@resource(Switch, Interface)
def set_shutdown_state(self, switch, interface_id, state):
"""
Sets the shutdown state of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
``true`` or ``false``
"""
if state:
switch.set_interface_state(interface_id, OFF)
else:
switch.set_interface_state(interface_id, ON)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_shutdown_state(self, switch, interface_id):
"""
Unsets the shutdown state of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_state(interface_id)
return 204, None
@to_response
@content(is_boolean)
@resource(Switch, Interface)
def set_interface_auto_negotiation_state(self, switch, interface_id, state):
"""
Sets the auto_negotiation state of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
``true`` or ``false``
"""
switch.set_interface_auto_negotiation_state(interface_id, ON if state is True else OFF)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_interface_auto_negotiation_state(self, switch, interface_id):
"""
Unsets the auto-negotiation state of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_auto_negotiation_state(interface_id)
return 204, None
@to_response
@content(is_int)
@resource(Switch, Interface)
def set_interface_mtu(self, switch, interface_id, value):
"""
Sets the mtu of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_mtu.txt
"""
switch.set_interface_mtu(interface_id, value)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_interface_mtu(self, switch, interface_id):
"""
Unsets the mtu of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_mtu(interface_id)
return 204, None
@to_response
@content(is_int)
@resource(Switch, Bond)
def set_bond_mtu(self, switch, bond_number, value):
"""
Sets the mtu of a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_mtu.txt
"""
switch.set_bond_mtu(bond_number, value)
return 204, None
@to_response
@resource(Switch, Bond)
def unset_bond_mtu(self, switch, bond_number):
"""
        Unsets the mtu of a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
"""
switch.unset_bond_mtu(bond_number)
return 204, None
@to_response
@resource(Switch, Interface)
def set_port_mode(self, switch, interface_id):
"""
Sets the port mode of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
``trunk`` or ``access``
"""
mode = request.get_data().lower()
if mode == 'trunk':
switch.set_trunk_mode(interface_id)
elif mode == 'access':
switch.set_access_mode(interface_id)
else:
raise BadRequest('Unknown port mode detected {}'.format(mode))
return 204, None
@to_response
@resource(Switch, Bond)
def set_bond_port_mode(self, switch, bond_number):
"""
Sets the port mode of a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
``trunk`` or ``access``
"""
mode = request.get_data().lower()
if mode == 'trunk':
switch.set_bond_trunk_mode(bond_number)
elif mode == 'access':
switch.set_bond_access_mode(bond_number)
else:
raise BadRequest('Unknown port mode detected {}'.format(mode))
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Interface)
def set_access_vlan(self, switch, interface_id, vlan_number):
"""
Sets the access vlan of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_accessvlan.txt
"""
switch.set_access_vlan(interface_id, vlan_number)
return 204, None
@to_response
@resource(Switch, Interface)
def reset_interface(self, switch, interface_id):
"""
        Reset the interface to its default settings
        Adding parameters to the interface is not yet supported; this request only resets the interface to its defaults.
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
if request.get_data():
raise NotImplementedError('Providing data is not supported by this method')
switch.reset_interface(interface_id)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_interface_access_vlan(self, switch, interface_id):
"""
Removes the access vlan of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_access_vlan(interface_id)
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Bond)
def set_bond_access_vlan(self, switch, bond_number, vlan_number):
"""
Sets the access vlan of a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_accessvlan.txt
"""
switch.set_bond_access_vlan(bond_number, vlan_number)
return 204, None
@to_response
@resource(Switch, Bond)
def remove_bond_access_vlan(self, switch, bond_number):
"""
Removes the access vlan of a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
"""
switch.remove_bond_access_vlan(bond_number)
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Interface)
def add_trunk_vlan(self, switch, interface, vlan_number):
"""
Adds a vlan to the trunk members of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_interfaces_intname_trunkvlans.txt
"""
switch.add_trunk_vlan(interface, vlan_number)
return 204, None
@to_response
@resource(Switch, Interface, Vlan)
def remove_trunk_vlan(self, switch, interface_id, vlan_number):
"""
Removes a vlan from the trunk members of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.remove_trunk_vlan(interface_id, vlan_number)
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Bond)
def add_bond_trunk_vlan(self, switch, bond_number, vlan_number):
"""
        Adds a vlan to the trunk members of a bonded interface
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_interfaces_intname_trunkvlans.txt
"""
switch.add_bond_trunk_vlan(bond_number, vlan_number)
return 204, None
@to_response
@resource(Switch, Bond, Vlan)
def remove_bond_trunk_vlan(self, switch, bond_number, vlan_number):
"""
Removes a vlan from the trunk members of a bonded interface
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.remove_bond_trunk_vlan(bond_number, vlan_number)
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Interface)
def set_interface_native_vlan(self, switch, interface_id, vlan_number):
"""
Sets the native vlan of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_nativevlan.txt
"""
switch.set_interface_native_vlan(interface_id, vlan_number)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_interface_native_vlan(self, switch, interface_id):
"""
Removes the native vlan of an interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_native_vlan(interface_id)
return 204, None
@to_response
@content(is_vlan_number)
@resource(Switch, Bond)
def set_bond_native_vlan(self, switch, bond_number, vlan_number):
"""
        Sets the native vlan of a bonded interface
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_nativevlan.txt
"""
switch.set_bond_native_vlan(bond_number, vlan_number)
return 204, None
@to_response
@resource(Switch, Bond)
def unset_bond_native_vlan(self, switch, bond_number):
"""
        Removes the native vlan of a bonded interface
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
"""
switch.unset_bond_native_vlan(bond_number)
return 204, None
@to_response
@resource(Switch, Bond)
def get_bond(self, switch, bond_number):
"""
        Displays information about a bond
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_bond_v2.json
:language: json
"""
return 200, bond.to_api(
switch.get_bond(bond_number),
version=request.headers.get("Netman-Max-Version"))
@to_response
@resource(Switch)
def get_bonds(self, switch):
"""
        Displays information about all bonds
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_bonds_v2.json
:language: json
"""
bonds = sorted(switch.get_bonds(), key=lambda x: x.number)
return 200, [bond.to_api(
b, version=request.headers.get("Netman-Max-Version")
) for b in bonds]
@to_response
@content(is_bond)
@resource(Switch)
def add_bond(self, switch, bond_number):
"""
        Create a new bond
:arg str hostname: Hostname or IP of the switch
:body:
Highlighted fields are mandatory
.. literalinclude:: ../doc_config/api_samples/post_switch_hostname_bonds.json
:language: json
:emphasize-lines: 2
"""
switch.add_bond(bond_number)
return 201, None
@to_response
@resource(Switch, Bond)
def remove_bond(self, switch, bond_number):
"""
Deletes a bond
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
"""
switch.remove_bond(bond_number)
return 204, None
@to_response
@content(is_bond_link_speed)
@resource(Switch, Bond)
def set_bond_link_speed(self, switch, bond_number, bond_link_speed):
"""
Change a bond link speed
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:arg str bond_link_speed: Bond link speed
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_bonds_link_speed.txt
"""
switch.set_bond_link_speed(bond_number, bond_link_speed)
return 204, None
@to_response
@content(is_bond_number)
@resource(Switch, Interface)
def add_interface_to_bond(self, switch, interface_id, bond_number):
"""
Add interface to bond
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:arg int bond_number: Bond number
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_bond_master.txt
"""
switch.add_interface_to_bond(interface_id, bond_number)
return 204, None
@to_response
@resource(Switch, Interface)
def remove_interface_from_bond(self, switch, interface_id):
"""
Free interface from bond
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.remove_interface_from_bond(interface_id)
return 204, None
@to_response
@content(is_description)
@resource(Switch, Interface)
def set_interface_description(self, switch, interface_id, description):
"""
Add a description to the interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
A long interface description text
"""
switch.set_interface_description(interface_id, description)
return 204, None
@to_response
@resource(Switch, Interface)
def unset_interface_description(self, switch, interface_id):
"""
Remove interface description
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
"""
switch.unset_interface_description(interface_id)
return 204, None
@to_response
@content(is_description)
@resource(Switch, Bond)
def set_bond_description(self, switch, bond_number, description):
"""
Add a description to the bonded interface
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
:body:
A long interface description text
"""
switch.set_bond_description(bond_number, description)
return 204, None
@to_response
@resource(Switch, Bond)
def unset_bond_description(self, switch, bond_number):
"""
Remove bonded interface description
:arg str hostname: Hostname or IP of the switch
:arg int bond_number: Bond number
"""
switch.unset_bond_description(bond_number)
return 204, None
@to_response
@content(is_dict_with(
edge=optional(is_type(bool))))
@resource(Switch, Bond)
def edit_bond_spanning_tree(self, switch, bond_number, **params):
"""
Edit bond spanning tree properties
:arg bool edge: Activates edge mode
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_spanningtree.json
"""
switch.edit_bond_spanning_tree(bond_number, **params)
return 204, None
@to_response
@content(is_dict_with(
edge=optional(is_type(bool))))
@resource(Switch, Interface)
def edit_interface_spanning_tree(self, switch, interface_id, **params):
"""
Edit interface spanning tree properties
:arg bool edge: Activates edge mode
:body:
.. literalinclude:: ../doc_config/api_samples/put_switch_hostname_interfaces_intname_spanningtree.json
"""
switch.edit_interface_spanning_tree(interface_id, **params)
return 204, None
@to_response
@content(is_vrf_name)
@resource(Switch, Vlan)
def set_vlan_vrf(self, switch, vlan_number, vrf_name):
"""
Set VLAN VRF name
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
DEFAULT_LAN
"""
switch.set_vlan_vrf(vlan_number, vrf_name)
return 204, None
@to_response
@resource(Switch, Vlan)
def unset_vlan_vrf(self, switch, vlan_number):
"""
Remove VLAN VRF name
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.unset_vlan_vrf(vlan_number)
return 204, None
@to_response
@content(is_ip_network)
@resource(Switch, Vlan)
def add_dhcp_relay_server(self, switch, vlan_number, validated_ip_network):
"""
Set DHCP relay server (ip helper address)
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
IP address of the DHCP server or its relay
"""
switch.add_dhcp_relay_server(vlan_number=vlan_number, ip_address=validated_ip_network.ip)
return 204, None
@to_response
@resource(Switch, Vlan, IPNetworkResource)
def remove_dhcp_relay_server(self, switch, vlan_number, ip_network):
"""
Remove DHCP relay server (ip helper address)
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.remove_dhcp_relay_server(vlan_number=vlan_number, ip_address=ip_network.ip)
return 204, None
@to_response
@content(is_boolean)
@resource(Switch, Interface)
def set_interface_lldp_state(self, switch, interface_id, state):
"""
Enable or disable the LLDP protocol on the interface
:arg str hostname: Hostname or IP of the switch
:arg str interface_id: Interface name (ex. ``FastEthernet0/1``, ``ethernet1/11``)
:body:
``true`` or ``false``
"""
switch.set_interface_lldp_state(interface_id, state)
return 204, None
@to_response
@content(is_boolean)
@resource(Switch, Vlan)
def set_vlan_arp_routing_state(self, switch, vlan_number, state):
"""
        Sets the ARP routing state of a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
``true`` or ``false``
"""
switch.set_vlan_arp_routing_state(vlan_number, ON if state is True else OFF)
return 204, None
@to_response
@content(is_boolean)
@resource(Switch, Vlan)
def set_vlan_icmp_redirects_state(self, switch, vlan_number, state):
"""
        Sets the ICMP redirects state of a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
``true`` or ``false``
"""
switch.set_vlan_icmp_redirects_state(vlan_number, state)
return 204, None
@to_response
@content(is_boolean)
@resource(Switch, Vlan)
def set_vlan_ntp_state(self, switch, vlan_number, state):
"""
        Enable or disable the ntp state of a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
``true`` or ``false``
"""
switch.set_vlan_ntp_state(vlan_number, state)
return 204, None
@to_response
@content(is_unincast_rpf_mode)
@resource(Switch, Vlan)
def set_vlan_unicast_rpf_mode(self, switch, vlan_number, mode):
"""
        Sets the Unicast RPF mode of a VLAN *only strict is supported*
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
:body:
``STRICT``
"""
switch.set_vlan_unicast_rpf_mode(vlan_number, mode)
return 204, None
@to_response
@resource(Switch, Vlan)
def unset_vlan_unicast_rpf_mode(self, switch, vlan_number):
"""
        Removes the Unicast RPF configuration of a VLAN
:arg str hostname: Hostname or IP of the switch
:arg int vlan_number: Vlan number, between 1 and 4096
"""
switch.unset_vlan_unicast_rpf_mode(vlan_number)
return 204, None
@to_response
@resource(Switch)
def get_mac_addresses(self, switch):
"""
Retrieves the visible mac addresses on the specified switch
:arg str hostname: Hostname or IP of the switch
:code 200 OK:
Example output:
.. literalinclude:: ../doc_config/api_samples/get_switch_hostname_mac_addresses.json
:language: json
"""
return 200, [port.__dict__ for port in switch.get_mac_addresses()]
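# Hypothetical client-side sketch (not part of this module): against a running
# netman server, the routes registered in hook_to() above can be exercised
# with plain HTTP. The base URL, switch hostname and interface name below are
# placeholders.
#
#     import requests
#     base = "http://netman.example.com"
#     # shut an interface down (see set_shutdown_state above)
#     requests.put(base + "/switches/sw01/interfaces/ethernet1/1/shutdown",
#                  data="true")
#     # list the vlans (see get_vlans above)
#     requests.get(base + "/switches/sw01/vlans").json()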
|
{
"content_hash": "6cb11afb80b65372a9b2f853b438d420",
"timestamp": "",
"source": "github",
"line_count": 1177,
"max_line_length": 174,
"avg_line_length": 36.714528462192014,
"alnum_prop": 0.6286302732973874,
"repo_name": "internaphosting/netman",
"id": "364ed37e9acc22b0a46a20dcd8896b9958abab6b",
"size": "43787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netman/api/switch_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "1206316"
}
],
"symlink_target": ""
}
|
"""
This module is used to load configurations.
"""
import json
import os
class ConfigurationManager(object):
"""
    A class that will be used to set up the configuration.
Available configurations:
- Parameters:
- number_of_topics: int
- number_of_peers: int
- min_similarity_threshold: float
- max_similarity_threshold: float
- first_user: int (zero-based index)
- last_user: int (zero-based index)
- dataset: string (dummy, citeulike-a, citeulike-t)
- methods:
- paper_presentation: string ("lda")
- sampling: string ("least_similar_k")
- pair_formulation: string
- learning: string
- metrics:
- ndcg: list of int, give different values for k (ndcg@k)
- mrr: int, give the values for k (mrr@k)
- recall: list of int, give different values for k (recall@k)
- AUC: Boolean (True, or False)
"""
def __init__(self):
"""
Constructs a configuration from the config/ directory.
"""
base_dir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(os.path.dirname(base_dir), 'config/configuration.json')) as data_file:
self.config_dict = json.load(data_file)['configuration']
def get_number_of_topics(self):
return self.config_dict['parameters']['number_of_topics']
def get_number_of_peers(self):
return self.config_dict['parameters']['number_of_peers']
def get_min_similarity_threshold(self):
return self.config_dict['parameters']['min_similarity_threshold']
def get_max_similarity_threshold(self):
return self.config_dict['parameters']['max_similarity_threshold']
def get_first_user(self):
return self.config_dict['parameters']['first_user']
def get_last_user(self):
return self.config_dict['parameters']['last_user']
def get_dataset(self):
return self.config_dict['parameters']['dataset']
def get_paper_presentation(self):
return self.config_dict['methods']['paper_presentation']
def get_sampling(self):
return self.config_dict['methods']['sampling']
def get_ndcg_values(self):
return self.config_dict['metrics']['ndcg']
def get_mrr_values(self):
return self.config_dict['metrics']['mrr']
def get_recall_values(self):
return self.config_dict['metrics']['recall']
def get_AUC(self):
return self.config_dict['metrics']['AUC']
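# Illustrative sketch (not part of the original module): a config/configuration.json
# matching the accessors above might look like the following. Key names come from the
# class docstring; all values are placeholders.
#
# {
#     "configuration": {
#         "parameters": {
#             "number_of_topics": 150,
#             "number_of_peers": 10,
#             "min_similarity_threshold": 0.1,
#             "max_similarity_threshold": 0.9,
#             "first_user": 0,
#             "last_user": 99,
#             "dataset": "dummy"
#         },
#         "methods": {
#             "paper_presentation": "lda",
#             "sampling": "least_similar_k"
#         },
#         "metrics": {
#             "ndcg": [5, 10],
#             "mrr": 10,
#             "recall": [50, 100],
#             "AUC": true
#         }
#     }
# }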
|
{
"content_hash": "be0a05318913cca41fef429299407cd5",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 101,
"avg_line_length": 28.919540229885058,
"alnum_prop": 0.6255961844197139,
"repo_name": "anasalzogbi/SciPRec",
"id": "1649bfe7f5f8fce45fd304e108c7b485e1204eb4",
"size": "2538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/configuration_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "43831"
}
],
"symlink_target": ""
}
|
class Region(object):
"""Region corresponds to a Digital Ocean data center"""
def __init__(self, identifier, name):
self.id = identifier
self.name = name
def __repr__(self):
return "<%s: %s>" % (self.id, self.name)
def __str__(self):
return "%s: %s" % (self.id, self.name)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.__dict__ == other.__dict__
)
def __ne__(self, other):
return not self.__eq__(other)
@staticmethod
def get(service, identifier):
"""Return the Region given an identifier and None if not found.
:param identifier: The identifier for the region you would like to
retrieve
:param service: The service object for the Digital Ocean account
that holds the regions
"""
r = Region.regions(service)
for region in r:
if region.id == identifier:
return region
return None
@staticmethod
def regions(service):
"""Return the a list containing all the regions.
:param service: The service object for the Digital Ocean account
that holds the regions
"""
response = service.get("regions")
encoded_regions = response['regions']
result = []
for encoded_region in encoded_regions:
r = Region(encoded_region['id'], encoded_region['name'])
result.append(r)
return result
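# Illustrative usage sketch (not part of the original module). It assumes a
# `service` object whose get("regions") call returns a payload shaped like
# {"regions": [{"id": 1, "name": "New York 1"}, ...]}, which is what the
# methods above expect.
#
#     all_regions = Region.regions(service)
#     nyc = Region.get(service, 1)
#     if nyc is not None:
#         print(nyc)  # prints "1: New York 1"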
|
{
"content_hash": "5955f9fc35df2bab9f100f23e02b535a",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 28.754716981132077,
"alnum_prop": 0.5656167979002624,
"repo_name": "pirate42/docc",
"id": "be99cbcb1d2a90b20c075633ca5c215803f6f2ff",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docc/region.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import os
import warnings
import sys
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
# Load Diabetes datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
# Create pandas DataFrame for sklearn ElasticNet linear_model
Y = np.array([y]).transpose()
d = np.concatenate((X, Y), axis=1)
cols = diabetes.feature_names + ["progression"]
data = pd.DataFrame(d, columns=cols)
# Import mlflow
import mlflow
import mlflow.sklearn
# Evaluate metrics
def eval_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
if __name__ == "__main__":
warnings.filterwarnings("ignore")
np.random.seed(40)
# Split the data into training and test sets. (0.75, 0.25) split.
train, test = train_test_split(data)
# The predicted column is "progression" which is a quantitative measure of disease progression one year after baseline
train_x = train.drop(["progression"], axis=1)
test_x = test.drop(["progression"], axis=1)
train_y = train[["progression"]]
test_y = test[["progression"]]
alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05
l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05
# Run ElasticNet
lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
lr.fit(train_x, train_y)
predicted_qualities = lr.predict(test_x)
(rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
# Print out ElasticNet model metrics
print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
print(" RMSE: %s" % rmse)
print(" MAE: %s" % mae)
print(" R2: %s" % r2)
# Log mlflow attributes for mlflow UI
mlflow.log_param("alpha", alpha)
mlflow.log_param("l1_ratio", l1_ratio)
mlflow.log_metric("rmse", rmse)
mlflow.log_metric("r2", r2)
mlflow.log_metric("mae", mae)
mlflow.sklearn.log_model(lr, "model")
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the elastic net.")
alphas_enet, coefs_enet, _ = enet_path(X, y, eps=eps, l1_ratio=l1_ratio)
# Display results
fig = plt.figure(1)
ax = plt.gca()
colors = cycle(["b", "r", "g", "c", "k"])
neg_log_alphas_enet = -np.log10(alphas_enet)
for coef_e, c in zip(coefs_enet, colors):
l2 = plt.plot(neg_log_alphas_enet, coef_e, linestyle="--", c=c)
plt.xlabel("-Log(alpha)")
plt.ylabel("coefficients")
title = "ElasticNet Path by alpha for l1_ratio = " + str(l1_ratio)
plt.title(title)
plt.axis("tight")
# Save figures
fig.savefig("ElasticNet-paths.png")
# Close plot
plt.close(fig)
# Log artifacts (output files)
mlflow.log_artifact("ElasticNet-paths.png")
|
{
"content_hash": "8094290cbb87662bed5adf7dc3490b4e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 122,
"avg_line_length": 30.33653846153846,
"alnum_prop": 0.6694136291600634,
"repo_name": "mlflow/mlflow",
"id": "d94880053bc096cc854872d1825a15057d4f0c2d",
"size": "3960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/sklearn_elasticnet_diabetes/linux/train_diabetes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
}
|
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_change_stream_invite_only
from zerver.lib.test_classes import ZulipTestCase
from zerver.models import Message, UserMessage, get_client, get_realm, get_stream
class TopicHistoryTest(ZulipTestCase):
def test_topics_history_zephyr_mirror(self) -> None:
user_profile = self.mit_user("sipbtest")
stream_name = "new_stream"
# Send a message to this new stream from another user
self.subscribe(self.mit_user("starnine"), stream_name)
stream = get_stream(stream_name, user_profile.realm)
self.send_stream_message(self.mit_user("starnine"), stream_name, topic_name="secret topic")
# Now subscribe this MIT user to the new stream and verify
# that the new topic is not accessible
self.login_user(user_profile)
self.subscribe(user_profile, stream_name)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint, {}, subdomain="zephyr")
self.assert_json_success(result)
history = result.json()["topics"]
self.assertEqual(history, [])
def test_topics_history(self) -> None:
# verified: int(UserMessage.flags.read) == 1
user_profile = self.example_user("iago")
self.login_user(user_profile)
stream_name = "Verona"
stream = get_stream(stream_name, user_profile.realm)
recipient = stream.recipient
def create_test_message(topic: str) -> int:
# TODO: Clean this up to send messages the normal way.
hamlet = self.example_user("hamlet")
message = Message(
sender=hamlet,
recipient=recipient,
content="whatever",
date_sent=timezone_now(),
sending_client=get_client("whatever"),
)
message.set_topic_name(topic)
message.save()
UserMessage.objects.create(
user_profile=user_profile,
message=message,
flags=0,
)
return message.id
# our most recent topics are topic0, topic1, topic2
# Create old messages with strange spellings.
create_test_message("topic2")
create_test_message("toPIc1")
create_test_message("toPIc0")
create_test_message("topic2")
create_test_message("topic2")
create_test_message("Topic2")
# Create new messages
topic2_msg_id = create_test_message("topic2")
create_test_message("topic1")
create_test_message("topic1")
topic1_msg_id = create_test_message("topic1")
topic0_msg_id = create_test_message("topic0")
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
history = history[:3]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic0",
"topic1",
"topic2",
],
)
self.assertEqual(
[topic["max_id"] for topic in history],
[
topic0_msg_id,
topic1_msg_id,
topic2_msg_id,
],
)
# Now try as cordelia, who we imagine as a totally new user in
# that she doesn't have UserMessage rows. We should see the
# same results for a public stream.
self.login("cordelia")
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
# We only look at the most recent three topics, because
# the prior fixture data may be unreliable.
history = history[:3]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic0",
"topic1",
"topic2",
],
)
self.assertIn("topic0", [topic["name"] for topic in history])
self.assertEqual(
[topic["max_id"] for topic in history],
[
topic0_msg_id,
topic1_msg_id,
topic2_msg_id,
],
)
# Now make stream private, but subscribe cordelia
do_change_stream_invite_only(stream, True)
self.subscribe(self.example_user("cordelia"), stream.name)
result = self.client_get(endpoint, {})
self.assert_json_success(result)
history = result.json()["topics"]
history = history[:3]
        # Cordelia doesn't see these recent history items in her
        # results, since she wasn't subscribed when they were sent.
self.assertNotIn("topic0", [topic["name"] for topic in history])
self.assertNotIn("topic1", [topic["name"] for topic in history])
self.assertNotIn("topic2", [topic["name"] for topic in history])
def test_bad_stream_id(self) -> None:
self.login("iago")
# non-sensible stream id
endpoint = "/json/users/me/9999999999/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
# out of realm
bad_stream = self.make_stream(
"mit_stream",
realm=get_realm("zephyr"),
)
endpoint = f"/json/users/me/{bad_stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
# private stream to which I am not subscribed
private_stream = self.make_stream(
"private_stream",
invite_only=True,
)
endpoint = f"/json/users/me/{private_stream.id}/topics"
result = self.client_get(endpoint, {})
self.assert_json_error(result, "Invalid stream id")
def test_get_topics_web_public_stream_web_public_request(self) -> None:
stream = self.make_stream("web-public-steram", is_web_public=True)
for i in range(3):
self.send_stream_message(
self.example_user("iago"), stream.name, topic_name="topic" + str(i)
)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint)
self.assert_json_success(result)
history = result.json()["topics"]
self.assertEqual(
[topic["name"] for topic in history],
[
"topic2",
"topic1",
"topic0",
],
)
def test_get_topics_non_web_public_stream_web_public_request(self) -> None:
stream = get_stream("Verona", self.example_user("iago").realm)
endpoint = f"/json/users/me/{stream.id}/topics"
result = self.client_get(endpoint)
self.assert_json_error(result, "Invalid stream id", 400)
    def test_get_topics_non_existent_stream_web_public_request(self) -> None:
        non_existent_stream_id = 10000000000000000000000
        endpoint = f"/json/users/me/{non_existent_stream_id}/topics"
result = self.client_get(endpoint)
self.assert_json_error(result, "Invalid stream id", 400)
class TopicDeleteTest(ZulipTestCase):
def test_topic_delete(self) -> None:
initial_last_msg_id = self.get_last_message().id
stream_name = "new_stream"
topic_name = "new topic 2"
# NON-ADMIN USER
user_profile = self.example_user("hamlet")
self.subscribe(user_profile, stream_name)
# Send message
stream = get_stream(stream_name, user_profile.realm)
self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
# Deleting the topic
self.login_user(user_profile)
endpoint = "/json/streams/" + str(stream.id) + "/delete_topic"
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_error(result, "Must be an organization administrator")
self.assertEqual(self.get_last_message().id, last_msg_id)
# Make stream private with limited history
do_change_stream_invite_only(stream, invite_only=True, history_public_to_subscribers=False)
# ADMIN USER subscribed now
user_profile = self.example_user("iago")
self.subscribe(user_profile, stream_name)
self.login_user(user_profile)
new_last_msg_id = self.send_stream_message(user_profile, stream_name, topic_name=topic_name)
# Now admin deletes all messages in topic -- which should only
# delete new_last_msg_id, i.e. the one sent since they joined.
self.assertEqual(self.get_last_message().id, new_last_msg_id)
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, last_msg_id)
# Try to delete all messages in the topic again. There are no messages accessible
# to the administrator, so this should do nothing.
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, last_msg_id)
# Make the stream's history public to subscribers
do_change_stream_invite_only(stream, invite_only=True, history_public_to_subscribers=True)
# Delete the topic should now remove all messages
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, initial_last_msg_id)
# Delete again, to test the edge case of deleting an empty topic.
result = self.client_post(
endpoint,
{
"topic_name": topic_name,
},
)
self.assert_json_success(result)
self.assertEqual(self.get_last_message().id, initial_last_msg_id)
|
{
"content_hash": "d87c070fbe8417db0b3e268349cb6888",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 100,
"avg_line_length": 36.47038327526133,
"alnum_prop": 0.5819241425432311,
"repo_name": "hackerkid/zulip",
"id": "737ec4a0cdf14732dd66d95ad8d1f2e8c0171cb0",
"size": "10467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_message_topics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "397271"
},
{
"name": "Dockerfile",
"bytes": "2939"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "717106"
},
{
"name": "JavaScript",
"bytes": "3079595"
},
{
"name": "Perl",
"bytes": "398763"
},
{
"name": "Puppet",
"bytes": "71210"
},
{
"name": "Python",
"bytes": "6876664"
},
{
"name": "Ruby",
"bytes": "6110"
},
{
"name": "Shell",
"bytes": "119833"
},
{
"name": "TypeScript",
"bytes": "14645"
}
],
"symlink_target": ""
}
|
"""
Initialization file for extract library directory.
This library handles extracting data from the Twitter API and outputting
to a staging area as a CSV, so that it can be transformed and then loaded into
the DB later.
If extract and load steps happened in one command, then an error on writing
to the db would mean losing fetched tweets in memory, making it hard to
inspect the data and rebuild the SQL. Therefore, data is written out to a CSV
with minimal processing. Note that writing out data is slow, so rows are
written in a single write action for each batch of objects provided.
"""
|
{
"content_hash": "c5cfde12452bc699ff0b046c34d9d614",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 78,
"avg_line_length": 46.15384615384615,
"alnum_prop": 0.79,
"repo_name": "MichaelCurrin/twitterverse",
"id": "2b000f5e89f4974255bab7f2b01af5079bf5777e",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/lib/extract/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1345"
},
{
"name": "PLpgSQL",
"bytes": "523"
},
{
"name": "Python",
"bytes": "292539"
},
{
"name": "Shell",
"bytes": "4301"
},
{
"name": "TSQL",
"bytes": "15142"
}
],
"symlink_target": ""
}
|
def packBounds(xMin, xMax, yMin, yMax):
return [[xMin, xMax], [yMin, yMax]]
def unpackBounds(bounds):
xMin, xMax = bounds[0]
yMin, yMax = bounds[1]
return [xMin, xMax, yMin, yMax]
def nextMove(width, height, x, y, direction, bounds):
xMin, xMax, yMin, yMax = unpackBounds(bounds)
if direction == "U":
yMax = y
elif direction == "UR":
xMin = x
yMax = y
elif direction == "R":
xMin = x
elif direction == "DR":
xMin = x
yMin = y
elif direction == "D":
yMin = y
elif direction == "DL":
xMax = x
yMin = y
elif direction == "L":
xMax = x
elif direction == "UL":
yMax = y
xMax = x
if "U" in direction or "D" in direction:
y = (yMax - yMin) / 2 + yMin
if "L" in direction or "R" in direction:
x = (xMax - xMin) / 2 + xMin
return [x, y, packBounds(xMin, xMax, yMin, yMax)]
# width: width of the building.
# height: height of the building.
width, height = [int(i) for i in raw_input().split()]
N = int(raw_input()) # maximum number of turns before game over.
x, y = [int(i) for i in raw_input().split()]
xMin = 0
yMin = 0
xMax = width
yMax = height
bounds = packBounds(xMin, xMax, yMin, yMax)
# Game loop
while True:
# The direction of the bombs from batman's current location (U, UR, R, DR, D, DL, L or UL)
direction = raw_input()
x, y, bounds = nextMove(width, height, x, y, direction, bounds)
print str(x) + " " + str(y)
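# Worked example (illustrative): in a 10 x 10 building starting at (5, 5) with
# bounds [[0, 10], [0, 10]], a hint of "UL" shrinks the box to x in [0, 5] and
# y in [0, 5], and nextMove jumps Batman to its midpoint (2, 2), effectively a
# two-dimensional binary search over the remaining window.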
|
{
"content_hash": "41d9054e0772373609c67cfdc55bc340",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 23.620689655172413,
"alnum_prop": 0.6248175182481752,
"repo_name": "AntoineAugusti/katas",
"id": "6f9e049732895a3808215a8c50b4dcf332f0fff8",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codingame/medium/heat_detector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "2728"
},
{
"name": "Java",
"bytes": "5700"
},
{
"name": "Python",
"bytes": "78940"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('EOSS', '0009_auto_20210401_1737'),
]
operations = [
migrations.AddField(
model_name='eosscontext',
name='vassar_ready',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "ae72555f7af1d2225f0155ae236184c9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 21.625,
"alnum_prop": 0.5809248554913294,
"repo_name": "seakers/daphne_brain",
"id": "41c872e3148b73106d15f045b9eddb7ae347a5d7",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "EOSS/migrations/0010_eosscontext_vassar_ready.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "1683352"
},
{
"name": "Python",
"bytes": "1557398"
},
{
"name": "Shell",
"bytes": "4153"
}
],
"symlink_target": ""
}
|
"""
Support for controlling GPIO pins of a Raspberry Pi.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/rpi_gpio/
"""
# pylint: disable=import-error
import logging
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP)
REQUIREMENTS = ['RPi.GPIO==0.6.1']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'rpi_gpio'
def setup(hass, config):
"""Set up the Raspberry PI GPIO component."""
from RPi import GPIO
def cleanup_gpio(event):
"""Stuff to do before stopping."""
GPIO.cleanup()
def prepare_gpio(event):
"""Stuff to do when home assistant starts."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_gpio)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_gpio)
GPIO.setmode(GPIO.BCM)
return True
def setup_output(port):
"""Set up a GPIO as output."""
from RPi import GPIO
GPIO.setup(port, GPIO.OUT)
def setup_input(port, pull_mode):
"""Set up a GPIO as input."""
from RPi import GPIO
GPIO.setup(port, GPIO.IN,
GPIO.PUD_DOWN if pull_mode == 'DOWN' else GPIO.PUD_UP)
def write_output(port, value):
"""Write a value to a GPIO."""
from RPi import GPIO
GPIO.output(port, value)
def read_input(port):
"""Read a value from a GPIO."""
from RPi import GPIO
return GPIO.input(port)
def edge_detect(port, event_callback, bounce):
"""Add detection for RISING and FALLING events."""
from RPi import GPIO
GPIO.add_event_detect(
port,
GPIO.BOTH,
callback=event_callback,
bouncetime=bounce)
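# Illustrative usage sketch (not part of the component): other platform code that
# imports this module could drive the pins roughly like this. Pin numbers are
# placeholders and use BCM numbering, as set in prepare_gpio above.
#
#     from homeassistant.components import rpi_gpio
#
#     rpi_gpio.setup_output(18)
#     rpi_gpio.write_output(18, 1)        # drive BCM pin 18 high
#     rpi_gpio.setup_input(23, 'UP')      # BCM pin 23 with internal pull-up
#     state = rpi_gpio.read_input(23)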
|
{
"content_hash": "dfb780cac94dcbb54e16bdd148619b0c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 24.28985507246377,
"alnum_prop": 0.6610978520286396,
"repo_name": "persandstrom/home-assistant",
"id": "824ec46d636d3ceb5a6e725aab7db7dedc21aaa1",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/rpi_gpio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
}
|
import collections
import importlib
import sys
import os
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
import zipfile
from importlib.util import source_from_cache
from test.support import make_legacy_pyc, strip_python_stderr
# Cached result of the expensive test performed in the function below.
__cached_interp_requires_environment = None
def interpreter_requires_environment():
"""
Returns True if our sys.executable interpreter requires environment
variables in order to be able to run at all.
This is designed to be used with @unittest.skipIf() to annotate tests
that need to use an assert_python*() function to launch an isolated
mode (-I) or no environment mode (-E) sub-interpreter process.
A normal build & test does not run into this situation but it can happen
when trying to run the standard library test suite from an interpreter that
doesn't have an obvious home with Python's current home finding logic.
Setting PYTHONHOME is one way to get most of the testsuite to run in that
situation. PYTHONPATH or PYTHONUSERSITE are other common environment
variables that might impact whether or not the interpreter can start.
"""
global __cached_interp_requires_environment
if __cached_interp_requires_environment is None:
# Try running an interpreter with -E to see if it works or not.
try:
subprocess.check_call([sys.executable, '-E',
'-c', 'import sys; sys.exit(0)'])
except subprocess.CalledProcessError:
__cached_interp_requires_environment = True
else:
__cached_interp_requires_environment = False
return __cached_interp_requires_environment
_PythonRunResult = collections.namedtuple("_PythonRunResult",
("rc", "out", "err"))
# Executing the interpreter in a subprocess
def run_python_until_end(*args, **env_vars):
env_required = interpreter_requires_environment()
if '__isolated' in env_vars:
isolated = env_vars.pop('__isolated')
else:
isolated = not env_vars and not env_required
cmd_line = [sys.executable, '-X', 'faulthandler']
if isolated:
# isolated mode: ignore Python environment variables, ignore user
# site-packages, and don't add the current directory to sys.path
cmd_line.append('-I')
elif not env_vars and not env_required:
# ignore Python environment variables
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# set TERM='' unless the TERM environment variable is passed explicitly
# see issues #11390 and #18300
if 'TERM' not in env_vars:
env['TERM'] = ''
# But a special flag that can be set to override -- in this case, the
# caller is responsible to pass the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
proc = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
with proc:
try:
out, err = proc.communicate()
finally:
proc.kill()
subprocess._cleanup()
rc = proc.returncode
err = strip_python_stderr(err)
return _PythonRunResult(rc, out, err), cmd_line
def _assert_python(expected_success, *args, **env_vars):
res, cmd_line = run_python_until_end(*args, **env_vars)
if (res.rc and expected_success) or (not res.rc and not expected_success):
        # Limit output to roughly 100 lines of 80 ASCII characters
maxlen = 80 * 100
out, err = res.out, res.err
if len(out) > maxlen:
out = b'(... truncated stdout ...)' + out[-maxlen:]
if len(err) > maxlen:
err = b'(... truncated stderr ...)' + err[-maxlen:]
out = out.decode('ascii', 'replace').rstrip()
err = err.decode('ascii', 'replace').rstrip()
raise AssertionError("Process return code is %d\n"
"command line: %r\n"
"\n"
"stdout:\n"
"---\n"
"%s\n"
"---\n"
"\n"
"stderr:\n"
"---\n"
"%s\n"
"---"
% (res.rc, cmd_line,
out,
err))
return res
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` succeeds (rc == 0) and return a (return code, stdout,
stderr) tuple.
If the __cleanenv keyword is set, env_vars is used as a fresh environment.
Python is started in isolated mode (command line option -I),
except if the __isolated keyword is set to False.
"""
return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` fails (rc != 0) and return a (return code, stdout,
stderr) tuple.
See assert_python_ok() for more options.
"""
return _assert_python(False, *args, **env_vars)
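# Illustrative usage sketch (the helpers above are typically called from test
# modules like this; the snippet itself is not part of this file):
#
#     rc, out, err = assert_python_ok('-c', 'import sys; print(sys.version_info[0])')
#     assert out.strip() == b'3'
#
#     rc, out, err = assert_python_failure('-c', 'raise SystemExit(1)')
#     assert rc == 1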
def spawn_python(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kw):
"""Run a Python subprocess with the given arguments.
kw is extra keyword args to pass to subprocess.Popen. Returns a Popen
object.
"""
cmd_line = [sys.executable, '-E']
cmd_line.extend(args)
# Under Fedora (?), GNU readline can output junk on stderr when initialized,
# depending on the TERM setting. Setting TERM=vt100 is supposed to disable
# that. References:
# - http://reinout.vanrees.org/weblog/2009/08/14/readline-invisible-character-hack.html
# - http://stackoverflow.com/questions/15760712/python-readline-module-prints-escape-character-during-import
# - http://lists.gnu.org/archive/html/bug-readline/2007-08/msg00004.html
env = kw.setdefault('env', dict(os.environ))
env['TERM'] = 'vt100'
return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=stdout, stderr=stderr,
**kw)
def kill_python(p):
"""Run the given Popen process until completion and return stdout."""
p.stdin.close()
data = p.stdout.read()
p.stdout.close()
# try to cleanup the child so we don't appear to leak when running
# with regrtest -R.
p.wait()
subprocess._cleanup()
return data
def make_script(script_dir, script_basename, source, omit_suffix=False):
script_filename = script_basename
if not omit_suffix:
script_filename += os.extsep + 'py'
script_name = os.path.join(script_dir, script_filename)
# The script should be encoded to UTF-8, the default string encoding
script_file = open(script_name, 'w', encoding='utf-8')
script_file.write(source)
script_file.close()
importlib.invalidate_caches()
return script_name
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
if name_in_zip is None:
parts = script_name.split(os.sep)
if len(parts) >= 2 and parts[-2] == '__pycache__':
legacy_pyc = make_legacy_pyc(source_from_cache(script_name))
name_in_zip = os.path.basename(legacy_pyc)
script_name = legacy_pyc
else:
name_in_zip = os.path.basename(script_name)
zip_file.write(script_name, name_in_zip)
zip_file.close()
#if test.support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir, init_source=''):
os.mkdir(pkg_dir)
make_script(pkg_dir, '__init__', init_source)
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
source, depth=1, compiled=False):
unlink = []
init_name = make_script(zip_dir, '__init__', '')
unlink.append(init_name)
init_basename = os.path.basename(init_name)
script_name = make_script(zip_dir, script_basename, source)
unlink.append(script_name)
if compiled:
init_name = py_compile.compile(init_name, doraise=True)
script_name = py_compile.compile(script_name, doraise=True)
unlink.extend((init_name, script_name))
pkg_names = [os.sep.join([pkg_name]*i) for i in range(1, depth+1)]
script_name_in_zip = os.path.join(pkg_names[-1], os.path.basename(script_name))
zip_filename = zip_basename+os.extsep+'zip'
zip_name = os.path.join(zip_dir, zip_filename)
zip_file = zipfile.ZipFile(zip_name, 'w')
for name in pkg_names:
init_name_in_zip = os.path.join(name, init_basename)
zip_file.write(init_name, init_name_in_zip)
zip_file.write(script_name, script_name_in_zip)
zip_file.close()
for name in unlink:
os.unlink(name)
#if test.support.verbose:
# zip_file = zipfile.ZipFile(zip_name, 'r')
# print 'Contents of %r:' % zip_name
# zip_file.printdir()
# zip_file.close()
return zip_name, os.path.join(zip_name, script_name_in_zip)
|
{
"content_hash": "240643f2fff3664ed301ae31df77125d",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 112,
"avg_line_length": 39.66122448979592,
"alnum_prop": 0.61912112792014,
"repo_name": "MalloyPower/parsing-python",
"id": "80889b17f3f376d875cc57d1a41d4f0c1653843a",
"size": "9843",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.6.0/Lib/test/support/script_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = 'posical',
py_modules = ['posical'],
version = '0.1.4',
description = 'Calendar reform for Python',
author = 'Z. D. Smith',
author_email = 'zd@zdsmith.com',
url = 'https://github.com/subsetpark/posical',
download_url = 'https://github.com/subsetpark/posical/releases/tag/0.1',
install_requires = 'nonzero',
keywords = ['calendar', 'comte', 'date', 'humanism'],
classifiers = [],
)
|
{
"content_hash": "8f193dfd1618a93fba52c159c05e1783",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 74,
"avg_line_length": 32.07142857142857,
"alnum_prop": 0.6570155902004454,
"repo_name": "subsetpark/posical",
"id": "b6713da95555b6349b7b1979fc8b24e7f411ea24",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "20624"
}
],
"symlink_target": ""
}
|
"""Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: Simplified BSD
from functools import partial
import copy
import warnings
import numpy as np
from ..utils import verbose, get_config, set_config, logger
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..fixes import Counter, _in1d
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show)
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
vmax=None, colorbar=True, order=None, show=True,
units=None, scalings=None, cmap='RdBu_r',
fig=None, overlay_times=None):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers
colorbar : bool
Display or not a colorbar
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)).
show : bool
Show figure if True.
units : dict | None
        The units of the channel types used for axes labels. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
Returns
-------
figs : the list of matplotlib figures
One figure per channel displayed
"""
from scipy import ndimage
units = _handle_default('units', units)
scalings = _handle_default('scalings', scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
exclude='bads')[:5]
if set(units.keys()) != set(scalings.keys()):
raise ValueError('Scalings and units must have the same keys.')
picks = np.atleast_1d(picks)
if fig is not None and len(picks) > 1:
raise ValueError('Only single pick can be drawn to a figure.')
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
scale_vmin = True if vmin is None else False
scale_vmax = True if vmax is None else False
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if overlay_times is not None and len(overlay_times) != len(data):
        raise ValueError('size of overlay_times parameter (%s) does not '
'match the number of epochs (%s).'
% (len(overlay_times), len(data)))
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if ((times_min < epochs.tmin) or (times_max > epochs.tmax)):
warnings.warn('Some values in overlay_times fall outside of '
'the epochs time interval (between %s s and %s s)' %
(epochs.tmin, epochs.tmax))
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError('%s type not in scalings and units' % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None and (len(this_order) != len(this_data)):
raise ValueError('size of order parameter (%s) does not '
'match the number of epochs (%s).'
% (len(this_order), len(this_data)))
this_overlay_times = None
if overlay_times is not None:
this_overlay_times = overlay_times
if this_order is not None:
this_order = np.asarray(this_order)
this_data = this_data[this_order]
if this_overlay_times is not None:
this_overlay_times = this_overlay_times[this_order]
if sigma > 0.:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
axis=0)
plt.figure(this_fig.number)
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
if scale_vmin:
vmin *= scalings[ch_type]
if scale_vmax:
vmax *= scalings[ch_type]
im = ax1.imshow(this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
0, len(data)],
aspect='auto', origin='lower', interpolation='nearest',
vmin=vmin, vmax=vmax, cmap=cmap)
if this_overlay_times is not None:
plt.plot(1e3 * this_overlay_times, 0.5 + np.arange(len(this_data)),
'k', linewidth=2)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel('Epochs')
ax1.axis('auto')
ax1.axis('tight')
ax1.axvline(0, color='m', linewidth=3, linestyle='--')
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel('Time (ms)')
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color='m', linewidth=3, linestyle='--')
if colorbar:
plt.colorbar(im, cax=ax3)
tight_layout(fig=this_fig)
plt_show(show)
return figs
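# Illustrative usage sketch (not from the original module): plotting the image for
# a single picked channel of an existing Epochs instance. The pick index and the
# vmin/vmax limits (uV, for an EEG channel) are placeholders.
#
#     figs = plot_epochs_image(epochs, picks=[2], sigma=1., vmin=-250., vmax=250.,
#                              colorbar=True, cmap='RdBu_r', show=True)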
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
show=True):
"""Show the channel stats based on a drop_log from Epochs
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
threshold : float
The percentage threshold to use to decide whether or not to
plot. Default is zero (always plot).
n_max_plot : int
Maximum number of channels to show stats for.
subject : str
The subject name to use in the title of the plot.
color : tuple | str
Color to use for the bars.
width : float
Width of the bars.
ignore : list
The drop reasons to ignore.
show : bool
Show figure if True.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
from ..epochs import _drop_log_stats
perc = _drop_log_stats(drop_log, ignore)
scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
ch_names = np.array(list(scores.keys()))
fig = plt.figure()
if perc < threshold or len(ch_names) == 0:
plt.text(0, 0, 'No drops')
return fig
n_used = 0
for d in drop_log: # "d" is the list of drop reasons for each epoch
if len(d) == 0 or any(ch not in ignore for ch in d):
n_used += 1 # number of epochs not ignored
counts = 100 * np.array(list(scores.values()), dtype=float) / n_used
n_plot = min(n_max_plot, len(ch_names))
order = np.flipud(np.argsort(counts))
plt.title('%s: %0.1f%%' % (subject, perc))
x = np.arange(n_plot)
plt.bar(x, counts[order[:n_plot]], color=color, width=width)
plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
horizontalalignment='right')
plt.tick_params(axis='x', which='major', labelsize=10)
plt.ylabel('% of epochs rejected')
plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
plt.grid(True, axis='y')
plt_show(show)
return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
title_str, axes_handler):
"""Aux functioin"""
this = axes_handler[0]
for ii, data_, ax in zip(epoch_idx, data, axes):
for l, d in zip(ax.lines, data_[good_ch_idx]):
l.set_data(times, d)
if bad_ch_idx is not None:
bad_lines = [ax.lines[k] for k in bad_ch_idx]
for l, d in zip(bad_lines, data_[bad_ch_idx]):
l.set_data(times, d)
if title_str is not None:
ax.set_title(title_str % ii, fontsize=12)
ax.set_ylim(data.min(), data.max())
ax.set_yticks(list())
ax.set_xticks(list())
if vars(ax)[this]['reject'] is True:
# memorizing reject
for l in ax.lines:
l.set_color((0.8, 0.8, 0.8))
ax.get_figure().canvas.draw()
else:
# forgetting previous reject
for k in axes_handler:
if k == this:
continue
if vars(ax).get(k, {}).get('reject', None) is True:
for l in ax.lines[:len(good_ch_idx)]:
l.set_color('k')
if bad_ch_idx is not None:
for l in ax.lines[-len(bad_ch_idx):]:
l.set_color('r')
ax.get_figure().canvas.draw()
break
def _epochs_navigation_onclick(event, params):
"""Aux function"""
import matplotlib.pyplot as plt
p = params
here = None
if event.inaxes == p['back'].ax:
here = 1
elif event.inaxes == p['next'].ax:
here = -1
elif event.inaxes == p['reject-quit'].ax:
if p['reject_idx']:
p['epochs'].drop_epochs(p['reject_idx'])
plt.close(p['fig'])
plt.close(event.inaxes.get_figure())
if here is not None:
p['idx_handler'].rotate(here)
p['axes_handler'].rotate(here)
this_idx = p['idx_handler'][0]
_draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
p['data'][this_idx],
p['times'], p['axes'], p['title_str'],
p['axes_handler'])
# XXX don't ask me why
p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
"""Aux function"""
reject_color = (0.8, 0.8, 0.8)
ax = event.inaxes
if event.inaxes is None:
return
p = params
here = vars(ax)[p['axes_handler'][0]]
if here.get('reject', None) is False:
idx = here['idx']
if idx not in p['reject_idx']:
p['reject_idx'].append(idx)
for l in ax.lines:
l.set_color(reject_color)
here['reject'] = True
elif here.get('reject', None) is True:
idx = here['idx']
if idx in p['reject_idx']:
p['reject_idx'].pop(p['reject_idx'].index(idx))
good_lines = [ax.lines[k] for k in p['good_ch_idx']]
for l in good_lines:
l.set_color('k')
if p['bad_ch_idx'] is not None:
bad_lines = ax.lines[-len(p['bad_ch_idx']):]
for l in bad_lines:
l.set_color('r')
here['reject'] = False
ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
n_channels=20, title=None, show=True, block=False):
""" Visualize epochs
Bad epochs can be marked with a left click on top of the epoch. Bad
channels can be selected by clicking the channel name on the left side of
the main axes. Calling this function drops all the selected bad epochs as
well as bad epochs marked beforehand with rejection parameters.
Parameters
----------
epochs : instance of Epochs
The epochs object
picks : array-like of int | None
Channels to be included. If None only good data channels are used.
Defaults to None
scalings : dict | None
Scale factors for the traces. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)
n_epochs : int
The number of epochs per view. Defaults to 20.
n_channels : int
The number of channels per view. Defaults to 20.
title : str | None
The title of the window. If None, epochs name will be displayed.
Defaults to None.
show : bool
Show figure if True. Defaults to True
block : bool
Whether to halt program execution until the figure is closed.
Useful for rejecting bad trials on the fly by clicking on an epoch.
Defaults to False.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
Notes
-----
The arrow keys (up/down/left/right) can be used to navigate between
channels and epochs and the scaling can be adjusted with - and + (or =)
keys, but this depends on the backend matplotlib is configured to use
(e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
with f11 key. The amount of epochs and channels per view can be adjusted
with home/end and page down/page up keys. Butterfly plot can be toggled
with ``b`` key. Right mouse click adds a vertical line to the plot.
"""
epochs.drop_bad_epochs()
scalings = _handle_default('scalings_plot_raw', scalings)
projs = epochs.info['projs']
params = {'epochs': epochs,
'info': copy.deepcopy(epochs.info),
'bad_color': (0.8, 0.8, 0.8),
't_start': 0,
'histogram': None}
params['label_click_fun'] = partial(_pick_bad_channels, params=params)
_prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks)
_prepare_projectors(params)
_layout_figure(params)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
proj=False, bandwidth=None, adaptive=False, low_bias=True,
normalization='length', picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33, dB=True, n_jobs=1,
show=True, verbose=None):
"""Plot the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
picks : array-like of int | None
List of channels to use.
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted.
area_alpha : float
Alpha for the area.
dB : bool
If True, transform data to decibels.
n_jobs : int
Number of jobs to run in parallel.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
from .raw import _set_psd_plot_params
fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
epochs.info, proj, picks, ax, area_mode)
for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
ax_list)):
psds, freqs = psd_multitaper(epochs, picks=picks, fmin=fmin,
fmax=fmax, tmin=tmin, tmax=tmax,
bandwidth=bandwidth, adaptive=adaptive,
low_bias=low_bias,
normalization=normalization, proj=proj,
n_jobs=n_jobs)
# Convert PSDs to dB
if dB:
psds = 10 * np.log10(psds)
unit = 'dB'
else:
unit = 'power'
# mean across epochs and channels
psd_mean = np.mean(psds, axis=0).mean(axis=0)
if area_mode == 'std':
# std across channels
psd_std = np.std(np.mean(psds, axis=0), axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
np.max(np.mean(psds, axis=0), axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Freq (Hz)')
if ii == len(picks_list) // 2:
ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
ax.set_title(title)
ax.set_xlim(freqs[0], freqs[-1])
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
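# Illustrative usage sketch (not from the original module): a multitaper PSD over
# an existing Epochs instance, restricted to 2-40 Hz. All parameter values are
# placeholders.
#
#     fig = plot_epochs_psd(epochs, fmin=2., fmax=40., bandwidth=4.,
#                           dB=True, n_jobs=1, show=True)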
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
title, picks, order=None):
"""Helper for setting up the mne_browse_epochs window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.collections import LineCollection
from matplotlib.colors import colorConverter
epochs = params['epochs']
if picks is None:
picks = _handle_picks(epochs)
if len(picks) < 1:
raise RuntimeError('No appropriate channels found. Please'
' check your picks')
picks = sorted(picks)
# Reorganize channels
inds = list()
types = list()
for t in ['grad', 'mag']:
idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
if len(idxs) < 1:
continue
mask = _in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
if order is None:
order = ['eeg', 'eog', 'ecg', 'emg', 'ref_meg', 'stim', 'resp', 'misc',
'chpi', 'syst', 'ias', 'exci']
for ch_type in order:
pick_kwargs[ch_type] = True
idxs = pick_types(params['info'], **pick_kwargs)
if len(idxs) < 1:
continue
mask = _in1d(idxs, picks, assume_unique=True)
inds.append(idxs[mask])
types += [ch_type] * len(inds[-1])
pick_kwargs[ch_type] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(picks):
raise RuntimeError('Some channels not classified. Please'
' check your picks')
ch_names = [params['info']['ch_names'][x] for x in inds]
# set up plotting
size = get_config('MNE_BROWSE_RAW_SIZE')
n_epochs = min(n_epochs, len(epochs.events))
duration = len(epochs.times) * n_epochs
n_channels = min(n_channels, len(picks))
if size is not None:
size = size.split(',')
size = tuple(float(s) for s in size)
if title is None:
title = epochs.name
if epochs.name is None or len(title) == 0:
title = ''
fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
fig.canvas.set_window_title('mne_browse_epochs')
ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
ha='center', va='bottom', size=12, xycoords='axes fraction',
textcoords='offset points')
color = _handle_default('color', None)
ax.axis([0, duration, 0, 200])
ax2 = ax.twiny()
ax2.set_zorder(-1)
ax2.axis([0, duration, 0, 200])
ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Epochs')
ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
ax_vscroll.set_axis_off()
ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
facecolor='w', zorder=2))
ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
help_button = mpl.widgets.Button(ax_help_button, 'Help')
help_button.on_clicked(partial(_onclick_help, params=params))
# populate vertical and horizontal scrollbars
for ci in range(len(picks)):
if ch_names[ci] in params['info']['bads']:
this_color = params['bad_color']
else:
this_color = color[types[ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color,
zorder=3))
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
edgecolor='w', facecolor='w', zorder=4)
ax_vscroll.add_patch(vsel_patch)
ax_vscroll.set_ylim(len(types), 0)
ax_vscroll.set_title('Ch.')
# populate colors list
type_colors = [colorConverter.to_rgba(color[c]) for c in types]
colors = list()
for color_idx in range(len(type_colors)):
colors.append([type_colors[color_idx]] * len(epochs.events))
lines = list()
n_times = len(epochs.times)
for ch_idx in range(n_channels):
if len(colors) - 1 < ch_idx:
break
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
ax.add_collection(lc)
lines.append(lc)
times = epochs.times
data = np.zeros((params['info']['nchan'], len(times) * n_epochs))
ylim = (25., 0.) # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
# make shells for plotting traces
offset = ylim[0] / n_channels
offsets = np.arange(n_channels) * offset + (offset / 2.)
times = np.arange(len(times) * len(epochs.events))
epoch_times = np.arange(0, len(times), n_times)
ax.set_yticks(offsets)
ax.set_ylim(ylim)
ticks = epoch_times + 0.5 * n_times
ax.set_xticks(ticks)
ax2.set_xticks(ticks[:n_epochs])
labels = list(range(1, len(ticks) + 1)) # epoch numbers
ax.set_xticklabels(labels)
ax2.set_xticklabels(labels)
xlim = epoch_times[-1] + len(epochs.times)
ax_hscroll.set_xlim(0, xlim)
vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')
# fit horizontal scroll bar ticks
hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
hticks = list()
for tick in hscroll_ticks:
hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
hlabels = [x / n_times + 1 for x in hticks]
ax_hscroll.set_xticks(hticks)
ax_hscroll.set_xticklabels(hlabels)
for epoch_idx in range(len(epoch_times)):
ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
n_times, 1, facecolor='w',
edgecolor='w', alpha=0.6))
hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
text = ax.text(0, 0, 'blank', zorder=2, verticalalignment='baseline',
ha='left', fontweight='bold')
text.set_visible(False)
params.update({'fig': fig,
'ax': ax,
'ax2': ax2,
'ax_hscroll': ax_hscroll,
'ax_vscroll': ax_vscroll,
'vsel_patch': vsel_patch,
'hsel_patch': hsel_patch,
'lines': lines,
'projs': projs,
'ch_names': ch_names,
'n_channels': n_channels,
'n_epochs': n_epochs,
'scalings': scalings,
'duration': duration,
'ch_start': 0,
'colors': colors,
'def_colors': type_colors, # don't change at runtime
'picks': picks,
'bads': np.array(list(), dtype=int),
'data': data,
'times': times,
'epoch_times': epoch_times,
'offsets': offsets,
'labels': labels,
'scale_factor': 1.0,
'butterfly_scale': 1.0,
'fig_proj': None,
'types': np.array(types),
'inds': inds,
'vert_lines': list(),
'vertline_t': vertline_t,
'butterfly': False,
'text': text,
'ax_help_button': ax_help_button, # needed for positioning
'help_button': help_button, # reference needed for clicks
'fig_options': None,
'settings': [True, True, True, True],
'image_plot': None})
params['plot_fun'] = partial(_plot_traces, params=params)
# callbacks
callback_scroll = partial(_plot_onscroll, params=params)
fig.canvas.mpl_connect('scroll_event', callback_scroll)
callback_click = partial(_mouse_click, params=params)
fig.canvas.mpl_connect('button_press_event', callback_click)
callback_key = partial(_plot_onkey, params=params)
fig.canvas.mpl_connect('key_press_event', callback_key)
callback_resize = partial(_resize_event, params=params)
fig.canvas.mpl_connect('resize_event', callback_resize)
fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
params['callback_key'] = callback_key
# Draw event lines for the first time.
_plot_vert_lines(params)
def _prepare_projectors(params):
""" Helper for setting up the projectors for epochs browser """
import matplotlib.pyplot as plt
import matplotlib as mpl
epochs = params['epochs']
projs = params['projs']
if len(projs) > 0 and not epochs.proj:
ax_button = plt.subplot2grid((10, 15), (9, 14))
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
params['opt_button'] = opt_button
params['ax_button'] = ax_button
    # As the code here is shared with plot_evoked, some extra steps are needed:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_epochs_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
callback_proj('none')
def _plot_traces(params):
""" Helper for plotting concatenated epochs """
params['text'].set_visible(False)
ax = params['ax']
butterfly = params['butterfly']
if butterfly:
ch_start = 0
n_channels = len(params['picks'])
data = params['data'] * params['butterfly_scale']
else:
ch_start = params['ch_start']
n_channels = params['n_channels']
data = params['data'] * params['scale_factor']
offsets = params['offsets']
lines = params['lines']
epochs = params['epochs']
n_times = len(epochs.times)
tick_list = list()
start_idx = int(params['t_start'] / n_times)
end = params['t_start'] + params['duration']
end_idx = int(end / n_times)
xlabels = params['labels'][start_idx:]
event_ids = params['epochs'].events[:, 2]
params['ax2'].set_xticklabels(event_ids[start_idx:])
ax.set_xticklabels(xlabels)
ylabels = ax.yaxis.get_ticklabels()
# do the plotting
for line_idx in range(n_channels):
ch_idx = line_idx + ch_start
if line_idx >= len(lines):
break
elif ch_idx < len(params['ch_names']):
if butterfly:
ch_type = params['types'][ch_idx]
if ch_type == 'grad':
offset = offsets[0]
elif ch_type == 'mag':
offset = offsets[1]
elif ch_type == 'eeg':
offset = offsets[2]
elif ch_type == 'eog':
offset = offsets[3]
elif ch_type == 'ecg':
offset = offsets[4]
else:
lines[line_idx].set_segments(list())
else:
tick_list += [params['ch_names'][ch_idx]]
offset = offsets[line_idx]
this_data = data[ch_idx]
# subtraction here gets correct orientation for flipped ylim
ydata = offset - this_data
xdata = params['times'][:params['duration']]
num_epochs = np.min([params['n_epochs'],
len(epochs.events)])
segments = np.split(np.array((xdata, ydata)).T, num_epochs)
ch_name = params['ch_names'][ch_idx]
if ch_name in params['info']['bads']:
if not butterfly:
this_color = params['bad_color']
ylabels[line_idx].set_color(this_color)
this_color = np.tile((params['bad_color']), (num_epochs, 1))
for bad_idx in params['bads']:
if bad_idx < start_idx or bad_idx > end_idx:
continue
this_color[bad_idx - start_idx] = (1., 0., 0.)
lines[line_idx].set_zorder(1)
else:
this_color = params['colors'][ch_idx][start_idx:end_idx]
lines[line_idx].set_zorder(2)
if not butterfly:
ylabels[line_idx].set_color('black')
lines[line_idx].set_segments(segments)
lines[line_idx].set_color(this_color)
else:
lines[line_idx].set_segments(list())
# finalize plot
ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
False)
params['ax2'].set_xlim(params['times'][0],
params['times'][0] + params['duration'], False)
if butterfly:
factor = -1. / params['butterfly_scale']
labels = np.empty(20, dtype='S15')
labels.fill('')
ticks = ax.get_yticks()
idx_offset = 1
if 'grad' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
params['scalings']['grad'] *
1e13 * factor)
idx_offset += 4
if 'mag' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
params['scalings']['mag'] *
1e15 * factor)
idx_offset += 4
if 'eeg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
params['scalings']['eeg'] *
1e6 * factor)
idx_offset += 4
if 'eog' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
params['scalings']['eog'] *
1e6 * factor)
idx_offset += 4
if 'ecg' in params['types']:
labels[idx_offset + 1] = '0.00'
for idx in [idx_offset, idx_offset + 2]:
labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
params['scalings']['ecg'] *
1e6 * factor)
ax.set_yticklabels(labels, fontsize=12, color='black')
else:
ax.set_yticklabels(tick_list, fontsize=12)
params['vsel_patch'].set_y(ch_start)
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
"""Helper only needs to be called when proj is changed"""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
verbose=False)
start = int(params['t_start'] / len(params['epochs'].times))
n_epochs = params['n_epochs']
end = start + n_epochs
data = np.concatenate(params['epochs'][start:end].get_data(), axis=1)
if params['projector'] is not None:
data = np.dot(params['projector'], data)
types = params['types']
for pick, ind in enumerate(params['inds']):
params['data'][pick] = data[ind] / params['scalings'][types[pick]]
params['plot_fun']()
def _handle_picks(epochs):
"""Aux function to handle picks."""
if any('ICA' in k for k in epochs.ch_names):
picks = pick_types(epochs.info, misc=True, ref_meg=False,
exclude=[])
else:
picks = pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
ref_meg=False, exclude=[])
return picks
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
""" Helper function for plotting vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
if params['settings'][3]: # if zeroline visible
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1:
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=3, alpha=0.4)
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=1)
def _pick_bad_epochs(event, params):
"""Helper for selecting / dropping bad epochs"""
if 'ica' in params:
pos = (event.xdata, event.ydata)
_pick_bad_channels(pos, params)
return
n_times = len(params['epochs'].times)
start_idx = int(params['t_start'] / n_times)
xdata = event.xdata
xlim = event.inaxes.get_xlim()
epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
total_epochs = len(params['epochs'].events)
if epoch_idx > total_epochs - 1:
return
# remove bad epoch
if epoch_idx in params['bads']:
params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
params['ax_hscroll'].patches[epoch_idx].set_color('w')
params['ax_hscroll'].patches[epoch_idx].set_zorder(1)
params['plot_fun']()
return
# add bad epoch
params['bads'] = np.append(params['bads'], epoch_idx)
params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
for ch_idx in range(len(params['ch_names'])):
params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
params['plot_fun']()
def _pick_bad_channels(pos, params):
"""Helper function for selecting bad channels."""
text, ch_idx = _label2idx(params, pos)
if text is None:
return
if text in params['info']['bads']:
while text in params['info']['bads']:
params['info']['bads'].remove(text)
color = params['def_colors'][ch_idx]
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
else:
params['info']['bads'].append(text)
color = params['bad_color']
params['ax_vscroll'].patches[ch_idx + 1].set_color(color)
if 'ica' in params:
params['plot_fun']()
else:
params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
"""Function to handle scroll events."""
if event.key == 'control':
if event.step < 0:
event.key = '-'
else:
event.key = '+'
_plot_onkey(event, params)
return
if params['butterfly']:
return
_plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
"""Function to handle mouse click events."""
if event.inaxes is None:
if params['butterfly'] or not params['settings'][0]:
return
ax = params['ax']
ylim = ax.get_ylim()
pos = ax.transData.inverted().transform((event.x, event.y))
if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
return
if event.button == 1: # left click
params['label_click_fun'](pos)
elif event.button == 3: # right click
if 'ica' not in params:
_, ch_idx = _label2idx(params, pos)
if ch_idx is None:
return
if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
'eeg', 'eog']:
logger.info('Event related fields / potentials only '
'available for MEG and EEG channels.')
return
fig = plot_epochs_image(params['epochs'],
picks=params['inds'][ch_idx],
fig=params['image_plot'])[0]
params['image_plot'] = fig
elif event.button == 1: # left click
# vertical scroll bar changed
if event.inaxes == params['ax_vscroll']:
if params['butterfly']:
return
ch_start = max(int(event.ydata) - params['n_channels'] // 2, 0)
if params['ch_start'] != ch_start:
params['ch_start'] = ch_start
params['plot_fun']()
# horizontal scroll bar changed
elif event.inaxes == params['ax_hscroll']:
# find the closest epoch time
times = params['epoch_times']
offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
_plot_window(xdata, params)
# main axes
elif event.inaxes == params['ax']:
_pick_bad_epochs(event, params)
elif event.inaxes == params['ax'] and event.button == 2: # middle click
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
elif event.inaxes == params['ax'] and event.button == 3: # right click
n_times = len(params['epochs'].times)
xdata = int(event.xdata % n_times)
prev_xdata = 0
if len(params['vert_lines']) > 0:
prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
while len(params['vert_lines']) > 0:
params['ax'].lines.remove(params['vert_lines'][0][0])
params['vert_lines'].pop(0)
if prev_xdata == xdata: # lines removed
params['vertline_t'].set_text('')
params['plot_fun']()
return
ylim = params['ax'].get_ylim()
for epoch_idx in range(params['n_epochs']): # plot lines
pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
zorder=4))
params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
params['plot_fun']()
def _plot_onkey(event, params):
"""Function to handle key presses."""
import matplotlib.pyplot as plt
if event.key == 'down':
if params['butterfly']:
return
params['ch_start'] += params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'up':
if params['butterfly']:
return
params['ch_start'] -= params['n_channels']
_channels_changed(params, len(params['ch_names']))
elif event.key == 'left':
sample = params['t_start'] - params['duration']
sample = np.max([0, sample])
_plot_window(sample, params)
elif event.key == 'right':
sample = params['t_start'] + params['duration']
sample = np.min([sample, params['times'][-1] - params['duration']])
times = params['epoch_times']
xdata = times.flat[np.abs(times - sample).argmin()]
_plot_window(xdata, params)
elif event.key == '-':
if params['butterfly']:
params['butterfly_scale'] /= 1.1
else:
params['scale_factor'] /= 1.1
params['plot_fun']()
elif event.key in ['+', '=']:
if params['butterfly']:
params['butterfly_scale'] *= 1.1
else:
params['scale_factor'] *= 1.1
params['plot_fun']()
elif event.key == 'f11':
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
elif event.key == 'pagedown':
if params['n_channels'] == 1 or params['butterfly']:
return
n_channels = params['n_channels'] - 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
params['ax'].collections.pop()
params['ax'].set_yticks(params['offsets'])
params['lines'].pop()
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'pageup':
if params['butterfly']:
return
from matplotlib.collections import LineCollection
n_channels = params['n_channels'] + 1
ylim = params['ax'].get_ylim()
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['n_channels'] = n_channels
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
params['ax'].add_collection(lc)
params['ax'].set_yticks(params['offsets'])
params['lines'].append(lc)
params['vsel_patch'].set_height(n_channels)
params['plot_fun']()
elif event.key == 'home':
n_epochs = params['n_epochs'] - 1
if n_epochs <= 0:
return
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] -= n_times
params['hsel_patch'].set_width(params['duration'])
params['data'] = params['data'][:, :-n_times]
params['plot_update_proj_callback'](params)
elif event.key == 'end':
n_epochs = params['n_epochs'] + 1
n_times = len(params['epochs'].times)
if n_times * n_epochs > len(params['times']):
return
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
if len(params['vert_lines']) > 0:
ax = params['ax']
pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
zorder=3))
params['duration'] += n_times
if params['t_start'] + params['duration'] > len(params['times']):
params['t_start'] -= n_times
params['hsel_patch'].set_x(params['t_start'])
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
params['plot_update_proj_callback'](params)
elif event.key == 'b':
if params['fig_options'] is not None:
plt.close(params['fig_options'])
params['fig_options'] = None
_prepare_butterfly(params)
_plot_traces(params)
elif event.key == 'o':
if not params['butterfly']:
_open_options(params)
elif event.key == 'h':
_plot_histogram(params)
elif event.key == '?':
_onclick_help(event, params)
elif event.key == 'escape':
plt.close(params['fig'])
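# Key bindings handled above (for reference; derived from _plot_onkey):
#   up / down          scroll through channels
#   left / right       page backward / forward through epochs
#   pageup / pagedown  show one more / one fewer channel
#   home / end         show one fewer / one more epoch
#   - and + (or =)     decrease / increase the trace scaling
#   b                  toggle butterfly view
#   o                  open the view-settings dialog (non-butterfly only)
#   h                  plot the peak-to-peak histogram
#   f11                toggle full screen
#   ?                  show the help window
#   escape             close the figure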
def _prepare_butterfly(params):
"""Helper function for setting up butterfly plot."""
from matplotlib.collections import LineCollection
butterfly = not params['butterfly']
if butterfly:
types = set(['grad', 'mag', 'eeg', 'eog',
'ecg']) & set(params['types'])
if len(types) < 1:
return
params['ax_vscroll'].set_visible(False)
ax = params['ax']
labels = ax.yaxis.get_ticklabels()
for label in labels:
label.set_visible(True)
ylim = (5. * len(types), 0.)
ax.set_ylim(ylim)
offset = ylim[0] / (4. * len(types))
ticks = np.arange(0, ylim[0], offset)
ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
ax.set_yticks(ticks)
used_types = 0
params['offsets'] = [ticks[2]]
if 'grad' in types:
pos = (0, 1 - (ticks[2] / ylim[0]))
params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'mag' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eeg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'eog' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
params['offsets'].append(ticks[2 + used_types * 4])
if 'ecg' in types:
pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
ha='left', size=12, va='center',
xycoords='axes fraction', rotation=90,
textcoords='offset points')
used_types += 1
while len(params['lines']) < len(params['picks']):
lc = LineCollection(list(), antialiased=False, linewidths=0.5,
zorder=2, picker=3.)
ax.add_collection(lc)
params['lines'].append(lc)
else: # change back to default view
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
params['ax_vscroll'].set_visible(True)
while len(params['ax2'].texts) > 0:
params['ax2'].texts.pop()
n_channels = params['n_channels']
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
ylim = (25., 0.)
params['ax'].set_ylim(ylim)
offset = ylim[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
params['ax'].set_yticks(params['offsets'])
params['butterfly'] = butterfly
def _onpick(event, params):
"""Helper to add a channel name on click"""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([l is event.artist for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
"""Function to drop selected bad epochs. Called on closing of the plot."""
params['epochs'].drop_epochs(params['bads'])
params['epochs'].info['bads'] = params['info']['bads']
logger.info('Channels marked as bad: %s' % params['epochs'].info['bads'])
def _resize_event(event, params):
"""Function to handle resize event"""
size = ','.join([str(s) for s in params['fig'].get_size_inches()])
set_config('MNE_BROWSE_RAW_SIZE', size)
_layout_figure(params)
def _update_channels_epochs(event, params):
"""Function for changing the amount of channels and epochs per view."""
from matplotlib.collections import LineCollection
# Channels
n_channels = int(np.around(params['channel_slider'].val))
offset = params['ax'].get_ylim()[0] / n_channels
params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
while len(params['lines']) > n_channels:
params['ax'].collections.pop()
params['lines'].pop()
while len(params['lines']) < n_channels:
lc = LineCollection(list(), linewidths=0.5, antialiased=False,
zorder=2, picker=3.)
params['ax'].add_collection(lc)
params['lines'].append(lc)
params['ax'].set_yticks(params['offsets'])
params['vsel_patch'].set_height(n_channels)
params['n_channels'] = n_channels
# Epochs
n_epochs = int(np.around(params['epoch_slider'].val))
n_times = len(params['epochs'].times)
ticks = params['epoch_times'] + 0.5 * n_times
params['ax2'].set_xticks(ticks[:n_epochs])
params['n_epochs'] = n_epochs
params['duration'] = n_times * n_epochs
params['hsel_patch'].set_width(params['duration'])
params['data'] = np.zeros((len(params['data']), params['duration']))
if params['t_start'] + n_times * n_epochs > len(params['times']):
params['t_start'] = len(params['times']) - n_times * n_epochs
params['hsel_patch'].set_x(params['t_start'])
params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
"""Function for toggling axis labels on/off."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
"""Function for opening the option window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
if params['fig_options'] is not None:
# turn off options dialog
plt.close(params['fig_options'])
params['fig_options'] = None
return
width = 10
height = 3
fig_options = figure_nobar(figsize=(width, height), dpi=80)
fig_options.canvas.set_window_title('View settings')
params['fig_options'] = fig_options
ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
plt.axis('off')
params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
len(params['ch_names']),
valfmt='%0.0f',
valinit=params['n_channels'])
params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
len(params['epoch_times']),
valfmt='%0.0f',
valinit=params['n_epochs'])
params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
['Channel names visible',
'Event-id visible',
'Epoch-id visible',
'Zeroline visible'],
actives=params['settings'])
update = partial(_update_channels_epochs, params=params)
params['update_button'].on_clicked(update)
labels_callback = partial(_toggle_labels, params=params)
params['checkbox'].on_clicked(labels_callback)
close_callback = partial(_settings_closed, params=params)
params['fig_options'].canvas.mpl_connect('close_event', close_callback)
try:
params['fig_options'].canvas.draw()
params['fig_options'].show(warn=False)
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
except Exception:
pass
def _settings_closed(events, params):
"""Function to handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
"""Function for plotting histogram of peak-to-peak values."""
import matplotlib.pyplot as plt
epochs = params['epochs']
p2p = np.ptp(epochs.get_data(), axis=2)
types = list()
data = list()
if 'eeg' in params['types']:
eegs = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'eeg'])
data.append(eegs.ravel())
types.append('eeg')
if 'mag' in params['types']:
mags = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'mag'])
data.append(mags.ravel())
types.append('mag')
if 'grad' in params['types']:
grads = np.array([p2p.T[i] for i,
x in enumerate(params['types']) if x == 'grad'])
data.append(grads.ravel())
types.append('grad')
params['histogram'] = plt.figure()
scalings = _handle_default('scalings')
units = _handle_default('units')
titles = _handle_default('titles')
colors = _handle_default('color')
for idx in range(len(types)):
ax = plt.subplot(len(types), 1, idx + 1)
plt.xlabel(units[types[idx]])
plt.ylabel('count')
color = colors[types[idx]]
rej = None
if epochs.reject is not None and types[idx] in epochs.reject.keys():
rej = epochs.reject[types[idx]] * scalings[types[idx]]
rng = [0., rej * 1.1]
else:
rng = None
plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
range=rng)
if rej is not None:
ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
plt.title(titles[types[idx]])
params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
params['histogram'].subplots_adjust(hspace=0.6)
try:
params['histogram'].show(warn=False)
    except Exception:
pass
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _label2idx(params, pos):
"""Aux function for click on labels. Returns channel name and idx."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
|
{
"content_hash": "5650341d03ce3fe62f4895d03941c3fe",
"timestamp": "",
"source": "github",
"line_count": 1529,
"max_line_length": 79,
"avg_line_length": 41.00327011118378,
"alnum_prop": 0.5463840239895364,
"repo_name": "cmoutard/mne-python",
"id": "3f5d6511cf31443388bac9bdfafaee8b0836dbb5",
"size": "62694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/epochs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3171"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4669153"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""
Adds support for heat control units.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/thermostat.heat_control/
"""
import logging
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components import switch
from homeassistant.components.thermostat import (
STATE_HEAT, STATE_COOL, STATE_IDLE, ThermostatDevice)
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, STATE_ON, STATE_OFF
from homeassistant.helpers import condition
from homeassistant.helpers.event import track_state_change
DEPENDENCIES = ['switch', 'sensor']
TOL_TEMP = 0.3
CONF_NAME = 'name'
DEFAULT_NAME = 'Heat Control'
CONF_HEATER = 'heater'
CONF_SENSOR = 'target_sensor'
CONF_MIN_TEMP = 'min_temp'
CONF_MAX_TEMP = 'max_temp'
CONF_TARGET_TEMP = 'target_temp'
CONF_AC_MODE = 'ac_mode'
CONF_MIN_DUR = 'min_cycle_duration'
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = vol.Schema({
vol.Required("platform"): "heat_control",
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HEATER): cv.entity_id,
vol.Required(CONF_SENSOR): cv.entity_id,
vol.Optional(CONF_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_TARGET_TEMP): vol.Coerce(float),
vol.Optional(CONF_AC_MODE): vol.Coerce(bool),
vol.Optional(CONF_MIN_DUR): vol.All(cv.time_period, cv.positive_timedelta),
})
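# A minimal configuration sketch accepted by PLATFORM_SCHEMA above; the entity
# ids and temperatures are hypothetical and not part of the original component.
_EXAMPLE_CONFIG = {
    'platform': 'heat_control',
    'name': 'Study heater',
    'heater': 'switch.study_heater',
    'target_sensor': 'sensor.study_temperature',
    'min_temp': 15,
    'max_temp': 25,
    'target_temp': 21,
}
# PLATFORM_SCHEMA(_EXAMPLE_CONFIG) returns the validated config (temperatures
# coerced to float) or raises voluptuous.Invalid, e.g. if 'heater' is not an
# entity id.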
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the heat control thermostat."""
name = config.get(CONF_NAME)
heater_entity_id = config.get(CONF_HEATER)
sensor_entity_id = config.get(CONF_SENSOR)
min_temp = config.get(CONF_MIN_TEMP)
max_temp = config.get(CONF_MAX_TEMP)
target_temp = config.get(CONF_TARGET_TEMP)
ac_mode = config.get(CONF_AC_MODE)
min_cycle_duration = config.get(CONF_MIN_DUR)
add_devices([HeatControl(hass, name, heater_entity_id, sensor_entity_id,
min_temp, max_temp, target_temp, ac_mode,
min_cycle_duration)])
# pylint: disable=too-many-instance-attributes, abstract-method
class HeatControl(ThermostatDevice):
"""Representation of a HeatControl device."""
# pylint: disable=too-many-arguments
def __init__(self, hass, name, heater_entity_id, sensor_entity_id,
min_temp, max_temp, target_temp, ac_mode, min_cycle_duration):
"""Initialize the thermostat."""
self.hass = hass
self._name = name
self.heater_entity_id = heater_entity_id
self.ac_mode = ac_mode
self.min_cycle_duration = min_cycle_duration
self._active = False
self._cur_temp = None
self._min_temp = min_temp
self._max_temp = max_temp
self._target_temp = target_temp
self._unit = hass.config.units.temperature_unit
track_state_change(hass, sensor_entity_id, self._sensor_changed)
sensor_state = hass.states.get(sensor_entity_id)
if sensor_state:
self._update_temp(sensor_state)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._cur_temp
@property
def operation(self):
"""Return current operation ie. heat, cool, idle."""
if self.ac_mode:
cooling = self._active and self._is_device_active
return STATE_COOL if cooling else STATE_IDLE
else:
heating = self._active and self._is_device_active
return STATE_HEAT if heating else STATE_IDLE
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
def set_temperature(self, temperature):
"""Set new target temperature."""
self._target_temp = temperature
self._control_heating()
self.update_ha_state()
@property
def min_temp(self):
"""Return the minimum temperature."""
# pylint: disable=no-member
if self._min_temp:
return self._min_temp
else:
# get default temp from super class
return ThermostatDevice.min_temp.fget(self)
@property
def max_temp(self):
"""Return the maximum temperature."""
# pylint: disable=no-member
        if self._max_temp:
return self._max_temp
else:
# Get default temp from super class
return ThermostatDevice.max_temp.fget(self)
def _sensor_changed(self, entity_id, old_state, new_state):
"""Called when temperature changes."""
if new_state is None:
return
self._update_temp(new_state)
self._control_heating()
self.update_ha_state()
def _update_temp(self, state):
"""Update thermostat with latest state from sensor."""
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
try:
self._cur_temp = self.hass.config.units.temperature(
float(state.state), unit)
except ValueError as ex:
_LOGGER.error('Unable to update from sensor: %s', ex)
def _control_heating(self):
"""Check if we need to turn heating on or off."""
if not self._active and None not in (self._cur_temp,
self._target_temp):
self._active = True
_LOGGER.info('Obtained current and target temperature. '
'Heat control active.')
if not self._active:
return
if self.min_cycle_duration:
if self._is_device_active:
current_state = STATE_ON
else:
current_state = STATE_OFF
long_enough = condition.state(self.hass, self.heater_entity_id,
current_state,
self.min_cycle_duration)
if not long_enough:
return
if self.ac_mode:
too_hot = self._cur_temp - self._target_temp > TOL_TEMP
is_cooling = self._is_device_active
if too_hot and not is_cooling:
_LOGGER.info('Turning on AC %s', self.heater_entity_id)
switch.turn_on(self.hass, self.heater_entity_id)
elif not too_hot and is_cooling:
_LOGGER.info('Turning off AC %s', self.heater_entity_id)
switch.turn_off(self.hass, self.heater_entity_id)
else:
too_cold = self._target_temp - self._cur_temp > TOL_TEMP
is_heating = self._is_device_active
if too_cold and not is_heating:
_LOGGER.info('Turning on heater %s', self.heater_entity_id)
switch.turn_on(self.hass, self.heater_entity_id)
elif not too_cold and is_heating:
_LOGGER.info('Turning off heater %s', self.heater_entity_id)
switch.turn_off(self.hass, self.heater_entity_id)
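    # Worked example for the heating branch above (values are hypothetical):
    # with target_temp 21.0 and TOL_TEMP 0.3, the heater switch is turned on
    # once the sensor reads below 20.7 and turned off again as soon as the
    # reading is at or above 20.7; min_cycle_duration, if set, additionally
    # keeps the switch in its current state until it has been there long enough.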
@property
def _is_device_active(self):
"""If the toggleable device is currently active."""
return switch.is_on(self.hass, self.heater_entity_id)
|
{
"content_hash": "342f1e7045a901f1a4bb192ca3f9caf1",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 79,
"avg_line_length": 35.16744186046512,
"alnum_prop": 0.6101044835339241,
"repo_name": "varunr047/homefile",
"id": "faf4059f891917a1e78363c97ae049695c046807",
"size": "7561",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/thermostat/heat_control.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1594834"
},
{
"name": "JavaScript",
"bytes": "1216"
},
{
"name": "Python",
"bytes": "3696131"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
"""Tests for dm_control.composer.props.primitive."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import composer
from dm_control import mjcf
from dm_control.entities.props import primitive
import numpy as np
class PrimitiveTest(parameterized.TestCase):
def _make_free_prop(self, geom_type='sphere', size=(0.1,), **kwargs):
prop = primitive.Primitive(geom_type=geom_type, size=size, **kwargs)
arena = composer.Arena()
arena.add_free_entity(prop)
physics = mjcf.Physics.from_mjcf_model(arena.mjcf_model)
return prop, physics
@parameterized.parameters([
dict(geom_type='sphere', size=[0.1]),
dict(geom_type='capsule', size=[0.1, 0.2]),
dict(geom_type='cylinder', size=[0.1, 0.2]),
dict(geom_type='box', size=[0.1, 0.2, 0.3]),
dict(geom_type='ellipsoid', size=[0.1, 0.2, 0.3]),
])
def test_instantiation(self, geom_type, size):
name = 'foo'
rgba = [1., 0., 1., 0.5]
prop, physics = self._make_free_prop(
geom_type=geom_type, size=size, name=name, rgba=rgba)
# Check that the name and other kwargs are set correctly.
self.assertEqual(prop.mjcf_model.model, name)
np.testing.assert_array_equal(physics.bind(prop.geom).rgba, rgba)
# Check that we can step without anything breaking.
physics.step()
@parameterized.parameters([
dict(position=[0., 0., 0.]),
dict(position=[0.1, -0.2, 0.3]),
])
def test_position_observable(self, position):
prop, physics = self._make_free_prop()
prop.set_pose(physics, position=position)
observation = prop.observables.position(physics)
np.testing.assert_array_equal(position, observation)
@parameterized.parameters([
dict(quat=[1., 0., 0., 0.]),
dict(quat=[0., -1., 1., 0.]),
])
def test_orientation_observable(self, quat):
prop, physics = self._make_free_prop()
normalized_quat = np.array(quat) / np.linalg.norm(quat)
prop.set_pose(physics, quaternion=normalized_quat)
observation = prop.observables.orientation(physics)
np.testing.assert_array_almost_equal(normalized_quat, observation)
@parameterized.parameters([
dict(velocity=[0., 0., 0.]),
dict(velocity=[0.1, -0.2, 0.3]),
])
def test_linear_velocity_observable(self, velocity):
prop, physics = self._make_free_prop()
prop.set_velocity(physics, velocity=velocity)
observation = prop.observables.linear_velocity(physics)
np.testing.assert_array_almost_equal(velocity, observation)
@parameterized.parameters([
dict(angular_velocity=[0., 0., 0.]),
dict(angular_velocity=[0.1, -0.2, 0.3]),
])
def test_angular_velocity_observable(self, angular_velocity):
prop, physics = self._make_free_prop()
prop.set_velocity(physics, angular_velocity=angular_velocity)
observation = prop.observables.angular_velocity(physics)
np.testing.assert_array_almost_equal(angular_velocity, observation)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "c57cd776ac95023c7e923f8db1c71201",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 72,
"avg_line_length": 36.548780487804876,
"alnum_prop": 0.6770103436770103,
"repo_name": "deepmind/dm_control",
"id": "ae8f3265ccaa798bea5c59871fb765277c83d16f",
"size": "3664",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dm_control/entities/props/primitive_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136624"
},
{
"name": "Python",
"bytes": "2097331"
}
],
"symlink_target": ""
}
|
"""
Test for: command line arguments
"""
from nose.tools import eq_, assert_raises
from m2bk import app, config, const
import os
def _get_arg_cfg_file_name(arg, filename):
try:
app.init_parsecmdline([arg, filename])
except FileNotFoundError:
pass
return config.get_config_file_name()
def test_args_config():
# file names
f1 = 'f1.txt'
f2 = 'f2.txt'
f3 = 'f3.txt'
# ---
# Test whether -c works as --config
eq_(_get_arg_cfg_file_name('-c', f1),
_get_arg_cfg_file_name('--config', f1),
msg="-c and --config are not capturing the expected file name")
# ---
# Test -c and --config with more than one value
assert_raises(SystemExit, app.init_parsecmdline, ['-c', f1, f2])
# absolute path is expected for f1
eq_(config.get_config_file_name(), os.path.abspath(f1),
msg="Unexpected file, got '{f}' instead of '{f1}'".format(f=config.get_config_file_name(), f1=os.path.abspath(f1)))
# ---
# test when several config directives are specified
try:
app.init_parsecmdline(['-c', f1, '--config', f2, '-c', f3])
except FileNotFoundError:
pass
# file name should be f3
eq_(config.get_config_file_name(), os.path.abspath(f3),
msg="The last --config/-c argument should be the one whose file name"
"should be captured")
def test_args_noargs():
# Test whether m2bk tries to use default config file
# when no arguments are present
try:
app.init_parsecmdline()
except FileNotFoundError:
pass
eq_(config.get_config_file_name(), config.CONF_DEFAULT_FILE,
msg="CONF_DEFAULT_FILE expected, got '{f}'".format(f=config.get_config_file_name()))
|
{
"content_hash": "eb698ca410d1c2ff17e6edc60cc3a4fe",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 123,
"avg_line_length": 31.962962962962962,
"alnum_prop": 0.6251448435689455,
"repo_name": "axltxl/m2bk",
"id": "7c4ff567e605de02cd9160bd6a2e993763c590d4",
"size": "1751",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "328"
},
{
"name": "Python",
"bytes": "88592"
}
],
"symlink_target": ""
}
|
import importlib
import logging
import pkg_resources
from datetime import datetime, timedelta, time
import requests
DT_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
LOG = logging.getLogger(__name__)
def parse_timedelta(delta):
hours, minutes, seconds = map(int, delta.split(':'))
return timedelta(hours=hours, minutes=minutes, seconds=seconds)
def parse_datetime(dt_stamp):
return datetime.strptime(dt_stamp, DT_FORMAT)
def parse_time(t_stamp):
hour, minute, second = map(int, t_stamp.split(':'))
return time(hour, minute, second)
def stringify_datetime(dt_stamp):
return dt_stamp.strftime(DT_FORMAT)
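# Illustrative round trips for the helpers above (a sketch, not part of the
# original module); the timestamps are arbitrary example values:
#   parse_timedelta('01:30:00')             -> timedelta(hours=1, minutes=30)
#   parse_time('07:45:00')                  -> time(7, 45, 0)
#   parse_datetime('2020-01-01T12:00:00Z')  -> datetime(2020, 1, 1, 12, 0, 0)
#   stringify_datetime(datetime(2020, 1, 1, 12, 0, 0)) -> '2020-01-01T12:00:00Z'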
def import_function(func):
LOG.debug("finding function %s", func)
name, _, func_name = func.partition(':')
module = importlib.import_module(name)
try:
return getattr(module, func_name)
except AttributeError:
raise ImportError("No function named {}".format(func_name))
def iterate_subclasses(klass_map, target_klass):
for name, value in klass_map.items():
try:
klass_is_subclass = class_is_subclass(value, target_klass)
except TypeError:
pass
else:
if klass_is_subclass:
yield name, value
def class_is_subclass(value, target_klass):
return issubclass(value, target_klass) and value is not target_klass
def get_class_from_type_value(type_name, target_klass, conf, config):
klass_map = {
entry_point.name: entry_point.load()
for entry_point in pkg_resources.iter_entry_points(
"boss_{}".format(type_name)
)
if class_is_subclass(entry_point.load(), target_klass)
}
klass_type = conf['type']
try:
klass = klass_map[klass_type]
except (KeyError, ValueError):
raise ValueError(
"unknown {} type {!r}.\n"
"valid types: {}".format(
type_name,
klass_type,
klass_map.keys()
)
)
else:
return klass.from_configs(config, conf)
class request_maker(object):
def __init__(self, target):
self.target = target
def __call__(self, params):
response = requests.post(self.target, json=params)
LOG.info("%s [%s]", self.target, response.status_code)
LOG.debug(response.content)
|
{
"content_hash": "41c5a155c5ffac000a639060ba238844",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 72,
"avg_line_length": 26.75862068965517,
"alnum_prop": 0.618127147766323,
"repo_name": "yoshrote/boss",
"id": "b7d5d4ec69e77b9d3f24becb3d1cb78ec3f15d4b",
"size": "2328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boss/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37255"
}
],
"symlink_target": ""
}
|
"""Satori main module."""
__all__ = ('__version__',)
try:
import eventlet
eventlet.monkey_patch()
except ImportError:
pass
import pbr.version
from satori import shell
version_info = pbr.version.VersionInfo('satori')
try:
__version__ = version_info.version_string()
except AttributeError:
__version__ = None
def discover(address=None):
"""Temporary to demo python API.
TODO(zns): make it real
"""
shell.main(argv=[address])
return {'address': address, 'other info': '...'}
|
{
"content_hash": "de65c253159e560bef52319fed572506",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 52,
"avg_line_length": 17.93103448275862,
"alnum_prop": 0.6442307692307693,
"repo_name": "lil-cain/satori",
"id": "989ced303607671dbb0575fd7fa26fb95b7e20ab",
"size": "1084",
"binary": false,
"copies": "3",
"ref": "refs/heads/ohai_solo_install_dir",
"path": "satori/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2840"
},
{
"name": "Python",
"bytes": "211964"
},
{
"name": "Shell",
"bytes": "1111"
}
],
"symlink_target": ""
}
|
import unittest
import os
import test.functional as tf
from swift.common.middleware.s3api.etree import fromstring
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestS3ApiService(S3ApiBase):
def setUp(self):
super(TestS3ApiService, self).setUp()
def test_service(self):
# GET Service(without bucket)
status, headers, body = self.conn.make_request('GET')
self.assertEqual(status, 200)
self.assertCommonResponseHeaders(headers)
self.assertTrue(headers['content-type'] is not None)
# TODO; requires consideration
# self.assertEqual(headers['transfer-encoding'], 'chunked')
elem = fromstring(body, 'ListAllMyBucketsResult')
buckets = elem.findall('./Buckets/Bucket')
self.assertEqual(list(buckets), [])
owner = elem.find('Owner')
self.assertEqual(self.conn.user_id, owner.find('ID').text)
self.assertEqual(self.conn.user_id, owner.find('DisplayName').text)
# GET Service(with Bucket)
req_buckets = ('bucket', 'bucket2')
for bucket in req_buckets:
self.conn.make_request('PUT', bucket)
status, headers, body = self.conn.make_request('GET')
self.assertEqual(status, 200)
elem = fromstring(body, 'ListAllMyBucketsResult')
resp_buckets = elem.findall('./Buckets/Bucket')
self.assertEqual(len(list(resp_buckets)), 2)
for b in resp_buckets:
self.assertTrue(b.find('Name').text in req_buckets)
self.assertTrue(b.find('CreationDate') is not None)
def test_service_error_signature_not_match(self):
auth_error_conn = Connection(tf.config['s3_access_key'], 'invalid')
status, headers, body = auth_error_conn.make_request('GET')
self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
self.assertEqual(headers['content-type'], 'application/xml')
def test_service_error_no_date_header(self):
# Without x-amz-date/Date header, that makes 403 forbidden
status, headers, body = self.conn.make_request(
'GET', headers={'Date': '', 'x-amz-date': ''})
self.assertEqual(status, 403)
self.assertEqual(get_error_code(body), 'AccessDenied')
self.assertIn(b'AWS authentication requires a valid Date '
b'or x-amz-date header', body)
class TestS3ApiServiceSigV4(TestS3ApiService):
@classmethod
def setUpClass(cls):
os.environ['S3_USE_SIGV4'] = "True"
@classmethod
def tearDownClass(cls):
del os.environ['S3_USE_SIGV4']
def setUp(self):
super(TestS3ApiServiceSigV4, self).setUp()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4c8731b758ed9e80aa032ee384c33ae2",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 75,
"avg_line_length": 33.94186046511628,
"alnum_prop": 0.6546762589928058,
"repo_name": "swiftstack/swift",
"id": "77779cba078113e92bcf83e548919517096bae03",
"size": "3509",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/s3api/test_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3088"
},
{
"name": "HTML",
"bytes": "625"
},
{
"name": "Python",
"bytes": "12427848"
},
{
"name": "Shell",
"bytes": "8704"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import sys
from pdc_client.plugin_helpers import PDCClientPlugin, add_parser_arguments, extract_arguments
class BuildImagePlugin(PDCClientPlugin):
command = 'build-image'
def register(self):
self.set_command()
list_parser = self.add_action('list', help='list all build images')
list_parser.add_argument('--show-md5', action='store_true',
help='whether to display md5 checksums')
add_parser_arguments(list_parser, {'component_name': {},
'rpm_version': {},
'rpm_release': {},
'image_id': {},
'image_format': {},
'md5': {},
'archive_build_nvr': {},
'archive_name': {},
'archive_size': {},
'archive_md5': {},
'release_id': {}},
group='Filtering')
list_parser.set_defaults(func=self.list_build_image)
info_parser = self.add_action('info', help='display details of a build image')
info_parser.add_argument('image_id', metavar='IMAGE_ID')
info_parser.set_defaults(func=self.build_image_info)
def _print_build_image_list(self, build_images, with_md5=False):
fmt = '{image_id}'
if with_md5:
fmt = '{image_id:50}{md5}'
print(fmt.format(image_id='Image-ID', md5='MD5'))
print()
for build_image in build_images:
print(fmt.format(**build_image))
def list_build_image(self, args):
filters = extract_arguments(args)
build_images = self.client.get_paged(self.client['build-images']._, **filters)
if args.json:
print(self.to_json(list(build_images)))
return
self._print_build_image_list(build_images, args.show_md5)
def build_image_info(self, args, image_id=None):
image_id = image_id or args.image_id
build_images = self.client['build-images']._(image_id=image_id)
if not build_images['count']:
print('Not found')
sys.exit(1)
build_image = build_images['results'][0]
if args.json:
print(self.to_json(build_image))
return
fmt = '{0:20} {1}'
print(fmt.format('Image ID', build_image['image_id']))
print(fmt.format('Image Format', build_image['image_format']))
print(fmt.format('URL', build_image['url']))
print(fmt.format('MD5', build_image['md5']))
for key in ('releases', 'rpms'):
if build_image[key]:
print('\nRelated %s:' % key)
for value in build_image[key]:
print(' * {0}'.format(value))
if build_image['archives']:
print('\nRelated archives:')
fmt = '* {0:40}{1:60}{2}'
print(fmt.format('MD5', 'Name', 'Build NVR'))
fmt = ' {0:40}{1:60}{2}'
for archive in build_image['archives']:
print(fmt.format(archive['md5'], archive['name'], archive['build_nvr']))
PLUGIN_CLASSES = [BuildImagePlugin]
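# Example invocations of the sub-commands registered above (a sketch; assumes
# the client console script is called `pdc`, and IMAGE-1234 is a hypothetical id):
#   pdc build-image list --show-md5
#   pdc build-image info IMAGE-1234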
|
{
"content_hash": "d5507972381e3e06281e46adc5f0e2d9",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 94,
"avg_line_length": 39.264367816091955,
"alnum_prop": 0.4970725995316159,
"repo_name": "product-definition-center/pdc-client",
"id": "8e5b6f4c534e53d7368c4b00ac7fcad6d2fbfe33",
"size": "3550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdc_client/plugins/build_images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "291383"
},
{
"name": "Shell",
"bytes": "2021"
}
],
"symlink_target": ""
}
|
"""Tests decision_mapper"""
import ast
import unittest
from collections import OrderedDict
from xml.etree import ElementTree as ET
from o2a.converter.task import Task
from o2a.mappers import decision_mapper
class TestDecisionMapper(unittest.TestCase):
def setUp(self):
# language=XML
decision_node_str = """
<decision name="decision">
<switch>
<case to="task1">${firstNotNull('', '')}</case>
<case to="task2">True</case>
<default to="task3" />
</switch>
</decision>
"""
self.decision_node = ET.fromstring(decision_node_str)
def test_create_mapper(self):
mapper = self._get_decision_mapper()
# make sure everything is getting initialized correctly
self.assertEqual("test_id", mapper.name)
self.assertEqual(self.decision_node, mapper.oozie_node)
# test conversion from Oozie EL to Jinja
self.assertEqual("{{functions.first_not_null('','')}}", next(iter(mapper.case_dict)))
def test_to_tasks_and_relations(self):
mapper = self._get_decision_mapper()
tasks, relations = mapper.to_tasks_and_relations()
self.assertEqual(
[
Task(
task_id="test_id",
template_name="decision.tpl",
trigger_rule="one_success",
template_params={
"case_dict": OrderedDict(
[("{{functions.first_not_null('','')}}", "task1"), ("True", "task2")]
),
"default_case": "task3",
},
)
],
tasks,
)
self.assertEqual(relations, [])
def test_required_imports(self):
mapper = self._get_decision_mapper()
imps = mapper.required_imports()
imp_str = "\n".join(imps)
ast.parse(imp_str)
def _get_decision_mapper(self):
return decision_mapper.DecisionMapper(
oozie_node=self.decision_node, name="test_id", dag_name="DAG_NAME_B", job_properties={}, config={}
)
|
{
"content_hash": "849c1d812b3d32e20988fc8753acee8c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 110,
"avg_line_length": 32.03030303030303,
"alnum_prop": 0.554872280037843,
"repo_name": "GoogleCloudPlatform/oozie-to-airflow",
"id": "f9143166f471c4e144ad370af1843aa662d1f4ec",
"size": "2708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/mappers/test_decision_mapper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "528273"
},
{
"name": "Shell",
"bytes": "57460"
},
{
"name": "Smarty",
"bytes": "31948"
}
],
"symlink_target": ""
}
|
"""
This file is a placeholder for when new clustering algorithms are added to
nussl at some point and need to be tested.
"""
from sklearn import datasets
from sklearn.metrics import adjusted_mutual_info_score
from sklearn.preprocessing import StandardScaler
import sklearn
import numpy as np
import pytest
from nussl.ml import cluster
import matplotlib.pyplot as plt
import nussl
@pytest.fixture(scope="module")
def cluster_data():
np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3,
'min_samples': 20,
'xi': 0.05,
'min_cluster_size': 0.1}
data = [
('noisy_circles', noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2,
'min_samples': 20, 'xi': 0.25}),
('noisy_moons', noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
('varied', varied, {'eps': .18, 'n_neighbors': 2,
'min_samples': 5, 'xi': 0.035, 'min_cluster_size': .2}),
('aniso', aniso, {'eps': .15, 'n_neighbors': 2,
'min_samples': 20, 'xi': 0.1, 'min_cluster_size': .2}),
('blobs', blobs, {}),
]
yield data, default_base
|
{
"content_hash": "422a7c475742ac5d67493a07a5bf70a0",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 92,
"avg_line_length": 37.671875,
"alnum_prop": 0.5350476980506014,
"repo_name": "interactiveaudiolab/nussl",
"id": "dfb8fd5b6ddf3fafed74f46b9789af3b1101315e",
"size": "2411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/ml/test_clustering.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
}
|
import string
# open the sample file; a raw string keeps the Windows path backslashes literal
infile = open(r'D:\Code\Python4test\sampleFile.txt')
filelines = infile.readlines()
infile.close()
word_cnt = {}
for line in filelines:
line = line.rstrip()
    # strip punctuation and digits so only words remain
    pun_num = string.punctuation + string.digits
    line = line.translate(line.maketrans('', '', pun_num))
line = line.lower()
word_list = line.split(' ')
for word in word_list:
if word in word_cnt:
word_cnt[word] += 1
else:
word_cnt[word] = 1
# sort the dictionary entries by count, largest first
result = sorted(word_cnt.items(), key=lambda d: d[1], reverse=True)
print(result)
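# A roughly equivalent, more compact version of the counting above, shown only
# as a sketch (collections.Counter from the standard library; same hypothetical
# file path):
#   from collections import Counter
#   with open(r'D:\Code\Python4test\sampleFile.txt') as f:
#       text = f.read().lower()
#   text = text.translate(str.maketrans('', '', string.punctuation + string.digits))
#   print(Counter(text.split()).most_common())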
|
{
"content_hash": "1fc6e838c01b08e9b14dd86bd9df3d8a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.6241610738255033,
"repo_name": "ismethr/Code",
"id": "e4467dc84df80bfb594bf4203e98110dd8348223",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python4test/andy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "17"
},
{
"name": "C",
"bytes": "35480"
},
{
"name": "C++",
"bytes": "77652"
},
{
"name": "CMake",
"bytes": "17870"
},
{
"name": "M",
"bytes": "468"
},
{
"name": "Matlab",
"bytes": "63483"
},
{
"name": "Python",
"bytes": "20097"
}
],
"symlink_target": ""
}
|
def combination_util(arr, n, r, index, data, i):
"""
Current combination is ready to be printed, print it
arr[] ---> Input Array
data[] ---> Temporary array to store current combination
start & end ---> Staring and Ending indexes in arr[]
index ---> Current index in data[]
r ---> Size of a combination to be printed
"""
if index == r:
for j in range(r):
print(data[j], end=" ")
print(" ")
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
data[index] = arr[i]
combination_util(arr, n, r, index + 1, data, i + 1)
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
# A temporary array to store all combination one by one
data = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
# Driver code to check the function above
arr = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
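    # Expected output of the driver above: the C(5, 3) = 10 combinations
    # 10 20 30, 10 20 40, 10 20 50, 10 30 40, 10 30 50,
    # 10 40 50, 20 30 40, 20 30 50, 20 40 50, 30 40 50
    # (each printed on its own line, in this include-first recursion order).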
|
{
"content_hash": "59d286d13538efa438ed37dc7db417f4",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 60,
"avg_line_length": 34.21951219512195,
"alnum_prop": 0.6058446186742694,
"repo_name": "TheAlgorithms/Python",
"id": "819fd8106def1f371e5431e9c0944eb1528305fd",
"size": "1477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic_programming/subset_generation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2601694"
}
],
"symlink_target": ""
}
|
from troposphere import Parameter, Ref, Template, Tags, If, Equals, Not, Join
from troposphere.constants import KEY_PAIR_NAME, SUBNET_ID, M4_LARGE, NUMBER
import troposphere.emr as emr
import troposphere.iam as iam
scaling_policy = emr.SimpleScalingPolicyConfiguration(
AdjustmentType="EXACT_CAPACITY",
ScalingAdjustment="1",
CoolDown="300"
)
kms_key = 'arn:aws:kms:us-east-1:123456789012:key/1234-1234-1234-1234-1234'
security_configuration = {
'EncryptionConfiguration': {
'EnableInTransitEncryption': 'true',
'InTransitEncryptionConfiguration': {
'TLSCertificateConfiguration': {
'CertificateProviderType': 'PEM',
'S3Object': 's3://MyConfigStore/artifacts/MyCerts.zip'
}
},
'EnableAtRestEncryption': 'true',
'AtRestEncryptionConfiguration': {
'S3EncryptionConfiguration': {
'EncryptionMode': 'SSE-KMS',
'AwsKmsKey': kms_key
},
'LocalDiskEncryptionConfiguration': {
'EncryptionKeyProviderType': 'AwsKms',
'AwsKmsKey': kms_key
}
}
}
}
def generate_rules(rules_name):
global emr, scaling_policy
rules = [
emr.ScalingRule(
Name=rules_name,
Description="%s rules" % rules_name,
Action=emr.ScalingAction(
Market="ON_DEMAND",
SimpleScalingPolicyConfiguration=scaling_policy
),
Trigger=emr.ScalingTrigger(
CloudWatchAlarmDefinition=emr.CloudWatchAlarmDefinition(
ComparisonOperator="GREATER_THAN",
EvaluationPeriods="120",
MetricName="TestMetric",
Namespace="AWS/ElasticMapReduce",
Period="300",
Statistic="AVERAGE",
Threshold="50",
Unit="PERCENT",
Dimensions=[
emr.MetricDimension(
'my.custom.master.property',
'my.custom.master.value'
)
]
)
)
)
]
return rules
template = Template()
template.add_description(
"Sample CloudFormation template for creating an EMR cluster"
)
keyname = template.add_parameter(Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH "
"to the instances",
Type=KEY_PAIR_NAME
))
subnet = template.add_parameter(Parameter(
"Subnet",
Description="Subnet ID for creating the EMR cluster",
Type=SUBNET_ID
))
spot = template.add_parameter(Parameter(
"SpotPrice",
Description="Spot price (or use 0 for 'on demand' instance)",
Type=NUMBER,
Default="0.1"
))
withSpotPrice = "WithSpotPrice"
template.add_condition(withSpotPrice, Not(Equals(Ref(spot), "0")))
gcTimeRatio = template.add_parameter(Parameter(
"GcTimeRatioValue",
Description="Hadoop name node garbage collector time ratio",
Type=NUMBER,
Default="19"
))
# IAM roles required by EMR
emr_service_role = template.add_resource(iam.Role(
'EMRServiceRole',
AssumeRolePolicyDocument={
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": [
"elasticmapreduce.amazonaws.com"
]
},
"Action": ["sts:AssumeRole"]
}]
},
ManagedPolicyArns=[
'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole'
]
))
emr_autoscaling_role = "EMR_AutoScaling_DefaultRole"
emr_job_flow_role = template.add_resource(iam.Role(
"EMRJobFlowRole",
AssumeRolePolicyDocument={
"Statement": [{
"Effect": "Allow",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
},
"Action": ["sts:AssumeRole"]
}]
},
ManagedPolicyArns=[
'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role'
]
))
emr_instance_profile = template.add_resource(iam.InstanceProfile(
"EMRInstanceProfile",
Roles=[Ref(emr_job_flow_role)]
))
# EMR Cluster Resource
security_config = template.add_resource(emr.SecurityConfiguration(
'EMRSecurityConfiguration',
Name="EMRSampleClusterSecurityConfiguration",
SecurityConfiguration=security_configuration,
))
cluster = template.add_resource(emr.Cluster(
"EMRSampleCluster",
Name="EMR Sample Cluster",
ReleaseLabel='emr-4.4.0',
SecurityConfiguration=Ref(security_config),
BootstrapActions=[emr.BootstrapActionConfig(
Name='Dummy bootstrap action',
ScriptBootstrapAction=emr.ScriptBootstrapActionConfig(
Path='file:/usr/share/aws/emr/scripts/install-hue',
Args=["dummy", "parameter"]
)
)],
Configurations=[
emr.Configuration(
Classification="core-site",
ConfigurationProperties={
'hadoop.security.groups.cache.secs': '250'
}
),
emr.Configuration(
Classification="mapred-site",
ConfigurationProperties={
'mapred.tasktracker.map.tasks.maximum': '2',
'mapreduce.map.sort.spill.percent': '90',
'mapreduce.tasktracker.reduce.tasks.maximum': '5'
}
),
emr.Configuration(
Classification="hadoop-env",
Configurations=[
emr.Configuration(
Classification="export",
ConfigurationProperties={
"HADOOP_DATANODE_HEAPSIZE": "2048",
"HADOOP_NAMENODE_OPTS": Join("", ["-XX:GCTimeRatio=",
Ref(gcTimeRatio)])
}
)
]
)
],
JobFlowRole=Ref(emr_instance_profile),
ServiceRole=Ref(emr_service_role),
AutoScalingRole=Ref(emr_autoscaling_role),
Instances=emr.JobFlowInstancesConfig(
Ec2KeyName=Ref(keyname),
Ec2SubnetId=Ref(subnet),
MasterInstanceGroup=emr.InstanceGroupConfigProperty(
Name="Master Instance",
InstanceCount="1",
InstanceType=M4_LARGE,
Market="ON_DEMAND",
AutoScalingPolicy=emr.AutoScalingPolicy(
Constraints=emr.ScalingConstraints(
MinCapacity="1",
MaxCapacity="3"
),
Rules=generate_rules("MasterAutoScalingPolicy")
)
),
CoreInstanceGroup=emr.InstanceGroupConfigProperty(
Name="Core Instance",
BidPrice=If(withSpotPrice, Ref(spot), Ref("AWS::NoValue")),
Market=If(withSpotPrice, "SPOT", "ON_DEMAND"),
AutoScalingPolicy=emr.AutoScalingPolicy(
Constraints=emr.ScalingConstraints(
MinCapacity="1",
MaxCapacity="3"
),
Rules=generate_rules("CoreAutoScalingPolicy"),
),
EbsConfiguration=emr.EbsConfiguration(
EbsBlockDeviceConfigs=[
emr.EbsBlockDeviceConfigs(
VolumeSpecification=emr.VolumeSpecification(
SizeInGB="10",
VolumeType="gp2"
),
VolumesPerInstance="1"
)
],
EbsOptimized="true"
),
InstanceCount="1",
InstanceType=M4_LARGE,
)
),
Applications=[
emr.Application(Name="Hadoop"),
emr.Application(Name="Hive"),
emr.Application(Name="Mahout"),
emr.Application(Name="Pig"),
emr.Application(Name="Spark")
],
VisibleToAllUsers="true",
Tags=Tags(
Name="EMR Sample Cluster"
)
))
step = template.add_resource(emr.Step(
'TestStep',
Name="TestStep",
ActionOnFailure='CONTINUE',
HadoopJarStep=emr.HadoopJarStepConfig(
Args=["5", "10"],
Jar="s3://emr-cfn-test/hadoop-mapreduce-examples-2.6.0.jar",
MainClass="pi",
StepProperties=[
emr.KeyValue('my.custom.property', 'my.custom.value')
]
),
JobFlowId=Ref(cluster)
))
print(template.to_json())
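# --- Optional usage sketch (not part of the original example) ---
# Besides printing the rendered JSON, the template could also be written to a
# file and checked with the AWS CLI, e.g.
# `aws cloudformation validate-template --template-body file://emr_sample_cluster.template.json`.
# The file name below is an assumption chosen purely for illustration.
with open("emr_sample_cluster.template.json", "w") as template_file:
    template_file.write(template.to_json())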
|
{
"content_hash": "df80eee257e88e2f09ebe7461624376c",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 30.974729241877256,
"alnum_prop": 0.5503496503496503,
"repo_name": "7digital/troposphere",
"id": "add86d953594bd4e5b7abdf8f2708754c5e63955",
"size": "8580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/EMR_Cluster.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "356311"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Summary'
db.create_table(u'liberia_summary', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date', self.gf('django.db.models.fields.DateField')(null=True)),
('total_deaths', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('deaths_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('deaths_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_cases', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('cases_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('cases_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_contacts', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('contacts_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('contacts_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_hcw_deaths', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('hcw_deaths_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('hcw_deaths_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_hcw_cases', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('hcw_cases_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('hcw_cases_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_labs_processed', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('labs_processed_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('labs_processed_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_labs_collected', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('labs_collected_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('labs_collected_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('total_fcr', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('fcr_last_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
('fcr_this_week', self.gf('django.db.models.fields.IntegerField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal(u'liberia', ['Summary'])
def backwards(self, orm):
# Deleting model 'Summary'
db.delete_table(u'liberia_summary')
models = {
u'liberia.author': {
'Meta': {'object_name': 'Author'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'liberia.crisisnetentry': {
'Meta': {'ordering': "['createdAt', 'is_geocoded', 'author', 'source']", 'object_name': 'CrisisNetEntry'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['liberia.Author']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'createdAt': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_geocoded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'lifespan': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'longitude': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'publishedAt': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'remoteID': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['liberia.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'updatedAt': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'})
},
u'liberia.datestats': {
'Meta': {'ordering': "['date']", 'object_name': 'DateStats'},
'contacts_completed_observation': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contacts_lost_followup': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'new_confirmed_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'new_probable_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'new_suspected_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'news_contacts': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'original_date': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'today_deaths_all': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'today_deaths_confirmed': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'today_deaths_probable': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'today_deaths_suspected': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_confirmed_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_all': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_confirmed': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_probable': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_suspected': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_probable_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_suspected_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'liberia.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'liberia.locationsitrep': {
'CFR': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'Meta': {'ordering': "['location']", 'object_name': 'LocationSitRep'},
'cases_cum': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cases_cum_confirmed': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cases_cum_probable': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cases_cum_suspected': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'date_span': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'deaths': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hc_workers': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_cases_cum': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_deaths_cum': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['liberia.Location']", 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'sit_rep': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['liberia.SitRep']", 'null': 'True', 'blank': 'True'}),
'total_deaths_all': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_confirmed': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths_suspected': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_probable_deaths': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'liberia.sitrep': {
'Meta': {'ordering': "['num']", 'object_name': 'SitRep'},
'date': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'date_span': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'liberia.summary': {
'Meta': {'ordering': "['date']", 'object_name': 'Summary'},
'cases_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'cases_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contacts_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'contacts_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'deaths_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'deaths_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'fcr_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'fcr_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_cases_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_cases_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_deaths_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hcw_deaths_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'labs_collected_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'labs_collected_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'labs_processed_last_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'labs_processed_this_week': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_contacts': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_deaths': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_fcr': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_hcw_cases': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_hcw_deaths': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_labs_collected': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'total_labs_processed': ('django.db.models.fields.IntegerField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'liberia.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['liberia']
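# Usage note (illustrative, not part of the original migration): with South
# installed and the 'liberia' app in INSTALLED_APPS, this schema migration
# would typically be applied from the project root with
#     python manage.py migrate liberia
# and reversed (running backwards() above) by migrating back to the previous
# migration in the series.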
|
{
"content_hash": "6bd0f9fe3b5a5d81aa0b4a177227072e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 148,
"avg_line_length": 95.16265060240964,
"alnum_prop": 0.5719440400075964,
"repo_name": "caseymm/django-ebola",
"id": "48710c982842608678acac8949eaab06e4953abb",
"size": "15821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "liberia/migrations/0014_auto__add_summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Makefile",
"bytes": "5612"
},
{
"name": "Python",
"bytes": "467739"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
"""
Class for VM tasks like spawn, snapshot, suspend, resume etc.
"""
import base64
import os
import time
import urllib
import urllib2
import uuid
from nova.compute import power_state
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import network_utils
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmware_images
vmware_vif_driver_opt = cfg.StrOpt('vmware_vif_driver',
default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver',
help='The VMWare VIF driver to configure the VIFs.')
CONF = cfg.CONF
CONF.register_opt(vmware_vif_driver_opt)
LOG = logging.getLogger(__name__)
VMWARE_POWER_STATES = {
'poweredOff': power_state.SHUTDOWN,
'poweredOn': power_state.RUNNING,
'suspended': power_state.PAUSED}
class VMWareVMOps(object):
"""Management class for VM-related tasks."""
def __init__(self, session):
"""Initializer."""
self._session = session
self._vif_driver = importutils.import_object(CONF.vmware_vif_driver)
def list_instances(self):
"""Lists the VM instances that are registered with the ESX host."""
LOG.debug(_("Getting list of instances"))
vms = self._session._call_method(vim_util, "get_objects",
"VirtualMachine",
["name", "runtime.connectionState"])
lst_vm_names = []
for vm in vms:
vm_name = None
conn_state = None
for prop in vm.propSet:
if prop.name == "name":
vm_name = prop.val
elif prop.name == "runtime.connectionState":
conn_state = prop.val
            # Ignoring the orphaned or inaccessible VMs
if conn_state not in ["orphaned", "inaccessible"]:
lst_vm_names.append(vm_name)
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
def spawn(self, context, instance, image_meta, network_info):
"""
Creates a VM instance.
Steps followed are:
1. Create a VM with no disk and the specifics in the instance object
like RAM size.
2. Create a dummy vmdk of the size of the disk file that is to be
uploaded. This is required just to create the metadata file.
3. Delete the -flat.vmdk file created in the above step and retain
the metadata .vmdk file.
4. Upload the disk file.
5. Attach the disk to the VM by reconfiguring the same.
6. Power on the VM.
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
def _get_datastore_ref():
"""Get the datastore list and choose the first local storage."""
data_stores = self._session._call_method(vim_util, "get_objects",
"Datastore", ["summary.type", "summary.name"])
for elem in data_stores:
ds_name = None
ds_type = None
for prop in elem.propSet:
if prop.name == "summary.type":
ds_type = prop.val
elif prop.name == "summary.name":
ds_name = prop.val
# Local storage identifier
if ds_type == "VMFS":
data_store_name = ds_name
return data_store_name
if data_store_name is None:
msg = _("Couldn't get a local Datastore reference")
LOG.error(msg, instance=instance)
raise exception.NovaException(msg)
data_store_name = _get_datastore_ref()
def _get_image_properties():
"""
Get the Size of the flat vmdk file that is there on the storage
repository.
"""
_image_info = vmware_images.get_vmdk_size_and_properties(context,
instance.image_ref,
instance)
image_size, image_properties = _image_info
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
"lsiLogic")
return vmdk_file_size_in_kb, os_type, adapter_type
vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
def _get_vmfolder_and_res_pool_mors():
"""Get the Vm folder ref from the datacenter."""
dc_objs = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["vmFolder"])
# There is only one default datacenter in a standalone ESX host
vm_folder_mor = dc_objs[0].propSet[0].val
# Get the resource pool. Taking the first resource pool coming our
# way. Assuming that is the default resource pool.
res_pool_mor = self._session._call_method(vim_util, "get_objects",
"ResourcePool")[0].obj
return vm_folder_mor, res_pool_mor
vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
def _check_if_network_bridge_exists(network_name):
network_ref = network_utils.get_network_with_the_name(
self._session, network_name)
if network_ref is None:
raise exception.NetworkNotFoundForBridge(bridge=network_name)
return network_ref
def _get_vif_infos():
vif_infos = []
for (network, mapping) in network_info:
mac_address = mapping['mac']
network_name = network['bridge']
if mapping.get('should_create_vlan'):
network_ref = self._vif_driver.ensure_vlan_bridge(
self._session, network)
else:
network_ref = _check_if_network_bridge_exists(network_name)
vif_infos.append({'network_name': network_name,
'mac_address': mac_address,
'network_ref': network_ref,
})
return vif_infos
vif_infos = _get_vif_infos()
# Get the create vm config spec
config_spec = vm_util.get_vm_create_spec(
client_factory, instance,
data_store_name, vif_infos, os_type)
def _execute_create_vm():
"""Create VM on ESX host."""
LOG.debug(_("Creating VM on the ESX host"), instance=instance)
# Create the VM on the ESX host
vm_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVM_Task", vm_folder_mor,
config=config_spec, pool=res_pool_mor)
self._session._wait_for_task(instance['uuid'], vm_create_task)
LOG.debug(_("Created VM on the ESX host"), instance=instance)
_execute_create_vm()
# Set the machine.id parameter of the instance to inject
# the NIC configuration inside the VM
if CONF.flat_injected:
self._set_machine_id(client_factory, instance, network_info)
# Naming the VM files in correspondence with the VM instance name
# The flat vmdk file name
flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
instance.name)
# The vmdk meta-data file
uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
flat_uploaded_vmdk_name)
uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
uploaded_vmdk_name)
def _create_virtual_disk():
"""Create a virtual disk of the size of flat vmdk file."""
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
# Here we assume thick provisioning and lsiLogic for the adapter
# type
LOG.debug(_("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the ESX host local store"
" %(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"adapter_type": adapter_type,
"data_store_name": data_store_name},
instance=instance)
vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
vmdk_file_size_in_kb, adapter_type)
vmdk_create_task = self._session._call_method(
self._session._get_vim(),
"CreateVirtualDisk_Task",
service_content.virtualDiskManager,
name=uploaded_vmdk_path,
datacenter=self._get_datacenter_name_and_ref()[0],
spec=vmdk_create_spec)
self._session._wait_for_task(instance['uuid'], vmdk_create_task)
LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB on the ESX host local store "
"%(data_store_name)s") %
{"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
"data_store_name": data_store_name},
instance=instance)
_create_virtual_disk()
def _delete_disk_file():
LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
"on the ESX host local"
"store %(data_store_name)s") %
{"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
"data_store_name": data_store_name},
instance=instance)
# Delete the -flat.vmdk file created. .vmdk file is retained.
vmdk_delete_task = self._session._call_method(
self._session._get_vim(),
"DeleteDatastoreFile_Task",
service_content.fileManager,
name=flat_uploaded_vmdk_path)
self._session._wait_for_task(instance['uuid'], vmdk_delete_task)
LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
"ESX host local store %(data_store_name)s") %
{"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
"data_store_name": data_store_name},
instance=instance)
_delete_disk_file()
cookies = self._session._get_vim().client.options.transport.cookiejar
def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
{'image_ref': instance.image_ref,
'data_store_name': data_store_name},
instance=instance)
# Upload the -flat.vmdk file whose meta-data file we just created
# above
vmware_images.fetch_image(
context,
instance.image_ref,
instance,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=data_store_name,
cookies=cookies,
file_path=flat_uploaded_vmdk_name)
LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
{'image_ref': instance.image_ref,
'data_store_name': data_store_name},
instance=instance)
_fetch_image_on_esx_datastore()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
def _attach_vmdk_to_the_vm():
"""
Attach the vmdk uploaded to the VM. VM reconfigure is done
to do so.
"""
vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
client_factory,
vmdk_file_size_in_kb, uploaded_vmdk_path,
adapter_type)
LOG.debug(_("Reconfiguring VM instance to attach the image disk"),
instance=instance)
reconfig_task = self._session._call_method(
self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=vmdk_attach_config_spec)
self._session._wait_for_task(instance['uuid'], reconfig_task)
LOG.debug(_("Reconfigured VM instance to attach the image disk"),
instance=instance)
_attach_vmdk_to_the_vm()
def _power_on_vm():
"""Power on the VM."""
LOG.debug(_("Powering on the VM instance"), instance=instance)
# Power On the VM
power_on_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(instance['uuid'], power_on_task)
LOG.debug(_("Powered on the VM instance"), instance=instance)
_power_on_vm()
def snapshot(self, context, instance, snapshot_name):
"""Create snapshot from a running VM instance.
Steps followed are:
1. Get the name of the vmdk file which the VM points to right now.
Can be a chain of snapshots, so we need to know the last in the
chain.
2. Create the snapshot. A new vmdk is created which the VM points to
now. The earlier vmdk becomes read-only.
3. Call CopyVirtualDisk which coalesces the disk chain to form a single
vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file.
4. Now upload the -flat.vmdk file to the image store.
5. Delete the coalesced .vmdk and -flat.vmdk created.
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
def _get_vm_and_vmdk_attribs():
# Get the vmdk file name that the VM is pointing to
hardware_devices = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "config.hardware.device")
_vmdk_info = vm_util.get_vmdk_file_path_and_adapter_type(
client_factory, hardware_devices)
vmdk_file_path_before_snapshot, adapter_type = _vmdk_info
datastore_name = vm_util.split_datastore_path(
vmdk_file_path_before_snapshot)[0]
os_type = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "summary.config.guestId")
return (vmdk_file_path_before_snapshot, adapter_type,
datastore_name, os_type)
(vmdk_file_path_before_snapshot, adapter_type, datastore_name,
os_type) = _get_vm_and_vmdk_attribs()
def _create_vm_snapshot():
# Create a snapshot of the VM
LOG.debug(_("Creating Snapshot of the VM instance"),
instance=instance)
snapshot_task = self._session._call_method(
self._session._get_vim(),
"CreateSnapshot_Task", vm_ref,
name="%s-snapshot" % instance.name,
description="Taking Snapshot of the VM",
memory=True,
quiesce=True)
self._session._wait_for_task(instance['uuid'], snapshot_task)
LOG.debug(_("Created Snapshot of the VM instance"),
instance=instance)
_create_vm_snapshot()
def _check_if_tmp_folder_exists():
# Copy the contents of the VM that were there just before the
# snapshot was taken
ds_ref_ret = vim_util.get_dynamic_property(
self._session._get_vim(),
vm_ref,
"VirtualMachine",
"datastore")
if not ds_ref_ret:
raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
ds_ref,
"Datastore",
"browser")
# Check if the vmware-tmp folder exists or not. If not, create one
tmp_folder_path = vm_util.build_datastore_path(datastore_name,
"vmware-tmp")
if not self._path_exists(ds_browser, tmp_folder_path):
self._mkdir(vm_util.build_datastore_path(datastore_name,
"vmware-tmp"))
_check_if_tmp_folder_exists()
# Generate a random vmdk file name to which the coalesced vmdk content
# will be copied to. A random name is chosen so that we don't have
# name clashes.
random_name = str(uuid.uuid4())
dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
"vmware-tmp/%s.vmdk" % random_name)
dc_ref = self._get_datacenter_name_and_ref()[0]
def _copy_vmdk_content():
            # Copy the contents of the disk (or disks, if snapshots were taken
            # earlier) to a temporary vmdk file.
copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
adapter_type)
LOG.debug(_('Copying disk data before snapshot of the VM'),
instance=instance)
copy_disk_task = self._session._call_method(
self._session._get_vim(),
"CopyVirtualDisk_Task",
service_content.virtualDiskManager,
sourceName=vmdk_file_path_before_snapshot,
sourceDatacenter=dc_ref,
destName=dest_vmdk_file_location,
destDatacenter=dc_ref,
destSpec=copy_spec,
force=False)
self._session._wait_for_task(instance['uuid'], copy_disk_task)
LOG.debug(_("Copied disk data before snapshot of the VM"),
instance=instance)
_copy_vmdk_content()
cookies = self._session._get_vim().client.options.transport.cookiejar
def _upload_vmdk_to_image_repository():
# Upload the contents of -flat.vmdk file which has the disk data.
LOG.debug(_("Uploading image %s") % snapshot_name,
instance=instance)
vmware_images.upload_image(
context,
snapshot_name,
instance,
os_type=os_type,
adapter_type=adapter_type,
image_version=1,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=datastore_name,
cookies=cookies,
file_path="vmware-tmp/%s-flat.vmdk" % random_name)
LOG.debug(_("Uploaded image %s") % snapshot_name,
instance=instance)
_upload_vmdk_to_image_repository()
def _clean_temp_data():
"""
Delete temporary vmdk files generated in image handling
operations.
"""
# Delete the temporary vmdk created above.
LOG.debug(_("Deleting temporary vmdk file %s")
% dest_vmdk_file_location, instance=instance)
remove_disk_task = self._session._call_method(
self._session._get_vim(),
"DeleteVirtualDisk_Task",
service_content.virtualDiskManager,
name=dest_vmdk_file_location,
datacenter=dc_ref)
self._session._wait_for_task(instance['uuid'], remove_disk_task)
LOG.debug(_("Deleted temporary vmdk file %s")
% dest_vmdk_file_location, instance=instance)
_clean_temp_data()
def reboot(self, instance, network_info):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
self.plug_vifs(instance, network_info)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
None, vm_ref, "VirtualMachine",
lst_properties)
pwr_state = None
tools_status = None
tools_running_status = False
for elem in props:
for prop in elem.propSet:
if prop.name == "runtime.powerState":
pwr_state = prop.val
elif prop.name == "summary.guest.toolsStatus":
tools_status = prop.val
elif prop.name == "summary.guest.toolsRunningStatus":
tools_running_status = prop.val
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
reason = _("instance is not powered on")
raise exception.InstanceRebootFailure(reason=reason)
        # If the latest VMware Tools are installed in the VM and the tools are
        # running, do a soft reboot of the guest OS. Otherwise do a hard reset.
if (tools_status == "toolsOk" and
tools_running_status == "guestToolsRunning"):
LOG.debug(_("Rebooting guest OS of VM"), instance=instance)
self._session._call_method(self._session._get_vim(), "RebootGuest",
vm_ref)
LOG.debug(_("Rebooted guest OS of VM"), instance=instance)
else:
LOG.debug(_("Doing hard reboot of VM"), instance=instance)
reset_task = self._session._call_method(self._session._get_vim(),
"ResetVM_Task", vm_ref)
self._session._wait_for_task(instance['uuid'], reset_task)
LOG.debug(_("Did hard reboot of VM"), instance=instance)
def destroy(self, instance, network_info):
"""
Destroy a VM instance. Steps followed are:
1. Power off the VM, if it is in poweredOn state.
2. Un-register a VM.
3. Delete the contents of the folder holding the VM related data.
"""
try:
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
LOG.debug(_("instance not present"), instance=instance)
return
lst_properties = ["config.files.vmPathName", "runtime.powerState"]
props = self._session._call_method(vim_util,
"get_object_properties",
None, vm_ref, "VirtualMachine", lst_properties)
pwr_state = None
for elem in props:
vm_config_pathname = None
for prop in elem.propSet:
if prop.name == "runtime.powerState":
pwr_state = prop.val
elif prop.name == "config.files.vmPathName":
vm_config_pathname = prop.val
if vm_config_pathname:
_ds_path = vm_util.split_datastore_path(vm_config_pathname)
datastore_name, vmx_file_path = _ds_path
# Power off the VM if it is in PoweredOn state.
if pwr_state == "poweredOn":
LOG.debug(_("Powering off the VM"), instance=instance)
poweroff_task = self._session._call_method(
self._session._get_vim(),
"PowerOffVM_Task", vm_ref)
self._session._wait_for_task(instance['uuid'], poweroff_task)
LOG.debug(_("Powered off the VM"), instance=instance)
# Un-register the VM
try:
LOG.debug(_("Unregistering the VM"), instance=instance)
self._session._call_method(self._session._get_vim(),
"UnregisterVM", vm_ref)
LOG.debug(_("Unregistered the VM"), instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
" while un-registering the VM: %s") % str(excep),
instance=instance)
self.unplug_vifs(instance, network_info)
# Delete the folder holding the VM related content on
# the datastore.
try:
dir_ds_compliant_path = vm_util.build_datastore_path(
datastore_name,
os.path.dirname(vmx_file_path))
LOG.debug(_("Deleting contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
delete_task = self._session._call_method(
self._session._get_vim(),
"DeleteDatastoreFile_Task",
self._session._get_vim().get_service_content().fileManager,
name=dir_ds_compliant_path)
self._session._wait_for_task(instance['uuid'], delete_task)
LOG.debug(_("Deleted contents of the VM from "
"datastore %(datastore_name)s") %
{'datastore_name': datastore_name},
instance=instance)
except Exception, excep:
LOG.warn(_("In vmwareapi:vmops:destroy, "
"got this exception while deleting"
" the VM contents from the disk: %s")
% str(excep),
instance=instance)
except Exception, exc:
LOG.exception(exc, instance=instance)
def pause(self, instance):
msg = _("pause not supported for vmwareapi")
raise NotImplementedError(msg)
def unpause(self, instance):
msg = _("unpause not supported for vmwareapi")
raise NotImplementedError(msg)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
# Only PoweredOn VMs can be suspended.
if pwr_state == "poweredOn":
LOG.debug(_("Suspending the VM"), instance=instance)
suspend_task = self._session._call_method(self._session._get_vim(),
"SuspendVM_Task", vm_ref)
self._session._wait_for_task(instance['uuid'], suspend_task)
LOG.debug(_("Suspended the VM"), instance=instance)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
reason = _("instance is powered off and can not be suspended.")
raise exception.InstanceSuspendFailure(reason=reason)
LOG.debug(_("VM was already in suspended state. So returning "
"without doing anything"), instance=instance)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
"VirtualMachine", "runtime.powerState")
if pwr_state.lower() == "suspended":
LOG.debug(_("Resuming the VM"), instance=instance)
suspend_task = self._session._call_method(
self._session._get_vim(),
"PowerOnVM_Task", vm_ref)
self._session._wait_for_task(instance['uuid'], suspend_task)
LOG.debug(_("Resumed the VM"), instance=instance)
else:
reason = _("instance is not in a suspended state")
raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance['name'])
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['name'])
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
"runtime.powerState"]
vm_props = self._session._call_method(vim_util,
"get_object_properties", None, vm_ref, "VirtualMachine",
lst_properties)
max_mem = None
pwr_state = None
num_cpu = None
for elem in vm_props:
for prop in elem.propSet:
if prop.name == "summary.config.numCpu":
num_cpu = int(prop.val)
elif prop.name == "summary.config.memorySizeMB":
# In MB, but we want in KB
max_mem = int(prop.val) * 1024
elif prop.name == "runtime.powerState":
pwr_state = VMWARE_POWER_STATES[prop.val]
return {'state': pwr_state,
'max_mem': max_mem,
'mem': max_mem,
'num_cpu': num_cpu,
'cpu_time': 0}
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
msg = _("get_diagnostics not implemented for vmwareapi")
raise NotImplementedError(msg)
def get_console_output(self, instance):
"""Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
urllib.urlencode(param_list))
request = urllib2.Request(base_url)
base64string = base64.encodestring(
'%s:%s' % (
self._session._host_username,
self._session._host_password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
result = urllib2.urlopen(request)
if result.code == 200:
return result.read()
else:
return ""
def _set_machine_id(self, client_factory, instance, network_info):
"""
Set the machine id of the VM for guest tools to pick up and reconfigure
the network interfaces.
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
machine_id_str = ''
for (network, info) in network_info:
# TODO(vish): add support for dns2
# TODO(sateesh): add support for injection of ipv6 configuration
ip_v4 = ip_v6 = None
if 'ips' in info and len(info['ips']) > 0:
ip_v4 = info['ips'][0]
if 'ip6s' in info and len(info['ip6s']) > 0:
ip_v6 = info['ip6s'][0]
if len(info['dns']) > 0:
dns = info['dns'][0]
else:
dns = ''
interface_str = ";".join([info['mac'],
ip_v4 and ip_v4['ip'] or '',
ip_v4 and ip_v4['netmask'] or '',
info['gateway'],
info['broadcast'],
dns])
machine_id_str = machine_id_str + interface_str + '#'
machine_id_change_spec = vm_util.get_machine_id_change_spec(
client_factory, machine_id_str)
LOG.debug(_("Reconfiguring VM instance to set the machine id "
"with ip - %(ip_addr)s") %
{'ip_addr': ip_v4['ip']},
instance=instance)
reconfig_task = self._session._call_method(self._session._get_vim(),
"ReconfigVM_Task", vm_ref,
spec=machine_id_change_spec)
self._session._wait_for_task(instance['uuid'], reconfig_task)
LOG.debug(_("Reconfigured VM instance to set the machine id "
"with ip - %(ip_addr)s") %
{'ip_addr': ip_v4['ip']},
instance=instance)
def _get_datacenter_name_and_ref(self):
"""Get the datacenter name and the reference."""
dc_obj = self._session._call_method(vim_util, "get_objects",
"Datacenter", ["name"])
return dc_obj[0].obj, dc_obj[0].propSet[0].val
def _path_exists(self, ds_browser, ds_path):
"""Check if the path exists on the datastore."""
search_task = self._session._call_method(self._session._get_vim(),
"SearchDatastore_Task",
ds_browser,
datastorePath=ds_path)
# Wait till the state changes from queued or running.
# If an error state is returned, it means that the path doesn't exist.
while True:
task_info = self._session._call_method(vim_util,
"get_dynamic_property",
search_task, "Task", "info")
if task_info.state in ['queued', 'running']:
time.sleep(2)
continue
break
if task_info.state == "error":
return False
return True
def _mkdir(self, ds_path):
"""
Creates a directory at the path specified. If it is just "NAME",
then a directory with this name is created at the topmost level of the
DataStore.
"""
LOG.debug(_("Creating directory with path %s") % ds_path)
self._session._call_method(self._session._get_vim(), "MakeDirectory",
self._session._get_vim().get_service_content().fileManager,
name=ds_path, createParentDirectories=False)
LOG.debug(_("Created directory with path %s") % ds_path)
def _get_vm_ref_from_the_name(self, vm_name):
"""Get reference to the VM with the name specified."""
vms = self._session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
for vm in vms:
if vm.propSet[0].val == vm_name:
return vm.obj
return None
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for (network, mapping) in network_info:
self._vif_driver.plug(instance, (network, mapping))
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
for (network, mapping) in network_info:
self._vif_driver.unplug(instance, (network, mapping))
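# Illustrative sketch (not part of the original module): the vmwareapi driver
# layer is expected to construct this class with an authenticated session
# object and delegate instance lifecycle calls to it, e.g. (names assumed):
#     ops = VMWareVMOps(session)
#     ops.spawn(context, instance, image_meta, network_info)
#     info = ops.get_info(instance)  # power state mapped via VMWARE_POWER_STATES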
|
{
"content_hash": "f3da318ddd09b72f6a447508afc1ae6d",
"timestamp": "",
"source": "github",
"line_count": 815,
"max_line_length": 79,
"avg_line_length": 46.011042944785274,
"alnum_prop": 0.5252140057068189,
"repo_name": "aristanetworks/arista-ovs-nova",
"id": "97270fc063020393aa81b1ca3a68d3983185bffa",
"size": "38193",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/vmops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "6938504"
},
{
"name": "Shell",
"bytes": "16524"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._images_operations import (
build_create_or_update_request,
build_get_request,
build_list_by_lab_plan_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ImagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.labservices.aio.ManagedLabsClient`'s
:attr:`images` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_lab_plan(
self, resource_group_name: str, lab_plan_name: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.Image"]:
"""Gets all images.
Gets all images from galleries attached to a lab plan.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param filter: The filter to apply to the operation. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Image or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.labservices.models.Image]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.PagedImages]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_lab_plan_request(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_by_lab_plan.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("PagedImages", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_lab_plan.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/images"} # type: ignore
@distributed_trace_async
async def get(self, resource_group_name: str, lab_plan_name: str, image_name: str, **kwargs: Any) -> _models.Image:
"""Gets an image.
Gets an image resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
request = build_get_request(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/images/{imageName}"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: _models.Image,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Image:
"""Updates an image via PUT.
Updates an image resource via PUT. Creating new resources via PUT will not function.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:param body: The request body. Required.
:type body: ~azure.mgmt.labservices.models.Image
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Image:
"""Updates an image via PUT.
Updates an image resource via PUT. Creating new resources via PUT will not function.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:param body: The request body. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: Union[_models.Image, IO],
**kwargs: Any
) -> _models.Image:
"""Updates an image via PUT.
Updates an image resource via PUT. Creating new resources via PUT will not function.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
        :param body: The request body. Is either a model type or an IO type. Required.
:type body: ~azure.mgmt.labservices.models.Image or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "Image")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/images/{imageName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: _models.ImageUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Image:
"""Updates an image.
Updates an image resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:param body: The request body. Required.
:type body: ~azure.mgmt.labservices.models.ImageUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.Image:
"""Updates an image.
Updates an image resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:param body: The request body. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
lab_plan_name: str,
image_name: str,
body: Union[_models.ImageUpdate, IO],
**kwargs: Any
) -> _models.Image:
"""Updates an image.
Updates an image resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI. Required.
:type lab_plan_name: str
:param image_name: The image name. Required.
:type image_name: str
:param body: The request body. Is either a model type or an IO type. Required.
:type body: ~azure.mgmt.labservices.models.ImageUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Image or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.Image
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.Image]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IO, bytes)):
_content = body
else:
_json = self._serialize.body(body, "ImageUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
image_name=image_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("Image", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/images/{imageName}"} # type: ignore
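# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the generated client): how the async
# ``create_or_update``/``update`` operations above are typically driven. The
# client class name and the ``images`` attribute are assumptions inferred from
# this file's path; check the package's ``aio`` entry point for the real names.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.labservices.aio import LabServicesClient  # assumed name
#
#     async def _demo():
#         async with DefaultAzureCredential() as credential:
#             async with LabServicesClient(credential, "<subscription-id>") as client:
#                 # ``body`` may be a model/dict (serialized as JSON) or an IO
#                 # stream (sent as raw content); see the isinstance check above.
#                 image = await client.images.update(
#                     resource_group_name="my-rg",
#                     lab_plan_name="my-lab-plan",
#                     image_name="my-image",
#                     body={"properties": {"enabledState": "Enabled"}},
#                 )
#                 print(image.name)
#
#     asyncio.run(_demo())
# ---------------------------------------------------------------------------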
|
{
"content_hash": "f446deb73c2e6c1a0d64fd0343b96004",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 199,
"avg_line_length": 43.752,
"alnum_prop": 0.6398793198025233,
"repo_name": "Azure/azure-sdk-for-python",
"id": "fb4ecedabbb2c0150d6f272415af9595f2c43ead",
"size": "22376",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/aio/operations/_images_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import os
import json
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
_config = None
class CalvinConfig(object):
"""
Handle configuration of Calvin; works similarly to Python's ConfigParser
Looks for calvin.conf or .calvin.conf files in:
1. Built-ins
2. Calvin's install directory
3. $HOME
4. all directories between $CWD and $HOME
5. current working directory ($CWD)
If $CWD is outside of $HOME, only (1) through (3) are searched.
Simple values are overridden by later configs, whereas lists are prepended by later configs.
If the environment variable CALVIN_CONFIG_PATH is set, it will be taken as a path to the ONLY
configuration file, overriding even built-ins.
Finally, wildcard environment variables on the form CALVIN_<SECTION>_<OPTION> may override
options read from defaults or config files. <SECTION> must be one of GLOBAL, TESTING, or DEVELOPER,
e.g. CALVIN_TESTING_UNITTEST_LOOPS=42
Printing the config object provides a great deal of information about the configuration.
"""
def __init__(self):
super(CalvinConfig, self).__init__()
self.config = {}
self.wildcards = []
self.override_path = os.environ.get('CALVIN_CONFIG_PATH', None)
# Setting CALVIN_CONFIG_PATH takes precedence over all other configs
if self.override_path is not None:
config = self.config_at_path(self.override_path)
if config is not None:
self.set_config(self.config_at_path(self.override_path))
else:
self.override_path = None
_log.info("CALVIN_CONFIG_PATH does not point to a valid config file.")
# This is the normal config procedure
if self.override_path is None:
# The next line is guaranteed to work, so we have at least a default config
self.set_config(self.default_config())
conf_paths = self.config_paths()
for p in conf_paths:
delta_config = self.config_at_path(p)
self.update_config(delta_config)
# Check if any options were overridden via CALVIN_* environment variables
self.set_wildcards()
_log.debug("\n{0}\n{1}\n{0}".format("-"*80, self))
def default_config(self):
default = {
'global':{
'comment': 'User definable section',
'actor_paths': ['systemactors'],
'framework': 'twistedimpl',
'storage_proxy': None,
'storage_start': True,
'remote_coder_negotiator': 'static',
'static_coder': 'json'
},
'testing': {
'comment': 'Test settings',
'unittest_loops': 2
},
'developer': {
'comment': 'Experimental settings',
}
}
return default
def add_section(self, section):
"""Add a named section"""
self.config.setdefault(section.lower(), {})
def get_in_order(self, option, default=None):
v = self.get('ARGUMENTS', option)
if v is None:
v = self.get('GLOBAL', option)
if v is None:
v = default
return v
def get(self, section, option):
"""Get value of option in named section, if section is None 'global' section is implied."""
try:
_section = 'global' if section is None else section.lower()
_option = option.lower()
return self.config[_section][_option]
except Exception as e:
return None
def set(self, section, option, value):
"""Set value of option in named section"""
_section = self.config[section.lower()]
_section[option.lower()] = value
def append(self, section, option, value):
"""Append value (list) of option in named section"""
_section = self.config[section.lower()]
_option = option.lower()
old_value = _section.setdefault(_option, [])
if type(old_value) is not list :
raise Exception("Can't append, {}:{} is not a list".format(section, option))
if type(value) is not list:
raise Exception("Can't append, value is not a list")
_section[_option][:0] = value
def set_config(self, config):
"""Set complete config"""
for section in config:
_section = section.lower()
self.add_section(_section)
for option, value in config[section].iteritems():
_option = option.lower()
self.set(_section, _option, value)
def _case_sensitive_keys(self, section, option, conf):
"""Return the case sensitive keys for 'section' and 'option' (or None if not present) in 'conf'."""
for _section in conf:
if _section.lower() != section.lower():
continue
for _option in conf[section]:
if _option.lower() == option.lower():
return _section, _option
return _section, None
return None, None
def _expand_actor_paths(self, conf, conf_dir):
"""Expand $HOME, $USER etc. and resolve './actors' etc. relative to the config file."""
# Get the correct keys to use with the config dict since we allow mixed case, but convert to lower internally
_section, _option = self._case_sensitive_keys('global', 'actor_paths', conf)
if not _option:
return
paths = conf[_section][_option]
# First handle expansion of env vars
expanded = [os.path.expandvars(p) for p in paths]
# Normalize and handle './', i.e. relative to config file
conf[_section][_option] = [os.path.normpath(os.path.join(conf_dir, p) if p.startswith('./') else p) for p in expanded]
def config_at_path(self, path):
"""Returns config or None if no config at path."""
if os.path.exists(path+'/calvin.conf'):
confpath = path+'/calvin.conf'
elif os.path.exists(path+'/.calvin.conf'):
confpath = path+'/.calvin.conf'
else:
return None
try:
with open(confpath) as f:
conf = json.loads(f.read())
self._expand_actor_paths(conf, path)
except Exception as e:
_log.info("Could not read config at '{}'".format(confpath))
conf = None
return conf
def update_config(self, delta_config):
"""
Update config using delta_config.
If value in delta_config is list, prepend to value in config,
otherwise replace value in config.
"""
if not delta_config:
return
for section in delta_config:
for option, value in delta_config[section].iteritems():
if option.lower() == 'comment':
continue
operation = self.append if type(value) is list else self.set
operation(section, option, value)
def install_location(self):
"""Return the 'installation dir'."""
this_dir = os.path.dirname(os.path.realpath(__file__))
install_dir = os.path.abspath(os.path.join(this_dir, '..'))
return install_dir
def config_paths(self):
"""
Return the install dir and list of paths from $HOME to the current working directory (CWD),
unless CWD is not rooted in $HOME in which case only install dir and $HOME is returned.
If install dir is in the path from $HOME to CWD it is not included a second time.
"""
if self.override_path is not None:
return [self.override_path]
inst_loc = self.install_location()
curr_loc = os.getcwd()
home = os.environ.get('HOME', curr_loc)
paths = [home, inst_loc]
if not curr_loc.startswith(home):
return paths
dpaths = []
while len(curr_loc) > len(home):
if curr_loc != inst_loc:
dpaths.append(curr_loc)
curr_loc, part = curr_loc.rsplit('/', 1)
return dpaths + paths
def set_wildcards(self):
"""
Allow environment variables on the form CALVIN_<SECTION>_<OPTION> to override options
read from defaults or config files. <SECTION> must be one of GLOBAL, TESTING, or DEVELOPER.
"""
wildcards = [e for e in os.environ if e.startswith('CALVIN_') and e != 'CALVIN_CONFIG_PATH']
for wildcard in wildcards:
parts = wildcard.split('_', 2)
if len(parts) < 3 or parts[1] not in ['GLOBAL', 'TESTING', 'DEVELOPER', 'ARGUMENTS']:
_log.info("Malformed environment variable {}, skipping.".format(wildcard))
continue
section, option = parts[1:3]
value = os.environ[wildcard]
try:
self.set(section, option, json.loads(value))
self.wildcards.append(wildcard)
except Exception as e:
_log.warning("Value {} of environment variable {} is malformed, skipping.".format(repr(value), wildcard))
def __str__(self):
d = {}
d['config searchpaths'] = self.config_paths()
d['config paths'] = [p for p in self.config_paths() if self.config_at_path(p) is not None]
d['config'] = self.config
d['CALVIN_CONFIG_PATH'] = self.override_path
d['wildcards'] = self.wildcards
return self.__class__.__name__ + " : " + json.dumps(d, indent=4, sort_keys=True)
def get():
global _config
if _config is None:
_config = CalvinConfig()
return _config
if __name__ == "__main__":
os.environ['CALVIN_CONFIG_PATH'] = '/Users/eperspe/Source/spikes/ConfigParser'
os.environ['CALVIN_TESTING_UNITTEST_LOOPS'] = '44'
a = get()
print(a)
p = a.get('global', 'actor_paths')
print(p, type(p))
p = a.get(None, 'framework')
print(p, type(p))
p = a.get(None, 'unittest_loops')
print(p, type(p))
p = a.get('Testing', 'unittest_loops')
print(p, type(p))
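# ---------------------------------------------------------------------------
# Illustrative note on the merge semantics implemented by update_config() and
# append() above: later configs replace simple values but *prepend* to list
# values, so a project-local calvin.conf can put its own actor paths ahead of
# the built-in 'systemactors' entry. A minimal sketch (paths are made up):
#
#     cfg = CalvinConfig()
#     cfg.update_config({'global': {'actor_paths': ['./local_actors'],
#                                   'framework': 'twistedimpl'}})
#     cfg.get('global', 'actor_paths')
#     # -> ['./local_actors', 'systemactors']   (list prepended)
#     cfg.get('global', 'framework')
#     # -> 'twistedimpl'                        (simple value replaced)
# ---------------------------------------------------------------------------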
|
{
"content_hash": "38dcd021a896c468105826546f252bad",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 126,
"avg_line_length": 37.83955223880597,
"alnum_prop": 0.5765703579528646,
"repo_name": "KaptenJon/calvin-base",
"id": "a8f0cc0497c50085b968da9d1be00e056a3887a1",
"size": "10746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/utilities/calvinconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "39967"
},
{
"name": "JavaScript",
"bytes": "9947"
},
{
"name": "Python",
"bytes": "845925"
}
],
"symlink_target": ""
}
|
'''
Run nagios plugins/checks from salt and get the return as data.
'''
# Import python libs
from __future__ import absolute_import
import os
import stat
import logging
# Import 3rd-party libs
import salt.ext.six as six
log = logging.getLogger(__name__)
PLUGINDIR = '/usr/lib/nagios/plugins/'
def __virtual__():
'''
Only load if nagios-plugins are installed
'''
if os.path.isdir('/usr/lib/nagios/'):
return 'nagios'
return False
def _execute_cmd(plugin, args='', run_type='cmd.retcode'):
'''
Execute a nagios plugin, if it is present in the plugin directory, with the salt command specified in run_type
'''
data = {}
all_plugins = list_plugins()
if plugin in all_plugins:
data = __salt__[run_type](
'{0}{1} {2}'.format(PLUGINDIR, plugin, args),
python_shell=False)
return data
def _execute_pillar(pillar_name, run_type):
'''
Run one or more nagios plugins from pillar data and get the result of run_type
The pillar has to be in this format:
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
'''
groups = __salt__['pillar.get'](pillar_name)
data = {}
for group in groups:
data[group] = {}
commands = groups[group]
for command in commands:
# Check if the command is a dict in order to get its arguments;
# if not, set the arguments to an empty string
if isinstance(command, dict):
plugin = next(six.iterkeys(command))
args = command[plugin]
else:
plugin = command
args = ''
command_key = _format_dict_key(args, plugin)
data[group][command_key] = run_type(plugin, args)
return data
def _format_dict_key(args, plugin):
key_name = plugin
args_key = args.replace(' ', '')
if args != '':
args_key = '_' + args_key
key_name = plugin + args_key
return key_name
def run(plugin, args=''):
'''
Run a nagios plugin and return all the execution data with cmd.run
'''
data = _execute_cmd(plugin, args, 'cmd.run')
return data
def retcode(plugin, args='', key_name=None):
'''
Run one nagios plugin and return the retcode of the execution
CLI Example:
.. code-block:: bash
salt '*' nagios.retcode check_apt
salt '*' nagios.retcode check_icmp '8.8.8.8'
'''
data = {}
# Remove all the spaces, the key must not have any space
if key_name is None:
key_name = _format_dict_key(args, plugin)
data[key_name] = {}
status = _execute_cmd(plugin, args, 'cmd.retcode')
data[key_name]['status'] = status
return data
def run_all(plugin, args=''):
'''
Run a nagios plugin and return all the execution data with cmd.run_all
'''
data = _execute_cmd(plugin, args, 'cmd.run_all')
return data
def retcode_pillar(pillar_name):
'''
Run one or more nagios plugins from pillar data and get the result of cmd.retcode
The pillar has to be in this format::
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
webserver is the role to check; the next keys are the groups and the items
are the checks with their arguments, if needed.
You must group different checks (one or more); the result will always be
the highest value of all the checks in the group.
CLI Example:
.. code-block:: bash
salt '*' nagios.retcode_pillar webserver
'''
groups = __salt__['pillar.get'](pillar_name)
check = {}
data = {}
for group in groups:
commands = groups[group]
for command in commands:
# Check if the command is a dict in order to get its arguments;
# if not, set the arguments to an empty string
if isinstance(command, dict):
plugin = next(six.iterkeys(command))
args = command[plugin]
else:
plugin = command
args = ''
check.update(retcode(plugin, args, group))
current_value = 0
new_value = int(check[group]['status'])
if group in data:
current_value = int(data[group]['status'])
if (new_value > current_value) or (group not in data):
if group not in data:
data[group] = {}
data[group]['status'] = new_value
return data
def run_pillar(pillar_name):
'''
Run one or more nagios plugins from pillar data and get the result of cmd.run
The pillar has to be in this format::
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
webserver is the role to check; the next keys are the groups and the items
are the checks with their arguments, if needed.
You have to group different checks (one or more) in a group.
CLI Example:
.. code-block:: bash
salt '*' nagios.run_pillar webserver
'''
data = _execute_pillar(pillar_name, run)
return data
def run_all_pillar(pillar_name):
'''
Run one or more nagios plugins from pillar data and get the result of cmd.run_all
The pillar has to be in this format::
------
webserver:
Ping_google:
- check_icmp: 8.8.8.8
- check_icmp: google.com
Load:
- check_load: -w 0.8 -c 1
APT:
- check_apt
-------
webserver is the role to check; the next keys are the groups and the items
are the checks with their arguments, if needed.
You have to group different checks (one or more) in a group.
CLI Example:
.. code-block:: bash
salt '*' nagios.run_all_pillar webserver
'''
data = _execute_pillar(pillar_name, run_all)
return data
def list_plugins():
'''
List all the nagios plugins
CLI Example:
.. code-block:: bash
salt '*' nagios.list_plugins
'''
plugin_list = os.listdir(PLUGINDIR)
ret = []
for plugin in plugin_list:
# Check if execute bit
stat_f = os.path.join(PLUGINDIR, plugin)
execute_bit = stat.S_IXUSR & os.stat(stat_f)[stat.ST_MODE]
if execute_bit:
ret.append(plugin)
return ret
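# ---------------------------------------------------------------------------
# Illustrative sketch of the pillar-driven entry points above. Given pillar
# data shaped like the docstrings describe,
#
#     webserver:
#       Ping_google:
#         - check_icmp: 8.8.8.8
#         - check_icmp: google.com
#       Load:
#         - check_load: -w 0.8 -c 1
#
# ``salt '*' nagios.retcode_pillar webserver`` returns one entry per group and
# keeps only the worst (highest) exit code of that group's checks, e.g.
#
#     {'Ping_google': {'status': 0}, 'Load': {'status': 1}}
#
# while ``run_pillar``/``run_all_pillar`` return, per group, one entry per
# individual check keyed as ``<plugin>_<args-without-spaces>`` via
# _format_dict_key().
# ---------------------------------------------------------------------------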
|
{
"content_hash": "2b73971c005b60e788738b0e5619115b",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 90,
"avg_line_length": 24.560439560439562,
"alnum_prop": 0.5522744220730798,
"repo_name": "smallyear/linuxLearn",
"id": "db937f066ef4dde8cc68640076738d37aa76700b",
"size": "6729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/salt/modules/nagios.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "269"
},
{
"name": "CSS",
"bytes": "35"
},
{
"name": "HTML",
"bytes": "23373"
},
{
"name": "JavaScript",
"bytes": "510"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "12800734"
},
{
"name": "Shell",
"bytes": "240576"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy import (dot, eye, diag_indices, zeros, column_stack, ones, diag,
asarray, r_)
from numpy.linalg import inv, solve
#from scipy.linalg import block_diag
from scipy import linalg
#def denton(indicator, benchmark, freq="aq", **kwarg):
# """
# Denton's method to convert low-frequency to high frequency data.
#
# Parameters
# ----------
# benchmark : array-like
# The higher frequency benchmark. A 1d or 2d data series in columns.
# If 2d, then M series are assumed.
# indicator
# A low-frequency indicator series. It is assumed that there are no
# pre-sample indicators. Ie., the first indicators line up with
# the first benchmark.
# freq : str {"aq","qm", "other"}
# "aq" - Benchmarking an annual series to quarterly.
# "mq" - Benchmarking a quarterly series to monthly.
# "other" - Custom stride. A kwarg, k, must be supplied.
# kwargs :
# k : int
# The number of high-frequency observations that sum to make an
# aggregate low-frequency observation. `k` is used with
# `freq` == "other".
# Returns
# -------
# benchmarked series : array
#
# Notes
# -----
# Denton's method minimizes the distance given by the penalty function, in
# a least squares sense, between the unknown benchmarked series and the
# indicator series subject to the condition that the sum of the benchmarked
# series is equal to the benchmark.
#
#
# References
# ----------
# Bloem, A.M, Dippelsman, R.J. and Maehle, N.O. 2001 Quarterly National
# Accounts Manual--Concepts, Data Sources, and Compilation. IMF.
# http://www.imf.org/external/pubs/ft/qna/2000/Textbook/index.htm
# Denton, F.T. 1971. "Adjustment of monthly or quarterly series to annual
# totals: an approach based on quadratic minimization." Journal of the
# American Statistical Association. 99-102.
#
# """
# # check arrays and make 2d
# indicator = np.asarray(indicator)
# if indicator.ndim == 1:
# indicator = indicator[:,None]
# benchmark = np.asarray(benchmark)
# if benchmark.ndim == 1:
# benchmark = benchmark[:,None]
#
# # get dimensions
# N = len(indicator) # total number of high-freq
# m = len(benchmark) # total number of low-freq
#
# # number of low-freq observations for aggregate measure
# # 4 for annual to quarter and 3 for quarter to monthly
# if freq == "aq":
# k = 4
# elif freq == "qm":
# k = 3
# elif freq == "other":
# k = kwargs.get("k")
# if not k:
# raise ValueError("k must be supplied with freq=\"other\"")
# else:
# raise ValueError("freq %s not understood" % freq)
#
# n = k*m # number of indicator series with a benchmark for back-series
# # if k*m != n, then we are going to extrapolate q observations
#
# B = block_diag(*(np.ones((k,1)),)*m)
#
# r = benchmark - B.T.dot(indicator)
#TODO: take code in the string at the end and implement Denton's original
# method with a few of the penalty functions.
def dentonm(indicator, benchmark, freq="aq", **kwargs):
"""
Modified Denton's method to convert low-frequency to high-frequency data.
Uses proportionate first-differences as the penalty function. See notes.
Parameters
----------
indicator
A low-frequency indicator series. It is assumed that there are no
pre-sample indicators. Ie., the first indicators line up with
the first benchmark.
benchmark : array-like
The higher frequency benchmark. A 1d or 2d data series in columns.
If 2d, then M series are assumed.
freq : str {"aq","qm", "other"}
"aq" - Benchmarking an annual series to quarterly.
"qm" - Benchmarking a quarterly series to monthly.
"other" - Custom stride. A kwarg, k, must be supplied.
kwargs :
k : int
The number of high-frequency observations that sum to make an
aggregate low-frequency observation. `k` is used with
`freq` == "other".
Returns
-------
benchmarked series : array
Examples
--------
>>> indicator = [50,100,150,100] * 5
>>> benchmark = [500,400,300,400,500]
>>> benchmarked = dentonm(indicator, benchmark, freq="aq")
Notes
-----
Denton's method minimizes the distance given by the penalty function, in
a least squares sense, between the unknown benchmarked series and the
indicator series subject to the condition that the sum of the benchmarked
series is equal to the benchmark. Unlike Denton's original method, the
modification does not require the first value to be pre-determined.
If there is no benchmark provided for the last few indicator
observations, then extrapolation is performed using the last
benchmark-indicator ratio of the previous period.
Minimizes sum((X[t]/I[t] - X[t-1]/I[t-1])**2)
s.t.
sum(X) = A, for each period. Where X is the benchmarked series, I is
the indicator, and A is the benchmark.
References
----------
Bloem, A.M, Dippelsman, R.J. and Maehle, N.O. 2001 Quarterly National
Accounts Manual--Concepts, Data Sources, and Compilation. IMF.
http://www.imf.org/external/pubs/ft/qna/2000/Textbook/index.htm
Cholette, P. 1988. "Benchmarking systems of socio-economic time series."
Statistics Canada, Time Series Research and Analysis Division,
Working Paper No TSRA-88-017E.
Denton, F.T. 1971. "Adjustment of monthly or quarterly series to annual
totals: an approach based on quadratic minimization." Journal of the
American Statistical Association. 99-102.
"""
# penalty : str
# Penalty function. Can be "D1", "D2", "D3", "D4", "D5".
# X is the benchmarked series and I is the indicator.
# D1 - sum((X[t] - X[t-1]) - (I[t] - I[ti-1])**2)
# D2 - sum((ln(X[t]/X[t-1]) - ln(I[t]/I[t-1]))**2)
# D3 - sum((X[t]/X[t-1] / I[t]/I[t-1])**2)
# D4 - sum((X[t]/I[t] - X[t-1]/I[t-1])**2)
# D5 - sum((X[t]/I[t] / X[t-1]/I[t-1] - 1)**2)
#NOTE: D4 is the only one implemented; see IMF chapter 6.
# check arrays and make 2d
indicator = asarray(indicator)
if indicator.ndim == 1:
indicator = indicator[:,None]
benchmark = asarray(benchmark)
if benchmark.ndim == 1:
benchmark = benchmark[:,None]
# get dimensions
N = len(indicator) # total number of high-freq
m = len(benchmark) # total number of low-freq
# number of low-freq observations for aggregate measure
# 4 for annual to quarter and 3 for quarter to monthly
if freq == "aq":
k = 4
elif freq == "qm":
k = 3
elif freq == "other":
k = kwargs.get("k")
if not k:
raise ValueError("k must be supplied with freq=\"other\"")
else:
raise ValueError("freq %s not understood" % freq)
n = k*m # number of indicator series with a benchmark for back-series
# if k*m != n, then we are going to extrapolate q observations
if N > n:
q = N - n
else:
q = 0
# make the aggregator matrix
#B = block_diag(*(ones((k,1)),)*m)
B = np.kron(np.eye(m), ones((k,1)))
# following the IMF paper, we can do
Zinv = diag(1./indicator.squeeze()[:n])
# this is D in Denton's notation (not using initial value correction)
# D = eye(n)
# make off-diagonal = -1
# D[((np.diag_indices(n)[0])[:-1]+1,(np.diag_indices(n)[1])[:-1])] = -1
# account for starting conditions
# H = D[1:,:]
# HTH = dot(H.T,H)
# just make HTH
HTH = eye(n)
diag_idx0, diag_idx1 = diag_indices(n)
HTH[diag_idx0[1:-1], diag_idx1[1:-1]] += 1
HTH[diag_idx0[:-1]+1, diag_idx1[:-1]] = -1
HTH[diag_idx0[:-1], diag_idx1[:-1]+1] = -1
W = dot(dot(Zinv,HTH),Zinv)
# make partitioned matrices
#TODO: break this out so that we can simplify the linalg?
I = zeros((n+m,n+m))
I[:n,:n] = W
I[:n,n:] = B
I[n:,:n] = B.T
A = zeros((m+n,1)) # zero first-order constraints
A[-m:] = benchmark # adding up constraints
X = solve(I,A)
X = X[:-m] # drop the lagrange multipliers
# handle extrapolation
if q > 0:
# get last Benchmark-Indicator ratio
bi = X[n-1]/indicator[n-1]
extrapolated = bi * indicator[n:]
X = r_[X,extrapolated]
return X.squeeze()
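# ---------------------------------------------------------------------------
# Illustrative note on the system solved in dentonm() above. HTH is H'H for
# the (n-1) x n first-difference matrix H; for n = 3:
#
#     H = [[-1,  1,  0],          H'H = [[ 1, -1,  0],
#          [ 0, -1,  1]]                 [-1,  2, -1],
#                                        [ 0, -1,  1]]
#
# With Zinv = diag(1/I) and B the k-period aggregation matrix, the
# benchmarked series X is read off the partitioned (Lagrangian) system
#
#     [ Zinv*H'H*Zinv   B ] [ X ]   [ 0 ]
#     [      B'         0 ] [ l ] = [ A ]
#
# where A stacks the benchmarks and l holds the Lagrange multipliers that are
# dropped before extrapolating and returning.
# ---------------------------------------------------------------------------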
if __name__ == "__main__":
import numpy as np
#these will be the tests
# from IMF paper
# quarterly data
indicator = np.array([98.2, 100.8, 102.2, 100.8, 99.0, 101.6,
102.7, 101.5, 100.5, 103.0, 103.5, 101.5])
# two annual observations
benchmark = np.array([4000.,4161.4])
x_imf = dentonm(indicator, benchmark, freq="aq")
imf_stata = np.array([969.8, 998.4, 1018.3, 1013.4, 1007.2, 1042.9,
1060.3, 1051.0, 1040.6, 1066.5, 1071.7, 1051.0])
np.testing.assert_almost_equal(imf_stata, x_imf, 1)
# Denton example
zQ = np.array([50,100,150,100] * 5)
Y = np.array([500,400,300,400,500])
x_denton = dentonm(zQ, Y, freq="aq")
x_stata = np.array([64.334796,127.80616,187.82379,120.03526,56.563894,
105.97568,147.50144,89.958987,40.547201,74.445963,
108.34473,76.66211,42.763347,94.14664,153.41596,
109.67405,58.290761,122.62556,190.41409,128.66959])
"""
# Examples from the Denton 1971 paper
k = 4
m = 5
n = m*k
zQ = [50,100,150,100] * m
Y = [500,400,300,400,500]
A = np.eye(n)
B = block_diag(*(np.ones((k,1)),)*m)
r = Y - B.T.dot(zQ)
#Ainv = inv(A)
Ainv = A # shortcut for identity
C = Ainv.dot(B).dot(inv(B.T.dot(Ainv).dot(B)))
x = zQ + C.dot(r)
# minimize first difference d(x-z)
R = linalg.tri(n, dtype=float) # R is tril so actually R.T in paper
Ainv = R.dot(R.T)
C = Ainv.dot(B).dot(inv(B.T.dot(Ainv).dot(B)))
x1 = zQ + C.dot(r)
# minimize the second difference d**2(x-z)
Ainv = R.dot(Ainv).dot(R.T)
C = Ainv.dot(B).dot(inv(B.T.dot(Ainv).dot(B)))
x12 = zQ + C.dot(r)
# # do it proportionately (x-z)/z
Z = np.diag(zQ)
Ainv = np.eye(n)
C = Z.dot(Ainv).dot(Z).dot(B).dot(inv(B.T.dot(Z).dot(Ainv).dot(Z).dot(B)))
x11 = zQ + C.dot(r)
# do it proportionately with differencing d((x-z)/z)
Ainv = R.dot(R.T)
C = Z.dot(Ainv).dot(Z).dot(B).dot(inv(B.T.dot(Z).dot(Ainv).dot(Z).dot(B)))
x111 = zQ + C.dot(r)
x_stata = np.array([64.334796,127.80616,187.82379,120.03526,56.563894,
105.97568,147.50144,89.958987,40.547201,74.445963,
108.34473,76.66211,42.763347,94.14664,153.41596,
109.67405,58.290761,122.62556,190.41409,128.66959])
"""
|
{
"content_hash": "dacb5132510ae9dae97762d2323da436",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 80,
"avg_line_length": 34.94805194805195,
"alnum_prop": 0.6069305091044221,
"repo_name": "ChadFulton/statsmodels",
"id": "ae305e87679b3f7e15f9159cf7bc055d5b2842a8",
"size": "10765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/tsa/interp/denton.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "3469"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "11749760"
},
{
"name": "R",
"bytes": "90986"
},
{
"name": "Rebol",
"bytes": "123"
},
{
"name": "Shell",
"bytes": "8181"
},
{
"name": "Smarty",
"bytes": "1014"
},
{
"name": "Stata",
"bytes": "65045"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
import os
import unittest2 as unittest
if os.environ.get('USE_TWISTED', False):
from mock import patch
from zope.interface import implementer
from twisted.internet.interfaces import IReactorTime
@implementer(IReactorTime)
class FakeReactor(object):
'''
This just fakes out enough reactor methods so .run() can work.
'''
stop_called = False
def __init__(self, to_raise):
self.stop_called = False
self.to_raise = to_raise
self.delayed = []
def run(self, *args, **kw):
raise self.to_raise
def stop(self):
self.stop_called = True
def callLater(self, delay, func, *args, **kwargs):
self.delayed.append((delay, func, args, kwargs))
def connectTCP(self, *args, **kw):
raise RuntimeError("ConnectTCP shouldn't get called")
class TestWampTwistedRunner(unittest.TestCase):
def test_connect_error(self):
'''
Ensure the runner doesn't swallow errors and that it exits the
reactor properly if there is one.
'''
try:
from autobahn.twisted.wamp import ApplicationRunner
from twisted.internet.error import ConnectionRefusedError
# the 'reactor' member doesn't exist until we import it
from twisted.internet import reactor # noqa: F401
except ImportError:
raise unittest.SkipTest('No twisted')
runner = ApplicationRunner(u'ws://localhost:1', u'realm')
exception = ConnectionRefusedError("It's a trap!")
with patch('twisted.internet.reactor', FakeReactor(exception)) as mockreactor:
self.assertRaises(
ConnectionRefusedError,
# pass a no-op session-creation method
runner.run, lambda _: None, start_reactor=True
)
self.assertTrue(mockreactor.stop_called)
else:
# Asyncio tests.
try:
import asyncio
from unittest.mock import patch, Mock
except ImportError:
# Trollius >= 0.3 was renamed to asyncio
# noinspection PyUnresolvedReferences
import trollius as asyncio
from mock import patch, Mock
from autobahn.asyncio.wamp import ApplicationRunner
class TestApplicationRunner(unittest.TestCase):
'''
Test the autobahn.asyncio.wamp.ApplicationRunner class.
'''
def _assertRaisesRegex(self, exception, error, *args, **kw):
try:
self.assertRaisesRegex
except AttributeError:
f = self.assertRaisesRegexp
else:
f = self.assertRaisesRegex
f(exception, error, *args, **kw)
def test_explicit_SSLContext(self):
'''
Ensure that loop.create_connection is called with the exact SSL
context object that is passed (as ssl) to the __init__ method of
ApplicationRunner.
'''
loop = Mock()
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
with patch.object(asyncio, 'get_event_loop', return_value=loop):
ssl = {}
runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm',
ssl=ssl)
runner.run('_unused_')
self.assertIs(ssl, loop.create_connection.call_args[1]['ssl'])
def test_omitted_SSLContext_insecure(self):
'''
Ensure that loop.create_connection is called with ssl=False
if no ssl argument is passed to the __init__ method of
ApplicationRunner and the websocket URL starts with "ws:".
'''
loop = Mock()
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
with patch.object(asyncio, 'get_event_loop', return_value=loop):
runner = ApplicationRunner(u'ws://127.0.0.1:8080/ws', u'realm')
runner.run('_unused_')
self.assertIs(False, loop.create_connection.call_args[1]['ssl'])
def test_omitted_SSLContext_secure(self):
'''
Ensure that loop.create_connection is called with ssl=True
if no ssl argument is passed to the __init__ method of
ApplicationRunner and the websocket URL starts with "wss:".
'''
loop = Mock()
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
with patch.object(asyncio, 'get_event_loop', return_value=loop):
runner = ApplicationRunner(u'wss://127.0.0.1:8080/wss', u'realm')
runner.run('_unused_')
self.assertIs(True, loop.create_connection.call_args[1]['ssl'])
def test_conflict_SSL_True_with_ws_url(self):
'''
ApplicationRunner must raise an exception if given an ssl value of True
but only a "ws:" URL.
'''
loop = Mock()
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
with patch.object(asyncio, 'get_event_loop', return_value=loop):
runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
ssl=True)
error = ('^ssl argument value passed to ApplicationRunner '
'conflicts with the "ws:" prefix of the url '
'argument\. Did you mean to use "wss:"\?$')
self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
def test_conflict_SSLContext_with_ws_url(self):
'''
ApplicationRunner must raise an exception if given an ssl value that is
an instance of SSLContext, but only a "ws:" URL.
'''
import ssl
try:
# Try to create an SSLContext, to be as rigorous as we can be
# by avoiding making assumptions about the ApplicationRunner
# implementation. If we happen to be on a Python that has no
# SSLContext, we pass ssl=True, which will simply cause this
# test to degenerate to the behavior of
# test_conflict_SSL_True_with_ws_url (above). In fact, at the
# moment (2015-05-10), none of this matters because the
# ApplicationRunner implementation does not check to require
# that its ssl argument is either a bool or an SSLContext. But
# that may change, so we should be careful.
ssl.create_default_context
except AttributeError:
context = True
else:
context = ssl.create_default_context()
loop = Mock()
loop.run_until_complete = Mock(return_value=(Mock(), Mock()))
with patch.object(asyncio, 'get_event_loop', return_value=loop):
runner = ApplicationRunner(u'ws://127.0.0.1:8080/wss', u'realm',
ssl=context)
error = ('^ssl argument value passed to ApplicationRunner '
'conflicts with the "ws:" prefix of the url '
'argument\. Did you mean to use "wss:"\?$')
self._assertRaisesRegex(Exception, error, runner.run, '_unused_')
|
{
"content_hash": "9a1bcdc1ffea824d0a594af9cd321c75",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 90,
"avg_line_length": 44.21637426900585,
"alnum_prop": 0.5599788387779394,
"repo_name": "hzruandd/AutobahnPython",
"id": "31b2e3e076e546ffcba2d75db19993c1f31662ea",
"size": "8838",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "autobahn/wamp/test/test_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3444"
},
{
"name": "Python",
"bytes": "896435"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class CoreConfig(AppConfig):
name = 'apps.core'
|
{
"content_hash": "8b8acd14921a9072e37d3e226fc72158",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.6,
"alnum_prop": 0.7386363636363636,
"repo_name": "klashxx/PyConES2017",
"id": "0fc76c70ec1c3a4d3329297503062d4400fdcefd",
"size": "88",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "web/sysgate/apps/core/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21077"
},
{
"name": "HTML",
"bytes": "19432"
},
{
"name": "Python",
"bytes": "25071"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.core.urlresolvers import reverse_lazy
urlpatterns = patterns('frontend.views',
# Examples:
# url(r'^$', 'django_xrm.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'person_list', name='person_list'),
)
# URLs related to user account handling
urlpatterns += patterns('django.contrib.auth.views',
url(r'^login/$', 'login', {'template_name': 'auth_account/login.html'},
name='auth_login'),
url(r'^logout/$', 'logout',
{'template_name': 'auth_account/logout.html'},
name='auth_logout'),
url(r'^password/change/$', 'password_change',
{'template_name': 'auth_account/password_change_form.html'},
name='auth_password_change'),
url(r'^password/change/done/$', 'password_change_done',
{'template_name': 'auth_account/password_change_done.html'},
name='auth_password_change_done'),
url(r'^password/reset/$', 'password_reset',
{
'template_name': 'auth_account/password_reset_form.html',
'email_template_name': 'auth_account/password_reset_email.html',
'subject_template_name': 'auth_account/password_reset_subject.html',
'post_reset_redirect': reverse_lazy('auth_password_reset_done'),
},
name='auth_password_reset'),
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$',
'password_reset_confirm', {
'template_name':
'auth_account/password_reset_confirm.html',
'post_reset_redirect':
reverse_lazy('auth_password_reset_complete')},
name='auth_password_reset_confirm'),
url(r'^password/reset/complete/$', 'password_reset_complete',
{'template_name': 'auth_account/password_reset_complete.html'},
name='auth_password_reset_complete'),
url(r'^password/reset/done/$', 'password_reset_done',
{'template_name': 'auth_account/password_reset_done.html'},
name='auth_password_reset_done'),
)
|
{
"content_hash": "bd452aa5b105b469eba75a4ff1e97293",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 80,
"avg_line_length": 38.370370370370374,
"alnum_prop": 0.6196911196911197,
"repo_name": "acruxsa/django-xrm",
"id": "0e9026090993cdc0a556a4c219a5091ca2b3c07c",
"size": "2072",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_xrm/frontend/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2368"
},
{
"name": "JavaScript",
"bytes": "10027"
},
{
"name": "Python",
"bytes": "27154"
}
],
"symlink_target": ""
}
|
import ast
import yaml
class DefinitionVisitor(ast.NodeVisitor):
def __init__(self):
super(DefinitionVisitor, self).__init__()
self.functions = {}
self.classes = {}
self.names = {}
self.attrs = set()
self.definitions = {
'def': self.functions,
'class': self.classes,
'names': self.names,
'attrs': self.attrs,
}
def visit_Name(self, node):
self.names.setdefault(type(node.ctx).__name__, set()).add(node.id)
def visit_Attribute(self, node):
self.attrs.add(node.attr)
for child in ast.iter_child_nodes(node):
self.visit(child)
def visit_ClassDef(self, node):
visitor = DefinitionVisitor()
self.classes[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def visit_FunctionDef(self, node):
visitor = DefinitionVisitor()
self.functions[node.name] = visitor.definitions
for child in ast.iter_child_nodes(node):
visitor.visit(child)
def non_empty(defs):
functions = {name: non_empty(f) for name, f in defs['def'].items()}
classes = {name: non_empty(f) for name, f in defs['class'].items()}
result = {}
if functions: result['def'] = functions
if classes: result['class'] = classes
names = defs['names']
uses = []
for name in names.get('Load', ()):
if name not in names.get('Param', ()) and name not in names.get('Store', ()):
uses.append(name)
uses.extend(defs['attrs'])
if uses: result['uses'] = uses
result['names'] = names
result['attrs'] = defs['attrs']
return result
def definitions_in_code(input_code):
input_ast = ast.parse(input_code)
visitor = DefinitionVisitor()
visitor.visit(input_ast)
definitions = non_empty(visitor.definitions)
return definitions
def definitions_in_file(filepath):
with open(filepath) as f:
return definitions_in_code(f.read())
def defined_names(prefix, defs, names):
for name, funcs in defs.get('def', {}).items():
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
for name, funcs in defs.get('class', {}).items():
names.setdefault(name, {'defined': []})['defined'].append(prefix + name)
defined_names(prefix + name + ".", funcs, names)
def used_names(prefix, item, defs, names):
for name, funcs in defs.get('def', {}).items():
used_names(prefix + name + ".", name, funcs, names)
for name, funcs in defs.get('class', {}).items():
used_names(prefix + name + ".", name, funcs, names)
path = prefix.rstrip('.')
for used in defs.get('uses', ()):
if used in names:
if item:
names[item].setdefault('uses', []).append(used)
names[used].setdefault('used', {}).setdefault(item, []).append(path)
if __name__ == '__main__':
import sys, os, argparse, re
parser = argparse.ArgumentParser(description='Find definitions.')
parser.add_argument(
"--unused", action="store_true", help="Only list unused definitions"
)
parser.add_argument(
"--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
)
parser.add_argument(
"--pattern", action="append", metavar="REGEXP",
help="Search for a pattern"
)
parser.add_argument(
"directories", nargs='+', metavar="DIR",
help="Directories to search for definitions"
)
parser.add_argument(
"--referrers", default=0, type=int,
help="Include referrers up to the given depth"
)
parser.add_argument(
"--referred", default=0, type=int,
help="Include referred down to the given depth"
)
parser.add_argument(
"--format", default="yaml",
help="Output format, one of 'yaml' or 'dot'"
)
args = parser.parse_args()
definitions = {}
for directory in args.directories:
for root, dirs, files in os.walk(directory):
for filename in files:
if filename.endswith(".py"):
filepath = os.path.join(root, filename)
definitions[filepath] = definitions_in_file(filepath)
names = {}
for filepath, defs in definitions.items():
defined_names(filepath + ":", defs, names)
for filepath, defs in definitions.items():
used_names(filepath + ":", None, defs, names)
patterns = [re.compile(pattern) for pattern in args.pattern or ()]
ignore = [re.compile(pattern) for pattern in args.ignore or ()]
result = {}
for name, definition in names.items():
if patterns and not any(pattern.match(name) for pattern in patterns):
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
if args.unused and definition.get('used'):
continue
result[name] = definition
referrer_depth = args.referrers
referrers = set()
while referrer_depth:
referrer_depth -= 1
for entry in result.values():
for used_by in entry.get("used", ()):
referrers.add(used_by)
for name, definition in names.items():
if not name in referrers:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
referred_depth = args.referred
referred = set()
while referred_depth:
referred_depth -= 1
for entry in result.values():
for uses in entry.get("uses", ()):
referred.add(uses)
for name, definition in names.items():
if not name in referred:
continue
if ignore and any(pattern.match(name) for pattern in ignore):
continue
result[name] = definition
if args.format == 'yaml':
yaml.dump(result, sys.stdout, default_flow_style=False)
elif args.format == 'dot':
print "digraph {"
for name, entry in result.items():
print name
for used_by in entry.get("used", ()):
if used_by in result:
print used_by, "->", name
print "}"
else:
raise ValueError("Unknown format %r" % (args.format))
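# ---------------------------------------------------------------------------
# Illustrative invocations (flags taken from the argparse definitions above;
# directory names are placeholders):
#
#     python definitions.py --unused --format yaml synapse/
#     python definitions.py --pattern '.*Handler.*' --referrers 1 \
#         --format dot synapse/ | dot -Tpng -o defs.png
#
# The second pipeline assumes Graphviz's ``dot`` is installed.
# ---------------------------------------------------------------------------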
|
{
"content_hash": "9f7b42a7db75580f829a4052caa157df",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 85,
"avg_line_length": 33.31958762886598,
"alnum_prop": 0.5798267326732673,
"repo_name": "TribeMedia/synapse",
"id": "47dac7772de0e0a9ab2f2c718688535f4494583e",
"size": "6484",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts-dev/definitions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '0012_auto_20150607_2207'),
('pages', '0012_benefitsitemplugin_benefitsplugin'),
]
operations = [
migrations.CreateModel(
name='FooterPlugin',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin')),
('footer_text', models.CharField(max_length=100, blank=True)),
('footer_link', models.CharField(max_length=200, blank=True)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
{
"content_hash": "dfe22ddad85164702e6bb58ccb20cfa9",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 148,
"avg_line_length": 31.192307692307693,
"alnum_prop": 0.5647348951911221,
"repo_name": "raccoongang/socraticqs2",
"id": "114be6b6a78c613b73bfa4b40b1f61c38e2d4744",
"size": "835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/pages/migrations/0013_footerplugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189600"
},
{
"name": "Dockerfile",
"bytes": "580"
},
{
"name": "Gherkin",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "342788"
},
{
"name": "JavaScript",
"bytes": "133425"
},
{
"name": "Makefile",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "1504025"
},
{
"name": "Shell",
"bytes": "1521"
}
],
"symlink_target": ""
}
|
import os
# django imports
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
# settings for django-tinymce
try:
import tinymce.settings
DEFAULT_URL_TINYMCE = tinymce.settings.JS_BASE_URL + '/'
DEFAULT_PATH_TINYMCE = tinymce.settings.JS_ROOT + '/'
except ImportError:
DEFAULT_URL_TINYMCE = settings.ADMIN_MEDIA_PREFIX + "tinymce/jscripts/tiny_mce/"
DEFAULT_PATH_TINYMCE = os.path.join(settings.MEDIA_ROOT, 'admin/tinymce/jscripts/tiny_mce/')
# Set to True in order to see the FileObject when Browsing.
DEBUG = getattr(settings, "FILEBROWSER_DEBUG", False)
# Main Media Settings
MEDIA_ROOT = getattr(settings, "FILEBROWSER_MEDIA_ROOT", settings.MEDIA_ROOT)
MEDIA_URL = getattr(settings, "FILEBROWSER_MEDIA_URL", settings.MEDIA_URL)
# Main FileBrowser Directory. This has to be a directory within MEDIA_ROOT.
# Leave empty in order to browse all files under MEDIA_ROOT.
# DO NOT USE A SLASH AT THE BEGINNING, DO NOT FORGET THE TRAILING SLASH AT THE END.
DIRECTORY = getattr(settings, "FILEBROWSER_DIRECTORY", 'uploads/')
# The URL/PATH to your filebrowser media-files.
URL_FILEBROWSER_MEDIA = getattr(settings, "FILEBROWSER_URL_FILEBROWSER_MEDIA", "/media/filebrowser/")
PATH_FILEBROWSER_MEDIA = getattr(settings, "FILEBROWSER_PATH_FILEBROWSER_MEDIA", os.path.join(settings.MEDIA_ROOT, 'filebrowser/'))
# The URL/PATH to your TinyMCE Installation.
URL_TINYMCE = getattr(settings, "FILEBROWSER_URL_TINYMCE", DEFAULT_URL_TINYMCE)
PATH_TINYMCE = getattr(settings, "FILEBROWSER_PATH_TINYMCE", DEFAULT_PATH_TINYMCE)
# Allowed Extensions for File Upload. Lower case is important.
# Please be aware that there are Icons for the default extension settings.
# Therefore, if you add a category (e.g. "Misc"), you won't get an icon.
EXTENSIONS = getattr(settings, "FILEBROWSER_EXTENSIONS", {
'Folder': [''],
'Image': ['.jpg','.jpeg','.gif','.png','.tif','.tiff'],
'Video': ['.mov','.wmv','.mpeg','.mpg','.avi','.rm'],
'Document': ['.pdf','.doc','.rtf','.txt','.xls','.csv'],
'Audio': ['.mp3','.mp4','.wav','.aiff','.midi','.m4p'],
'Code': ['.html','.py','.js','.css']
})
# Define different formats for allowed selections.
# This has to be a subset of EXTENSIONS.
SELECT_FORMATS = getattr(settings, "FILEBROWSER_SELECT_FORMATS", {
'File': ['Folder','Document',],
'Image': ['Image'],
'Media': ['Video','Audio'],
'Document': ['Document'],
# for TinyMCE we can also define lower-case items
'image': ['Image'],
'file': ['Folder','Image','Document',],
})
# Directory to Save Image Versions (and Thumbnails). Relative to MEDIA_ROOT.
# If no directory is given, versions are stored within the Image directory.
# VERSION URL: VERSIONS_BASEDIR/original_path/originalfilename_versionsuffix.extension
VERSIONS_BASEDIR = getattr(settings, 'FILEBROWSER_VERSIONS_BASEDIR', '')
# Versions Format. Available Attributes: verbose_name, width, height, opts
VERSIONS = getattr(settings, "FILEBROWSER_VERSIONS", {
'fb_thumb': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop upscale'},
'thumbnail': {'verbose_name': 'Thumbnail (140px)', 'width': 140, 'height': '', 'opts': ''},
'small': {'verbose_name': 'Small (300px)', 'width': 300, 'height': '', 'opts': ''},
'medium': {'verbose_name': 'Medium (460px)', 'width': 460, 'height': '', 'opts': ''},
'big': {'verbose_name': 'Big (620px)', 'width': 620, 'height': '', 'opts': ''},
'cropped': {'verbose_name': 'Cropped (60x60px)', 'width': 60, 'height': 60, 'opts': 'crop'},
'croppedthumbnail': {'verbose_name': 'Cropped Thumbnail (140x140px)', 'width': 140, 'height': 140, 'opts': 'crop'},
})
# Versions available within the Admin-Interface.
ADMIN_VERSIONS = getattr(settings, 'FILEBROWSER_ADMIN_VERSIONS', ['thumbnail','small', 'medium','big'])
# Which Version should be used as Admin-thumbnail.
ADMIN_THUMBNAIL = getattr(settings, 'FILEBROWSER_ADMIN_THUMBNAIL', 'fb_thumb')
# EXTRA SETTINGS
# True to save the URL including MEDIA_URL to your model fields
# or False (default) to save path relative to MEDIA_URL.
# Note: Full URL does not necessarily mean absolute URL.
SAVE_FULL_URL = getattr(settings, "FILEBROWSER_SAVE_FULL_URL", True)
# If set to True, the FileBrowser will not try to import a mis-installed PIL.
STRICT_PIL = getattr(settings, 'FILEBROWSER_STRICT_PIL', False)
# Workaround for PIL's "Suspension not allowed here" error:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
IMAGE_MAXBLOCK = getattr(settings, 'FILEBROWSER_IMAGE_MAXBLOCK', 1024*1024)
# Exclude files matching any of the following regular expressions
# Default is to exclude 'thumbnail' style naming of image-thumbnails.
EXTENSION_LIST = []
for exts in EXTENSIONS.values():
EXTENSION_LIST += exts
EXCLUDE = getattr(settings, 'FILEBROWSER_EXCLUDE', (r'_(%(exts)s)_.*_q\d{1,3}\.(%(exts)s)' % {'exts': ('|'.join(EXTENSION_LIST))},))
# Max. Upload Size in Bytes.
MAX_UPLOAD_SIZE = getattr(settings, "FILEBROWSER_MAX_UPLOAD_SIZE", 10485760)
# Convert Filename (replace spaces and convert to lowercase)
CONVERT_FILENAME = getattr(settings, "FILEBROWSER_CONVERT_FILENAME", True)
# Max. Entries per Page
# Loading a server directory with lots of files might take a while
# Use this setting to limit the items shown
LIST_PER_PAGE = getattr(settings, "FILEBROWSER_LIST_PER_PAGE", 50)
# Default Sorting
# Options: date, filesize, filename_lower, filetype_checked
DEFAULT_SORTING_BY = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_BY", "date")
# Sorting Order: asc, desc
DEFAULT_SORTING_ORDER = getattr(settings, "FILEBROWSER_DEFAULT_SORTING_ORDER", "desc")
# regex to clean dir names before creation
FOLDER_REGEX = getattr(settings, "FILEBROWSER_FOLDER_REGEX", r'^[\sa-zA-Z0-9._/-]+$')
# EXTRA TRANSLATION STRINGS
# The following strings are not available within views or templates
_('Folder')
_('Image')
_('Video')
_('Document')
_('Audio')
_('Code')
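# ---------------------------------------------------------------------------
# Illustrative overrides (these belong in the Django project's own settings
# module, not in this file); every name mirrors a getattr() default above and
# the values shown are made-up examples:
#
#     FILEBROWSER_DIRECTORY = 'media_uploads/'
#     FILEBROWSER_MAX_UPLOAD_SIZE = 20 * 1024 * 1024   # 20 MB
#     FILEBROWSER_VERSIONS = {
#         'fb_thumb': {'verbose_name': 'Admin Thumbnail', 'width': 60,
#                      'height': 60, 'opts': 'crop upscale'},
#         'hero': {'verbose_name': 'Hero (1200px)', 'width': 1200,
#                  'height': '', 'opts': ''},
#     }
#     FILEBROWSER_ADMIN_VERSIONS = ['hero']
# ---------------------------------------------------------------------------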
|
{
"content_hash": "9e065bfcd67aa810f7f567b37d96c954",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 132,
"avg_line_length": 49.483333333333334,
"alnum_prop": 0.7054563826204109,
"repo_name": "dwaiter/django-filebrowser-old",
"id": "9586cc42dbf7a1fda7d6835a43ed13a4c79d2d51",
"size": "5965",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "filebrowser/settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "321037"
},
{
"name": "JavaScript",
"bytes": "169792"
},
{
"name": "PHP",
"bytes": "7956"
},
{
"name": "Python",
"bytes": "67745"
}
],
"symlink_target": ""
}
|
from .mnemon import mnemon as mnc
from .dec import mnemon as mnd
__all__ = ["mnc", "mnd"]
|
{
"content_hash": "f5ef4b52343f720e000837b3be5d116e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 33,
"avg_line_length": 22.75,
"alnum_prop": 0.6593406593406593,
"repo_name": "myyc/mnemon",
"id": "4c7ee786984e28e05ba8c3a0111b387a6704af6c",
"size": "91",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnemon/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7442"
}
],
"symlink_target": ""
}
|
from ament_pep257.main import main
def test_pep257():
rc = main(argv=[])
assert rc == 0, 'Found code style errors / warnings'
|
{
"content_hash": "03d2a7a9cb96660549ce95a96d67e323",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6544117647058824,
"repo_name": "dhood/launch",
"id": "dd3ea28ee3bd12759a9decb53480853d63ca372b",
"size": "738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "launch/test/test_pep257.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "67631"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
from flexget.utils.log import log_once
log = logging.getLogger('imdb')
class FilterImdb(object):
"""
This plugin allows filtering based on IMDB score, votes and genres etc.
Note: All parameters are optional. Some are mutually exclusive.
Configuration::
min_score: <num>
min_votes: <num>
min_meta_score: <num>
min_year: <num>
max_year: <num>
# accept movies with any of these genres
accept_genres:
- genre1
- genre2
# reject if genre contains any of these
reject_genres:
- genre1
- genre2
# reject if language contain any of these
reject_languages:
- language1
# accept only these primary languages
accept_languages:
- language1
# accept movies with any of these actors
accept_actors:
- nm0004695
- nm0004754
# reject movie if it has any of these actors
reject_actors:
- nm0001191
- nm0002071
# accept all movies by these directors
accept_directors:
- nm0000318
# reject movies by these directors
reject_directors:
- nm0093051
# accept all movies by these writers
accept_writers:
- nm0000318
# reject movies by these writers
reject_writers:
- nm0093051
# reject movies/TV shows with any of these ratings
reject_mpaa_ratings:
- PG_13
- R
- X
# accept movies/TV shows with only these ratings
accept_mpaa_ratings:
- PG
- G
- TV_Y
"""
schema = {
'type': 'object',
'properties': {
'min_year': {'type': 'integer'},
'max_year': {'type': 'integer'},
'min_votes': {'type': 'integer'},
'min_meta_score': {'type': 'integer'},
'min_score': {'type': 'number'},
'accept_genres': {'type': 'array', 'items': {'type': 'string'}},
'reject_genres': {'type': 'array', 'items': {'type': 'string'}},
'reject_languages': {'type': 'array', 'items': {'type': 'string'}},
'accept_languages': {'type': 'array', 'items': {'type': 'string'}},
'reject_actors': {'type': 'array', 'items': {'type': 'string'}},
'accept_actors': {'type': 'array', 'items': {'type': 'string'}},
'reject_directors': {'type': 'array', 'items': {'type': 'string'}},
'accept_directors': {'type': 'array', 'items': {'type': 'string'}},
'reject_writers': {'type': 'array', 'items': {'type': 'string'}},
'accept_writers': {'type': 'array', 'items': {'type': 'string'}},
'reject_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}},
'accept_mpaa_ratings': {'type': 'array', 'items': {'type': 'string'}},
},
'additionalProperties': False,
}
# Run later to avoid unnecessary lookups
@plugin.priority(120)
def on_task_filter(self, task, config):
lookup = plugin.get('imdb_lookup', self).lookup
# since the plugin does not reject anything, no sense going through accepted
for entry in task.undecided:
force_accept = False
try:
lookup(entry)
except plugin.PluginError as e:
# logs skip message once through log_once (info) and then only when run from cmd line (w/o --cron)
msg = 'Skipping %s because of an error: %s' % (entry['title'], e.value)
if not log_once(msg, logger=log):
log.verbose(msg)
continue
# for key, value in entry.iteritems():
# log.debug('%s = %s (type: %s)' % (key, value, type(value)))
# Check defined conditions, TODO: rewrite into functions?
reasons = []
if 'min_score' in config:
if entry.get('imdb_score', 0) < config['min_score']:
reasons.append(
'min_score (%s < %s)' % (entry.get('imdb_score'), config['min_score'])
)
if 'min_votes' in config:
if entry.get('imdb_votes', 0) < config['min_votes']:
reasons.append(
'min_votes (%s < %s)' % (entry.get('imdb_votes'), config['min_votes'])
)
if 'min_meta_score' in config:
if entry.get('imdb_meta_score', 0) < config['min_meta_score']:
reasons.append(
'min_meta_score (%s < %s)'
% (entry.get('imdb_meta_score'), config['min_meta_score'])
)
if 'min_year' in config:
if entry.get('imdb_year', 0) < config['min_year']:
reasons.append(
'min_year (%s < %s)' % (entry.get('imdb_year'), config['min_year'])
)
if 'max_year' in config:
if entry.get('imdb_year', 0) > config['max_year']:
reasons.append(
'max_year (%s > %s)' % (entry.get('imdb_year'), config['max_year'])
)
if 'accept_genres' in config:
accepted = config['accept_genres']
accept_genre = False
for genre in entry.get('imdb_genres', []):
if genre in accepted:
accept_genre = True
break
if not accept_genre:
    reasons.append('accept_genres')
if 'reject_genres' in config:
rejected = config['reject_genres']
for genre in entry.get('imdb_genres', []):
if genre in rejected:
reasons.append('reject_genres')
break
if 'reject_languages' in config:
rejected = config['reject_languages']
for language in entry.get('imdb_languages', []):
if language in rejected:
reasons.append('reject_languages')
break
if 'accept_languages' in config:
accepted = config['accept_languages']
if entry.get('imdb_languages') and entry['imdb_languages'][0] not in accepted:
# Reject if the first (primary) language is not among acceptable languages
reasons.append('accept_languages')
if 'reject_actors' in config:
rejected = config['reject_actors']
for actor_id, actor_name in entry.get('imdb_actors', {}).items():
if actor_id in rejected or actor_name in rejected:
reasons.append('reject_actors %s' % (actor_name or actor_id))
break
# Accept if actors contains an accepted actor, but don't reject otherwise
if 'accept_actors' in config:
accepted = config['accept_actors']
for actor_id, actor_name in entry.get('imdb_actors', {}).items():
if actor_id in accepted or actor_name in accepted:
log.debug('Accepting because of accept_actors %s' % (actor_name or actor_id))
force_accept = True
break
if 'reject_directors' in config:
rejected = config['reject_directors']
for director_id, director_name in entry.get('imdb_directors', {}).items():
if director_id in rejected or director_name in rejected:
reasons.append('reject_directors %s' % (director_name or director_id))
break
# Accept if the director is in the accept list, but do not reject if the director is unknown
if 'accept_directors' in config:
accepted = config['accept_directors']
for director_id, director_name in entry.get('imdb_directors', {}).items():
if director_id in accepted or director_name in accepted:
log.debug(
    'Accepting because of accept_directors %s'
    % (director_name or director_id)
)
force_accept = True
break
if 'reject_writers' in config:
rejected = config['reject_writers']
for writer_id, writer_name in entry.get('imdb_writers', {}).items():
if writer_id in rejected or writer_name in rejected:
reasons.append('reject_writers %s' % (writer_name or writer_id))
break
# Accept if the writer is in the accept list, but do not reject if the writer is unknown
if 'accept_writers' in config:
accepted = config['accept_writers']
for writer_id, writer_name in entry.get('imdb_writers', {}).items():
if writer_id in accepted or writer_name in accepted:
log.debug(
    'Accepting because of accept_writers %s' % (writer_name or writer_id)
)
force_accept = True
break
if 'reject_mpaa_ratings' in config:
rejected = config['reject_mpaa_ratings']
if entry.get('imdb_mpaa_rating') in rejected:
reasons.append('reject_mpaa_ratings %s' % entry['imdb_mpaa_rating'])
if 'accept_mpaa_ratings' in config:
accepted = config['accept_mpaa_ratings']
if entry.get('imdb_mpaa_rating') not in accepted:
reasons.append('accept_mpaa_ratings %s' % entry.get('imdb_mpaa_rating'))
if reasons and not force_accept:
msg = 'Didn\'t accept `%s` because of rule(s) %s' % (
entry.get('imdb_name', None) or entry['title'],
', '.join(reasons),
)
if task.options.debug:
log.debug(msg)
else:
if task.options.cron:
log_once(msg, log)
else:
log.info(msg)
else:
log.debug('Accepting %s' % (entry['title']))
entry.accept()
@event('plugin.register')
def register_plugin():
plugin.register(FilterImdb, 'imdb', api_ver=2)
|
{
"content_hash": "b2a9bce849fda918100fb26a74b144e0",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 113,
"avg_line_length": 39.8235294117647,
"alnum_prop": 0.5033234859675036,
"repo_name": "gazpachoking/Flexget",
"id": "458de4b110386a5cc205938a2cdb931d15ad06f4",
"size": "10832",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/components/imdb/imdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36113"
},
{
"name": "JavaScript",
"bytes": "133743"
},
{
"name": "Python",
"bytes": "1494170"
}
],
"symlink_target": ""
}
|
"""Module for constructing RNN Cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.contrib.compiler import jit
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def _get_concat_variable(name, shape, dtype, num_shards):
"""Get a sharded variable concatenated into one tensor."""
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
if len(sharded_variable) == 1:
return sharded_variable[0]
concat_name = name + "/concat"
concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0"
for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES):
if value.name == concat_full_name:
return value
concat_variable = array_ops.concat(sharded_variable, 0, name=concat_name)
ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES,
concat_variable)
return concat_variable
def _get_sharded_variable(name, shape, dtype, num_shards):
"""Get a list of sharded variables with the given dtype."""
if num_shards > shape[0]:
raise ValueError("Too many shards: shape=%s, num_shards=%d" %
(shape, num_shards))
unit_shard_size = int(math.floor(shape[0] / num_shards))
remaining_rows = shape[0] - unit_shard_size * num_shards
shards = []
for i in range(num_shards):
current_size = unit_shard_size
if i < remaining_rows:
current_size += 1
shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
dtype=dtype))
return shards
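# Illustrative sharding (hypothetical shapes): splitting a [10, 4] variable
# across num_shards=3 gives unit_shard_size=3 and remaining_rows=1, so the
# shard shapes are [4, 4], [3, 4] and [3, 4]; _get_concat_variable then
# re-joins them along axis 0 into the full [10, 4] tensor.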
def _norm(g, b, inp, scope):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(g)
beta_init = init_ops.constant_initializer(b)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init)
vs.get_variable("beta", shape=shape, initializer=beta_init)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The coupling of input and forget gate is based on:
http://arxiv.org/pdf/1503.04069.pdf
Greff et al. "LSTM: A Search Space Odyssey"
The class uses optional peep-hole connections, and an optional projection
layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
"""
def __init__(self, num_units, use_peepholes=False,
initializer=None, num_proj=None, proj_clip=None,
num_unit_shards=1, num_proj_shards=1,
forget_bias=1.0, state_is_tuple=True,
activation=math_ops.tanh, reuse=None,
layer_norm=False, norm_gain=1.0, norm_shift=0.0):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
num_proj_shards: How to split the projection matrix. If >1, the
projection matrix is stored across num_proj_shards.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
state_is_tuple: If True (default), accepted and returned states are 2-tuples
of the `c_state` and `m_state`. If False, they are concatenated along the
column axis; that behavior is deprecated and will soon be removed.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
"""
super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
self._reuse = reuse
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
if state_is_tuple else num_units + num_proj)
self._output_size = num_proj
else:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units)
if state_is_tuple else 2 * num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: if `state_is_tuple` is False, this must be a state Tensor,
`2-D, batch x state_size`. If `state_is_tuple` is True, this must be a
tuple of state Tensors, both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
num_proj = self._num_units if self._num_proj is None else self._num_proj
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
concat_w = _get_concat_variable(
"W", [input_size.value + num_proj, 3 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[3 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([inputs, m_prev], 1)
lstm_matrix = math_ops.matmul(cell_inputs, concat_w)
# If layer normalization is applied, do not add bias
if not self._layer_norm:
lstm_matrix = nn_ops.bias_add(lstm_matrix, b)
j, f, o = array_ops.split(value=lstm_matrix, num_or_size_splits=3, axis=1)
# Apply layer normalization
if self._layer_norm:
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
f_act = sigmoid(f + self._forget_bias + w_f_diag * c_prev)
else:
f_act = sigmoid(f + self._forget_bias)
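# Coupled input-forget gate (Greff et al.): the input gate is implicitly
# 1 - f_act, so only three gate slices (j, f, o) are computed above.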
c = (f_act * c_prev + (1 - f_act) * self._activation(j))
# Apply layer normalization
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
concat_w_proj = _get_concat_variable(
"W_P", [self._num_units, self._num_proj],
dtype, self._num_proj_shards)
m = math_ops.matmul(m, concat_w_proj)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (rnn_cell_impl.LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
class TimeFreqLSTMCell(rnn_cell_impl.RNNCell):
"""Time-Frequency Long short-term memory unit (LSTM) recurrent network cell.
This implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
It uses peep-hole connections and optional cell clipping.
"""
def __init__(self, num_units, use_peepholes=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_unit_shards: int, How to split the weight matrix. If >1, the weight
matrix is stored across num_unit_shards.
forget_bias: float, Biases of the forget gate are initialized by default
to 1 in order to reduce the scale of forgetting at the beginning
of the training.
feature_size: int, The size of the input feature the LSTM spans over.
frequency_skip: int, The amount the LSTM filter is shifted by in
frequency.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(TimeFreqLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._state_size = 2 * num_units
self._output_size = num_units
self._reuse = reuse
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: state Tensor, 2D, batch x state_size.
Returns:
A tuple containing:
- A 2D, batch x output_dim, Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, batch x state_size, Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
freq_inputs = self._make_tf_features(inputs)
dtype = inputs.dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w = _get_concat_variable(
"W", [actual_input_size + 2*self._num_units, 4 * self._num_units],
dtype, self._num_unit_shards)
b = vs.get_variable(
"B",
shape=[4 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
# Diagonal connections
if self._use_peepholes:
w_f_diag = vs.get_variable(
"W_F_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"W_I_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"W_O_diag", shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([int(inputs.get_shape()[0]),
self._num_units], dtype)
for fq in range(len(freq_inputs)):
c_prev = array_ops.slice(state, [0, 2*fq*self._num_units],
[-1, self._num_units])
m_prev = array_ops.slice(state, [0, (2*fq+1)*self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat([freq_inputs[fq], m_prev, m_prev_freq],
1)
lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * tanh(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * tanh(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * tanh(c)
else:
m = sigmoid(o) * tanh(c)
m_prev_freq = m
if fq == 0:
state_out = array_ops.concat([c, m], 1)
m_out = m
else:
state_out = array_ops.concat([state_out, c, m], 1)
m_out = array_ops.concat([m_out, m], 1)
return m_out, state_out
def _make_tf_features(self, input_feat):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, batch x num_units.
Returns:
A list of frequency features, with each element containing:
- A 2D, batch x output_dim, Tensor representing the time-frequency feature
for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
num_feats = int((input_size - self._feature_size) / (
self._frequency_skip)) + 1
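# Illustrative numbers (hypothetical): with input_size=40, feature_size=8 and
# frequency_skip=4, num_feats = (40 - 8) / 4 + 1 = 9 overlapping slices of
# width 8, each shifted by 4 frequency bins.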
freq_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(input_feat, [0, f*self._frequency_skip],
[-1, self._feature_size])
freq_inputs.append(cur_input)
return freq_inputs
class GridLSTMCell(rnn_cell_impl.RNNCell):
"""Grid Long short-term memory unit (LSTM) recurrent network cell.
The default is based on:
Nal Kalchbrenner, Ivo Danihelka and Alex Graves
"Grid Long Short-Term Memory," Proc. ICLR 2016.
http://arxiv.org/abs/1507.01526
When peephole connections are used, the implementation is based on:
Tara N. Sainath and Bo Li
"Modeling Time-Frequency Patterns with LSTM vs. Convolutional Architectures
for LVCSR Tasks." submitted to INTERSPEECH, 2016.
The code uses optional peephole connections, shared_weights and cell clipping.
"""
def __init__(self, num_units, use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
state_is_tuple=True,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
state_is_tuple: If True (default), accepted and returned states are 2-tuples
of the `c_state` and `m_state`. If False, they are concatenated along the
column axis; that behavior is deprecated and will soon be removed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
ValueError: if the num_frequency_blocks list is not specified
"""
super(GridLSTMCell, self).__init__(_reuse=reuse)
if not state_is_tuple:
logging.warn("%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._share_time_frequency_weights = share_time_frequency_weights
self._couple_input_forget_gates = couple_input_forget_gates
self._state_is_tuple = state_is_tuple
self._cell_clip = cell_clip
self._initializer = initializer
self._num_unit_shards = num_unit_shards
self._forget_bias = forget_bias
self._feature_size = feature_size
self._frequency_skip = frequency_skip
self._start_freqindex_list = start_freqindex_list
self._end_freqindex_list = end_freqindex_list
self._num_frequency_blocks = num_frequency_blocks
self._total_blocks = 0
self._reuse = reuse
if self._num_frequency_blocks is None:
raise ValueError("Must specify num_frequency_blocks")
for block_index in range(len(self._num_frequency_blocks)):
self._total_blocks += int(self._num_frequency_blocks[block_index])
if state_is_tuple:
state_names = ""
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "state_f%02d_b%02d" % (freq_index, block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"GridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(
*([num_units, num_units] * self._total_blocks))
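# Illustrative field layout (hypothetical config): with
# num_frequency_blocks=[2] the namedtuple fields are state_f00_b00_c,
# state_f00_b00_m, state_f01_b00_c and state_f01_b00_m, i.e. one (c, m)
# pair per frequency step.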
else:
self._state_tuple_type = None
self._state_size = num_units * self._total_blocks * 2
self._output_size = num_units * self._total_blocks * 2
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
@property
def state_tuple_type(self):
return self._state_tuple_type
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, feature_size].
state: Tensor or tuple of Tensors, 2D, [batch, state_size], depends on the
flag self._state_is_tuple.
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
freq_inputs = self._make_tf_features(inputs)
m_out_lst = []
state_out_lst = []
for block in range(len(freq_inputs)):
m_out_lst_current, state_out_lst_current = self._compute(
freq_inputs[block], block, state, batch_size,
state_is_tuple=self._state_is_tuple)
m_out_lst.extend(m_out_lst_current)
state_out_lst.extend(state_out_lst_current)
if self._state_is_tuple:
state_out = self._state_tuple_type(*state_out_lst)
else:
state_out = array_ops.concat(state_out_lst, 1)
m_out = array_ops.concat(m_out_lst, 1)
return m_out, state_out
def _compute(self, freq_inputs, block, state, batch_size,
state_prefix="state",
state_is_tuple=True):
"""Run the actual computation of one step LSTM.
Args:
freq_inputs: list of Tensors, 2D, [batch, feature_size].
block: int, current frequency block index to process.
state: Tensor or tuple of Tensors, 2D, [batch, state_size], it depends on
the flag state_is_tuple.
batch_size: int32, batch size.
state_prefix: (optional) string, name prefix for states, defaults to
"state".
state_is_tuple: boolean, indicates whether the state is a tuple or Tensor.
Returns:
A tuple, containing:
- A list of [batch, output_dim] Tensors, representing the output of the
LSTM given the inputs and state.
- A list of [batch, state_size] Tensors, representing the LSTM state
values given the inputs and previous state.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
num_gates = 3 if self._couple_input_forget_gates else 4
dtype = freq_inputs[0].dtype
actual_input_size = freq_inputs[0].get_shape().as_list()[1]
concat_w_f = _get_concat_variable(
"W_f_%d" % block, [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_f = vs.get_variable(
"B_f_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if not self._share_time_frequency_weights:
concat_w_t = _get_concat_variable(
"W_t_%d" % block, [actual_input_size + 2 * self._num_units,
num_gates * self._num_units],
dtype, self._num_unit_shards)
b_t = vs.get_variable(
"B_t_%d" % block,
shape=[num_gates * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
if self._use_peepholes:
# Diagonal connections
if not self._couple_input_forget_gates:
w_f_diag_freqf = vs.get_variable(
"W_F_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_freqt = vs.get_variable(
"W_F_diag_freqt_%d"% block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqf = vs.get_variable(
"W_I_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_freqt = vs.get_variable(
"W_I_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqf = vs.get_variable(
"W_O_diag_freqf_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_freqt = vs.get_variable(
"W_O_diag_freqt_%d" % block, shape=[self._num_units], dtype=dtype)
if not self._share_time_frequency_weights:
if not self._couple_input_forget_gates:
w_f_diag_timef = vs.get_variable(
"W_F_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_f_diag_timet = vs.get_variable(
"W_F_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timef = vs.get_variable(
"W_I_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_i_diag_timet = vs.get_variable(
"W_I_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timef = vs.get_variable(
"W_O_diag_timef_%d" % block, shape=[self._num_units], dtype=dtype)
w_o_diag_timet = vs.get_variable(
"W_O_diag_timet_%d" % block, shape=[self._num_units], dtype=dtype)
# initialize the first freq state to be zero
m_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
c_prev_freq = array_ops.zeros([batch_size, self._num_units], dtype)
for freq_index in range(len(freq_inputs)):
if state_is_tuple:
name_prefix = "%s_f%02d_b%02d" % (state_prefix, freq_index, block)
c_prev_time = getattr(state, name_prefix + "_c")
m_prev_time = getattr(state, name_prefix + "_m")
else:
c_prev_time = array_ops.slice(
state, [0, 2 * freq_index * self._num_units],
[-1, self._num_units])
m_prev_time = array_ops.slice(
state, [0, (2 * freq_index + 1) * self._num_units],
[-1, self._num_units])
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
cell_inputs = array_ops.concat(
[freq_inputs[freq_index], m_prev_time, m_prev_freq], 1)
# F-LSTM
lstm_matrix_freq = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_f), b_f)
if self._couple_input_forget_gates:
i_freq, j_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
f_freq = None
else:
i_freq, j_freq, f_freq, o_freq = array_ops.split(
value=lstm_matrix_freq, num_or_size_splits=num_gates, axis=1)
# T-LSTM
if self._share_time_frequency_weights:
i_time = i_freq
j_time = j_freq
f_time = f_freq
o_time = o_freq
else:
lstm_matrix_time = nn_ops.bias_add(math_ops.matmul(cell_inputs,
concat_w_t), b_t)
if self._couple_input_forget_gates:
i_time, j_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
f_time = None
else:
i_time, j_time, f_time, o_time = array_ops.split(
value=lstm_matrix_time, num_or_size_splits=num_gates, axis=1)
# F-LSTM c_freq
# input gate activations
if self._use_peepholes:
i_freq_g = sigmoid(i_freq +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_freq_g = sigmoid(i_freq)
# forget gate activations
if self._couple_input_forget_gates:
f_freq_g = 1.0 - i_freq_g
else:
if self._use_peepholes:
f_freq_g = sigmoid(f_freq + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_freq_g = sigmoid(f_freq + self._forget_bias)
# cell state
c_freq = f_freq_g * c_prev_freq + i_freq_g * tanh(j_freq)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_freq = clip_ops.clip_by_value(c_freq, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# T-LSTM c_freq
# input gate activations
if self._use_peepholes:
if self._share_time_frequency_weights:
i_time_g = sigmoid(i_time +
w_i_diag_freqf * c_prev_freq +
w_i_diag_freqt * c_prev_time)
else:
i_time_g = sigmoid(i_time +
w_i_diag_timef * c_prev_freq +
w_i_diag_timet * c_prev_time)
else:
i_time_g = sigmoid(i_time)
# forget gate activations
if self._couple_input_forget_gates:
f_time_g = 1.0 - i_time_g
else:
if self._use_peepholes:
if self._share_time_frequency_weights:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_freqf * c_prev_freq +
w_f_diag_freqt * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias +
w_f_diag_timef * c_prev_freq +
w_f_diag_timet * c_prev_time)
else:
f_time_g = sigmoid(f_time + self._forget_bias)
# cell state
c_time = f_time_g * c_prev_time + i_time_g * tanh(j_time)
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c_time = clip_ops.clip_by_value(c_time, -self._cell_clip,
self._cell_clip)
# pylint: enable=invalid-unary-operand-type
# F-LSTM m_freq
if self._use_peepholes:
m_freq = sigmoid(o_freq +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_freq)
else:
m_freq = sigmoid(o_freq) * tanh(c_freq)
# T-LSTM m_time
if self._use_peepholes:
if self._share_time_frequency_weights:
m_time = sigmoid(o_time +
w_o_diag_freqf * c_freq +
w_o_diag_freqt * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time +
w_o_diag_timef * c_freq +
w_o_diag_timet * c_time) * tanh(c_time)
else:
m_time = sigmoid(o_time) * tanh(c_time)
m_prev_freq = m_freq
c_prev_freq = c_freq
# Concatenate the outputs for T-LSTM and F-LSTM for each shift
if freq_index == 0:
state_out_lst = [c_time, m_time]
m_out_lst = [m_time, m_freq]
else:
state_out_lst.extend([c_time, m_time])
m_out_lst.extend([m_time, m_freq])
return m_out_lst, state_out_lst
def _make_tf_features(self, input_feat, slice_offset=0):
"""Make the frequency features.
Args:
input_feat: input Tensor, 2D, [batch, num_units].
slice_offset: (optional) Python int, default 0, the slicing offset is only
used for the backward processing in the BidirectionalGridLSTMCell. It
specifies a different starting point instead of always 0 to enable the
forward and backward processing look at different frequency blocks.
Returns:
A list of frequency features, with each element containing:
- A 2D, [batch, output_dim], Tensor representing the time-frequency
feature for that frequency index. Here output_dim is feature_size.
Raises:
ValueError: if input_size cannot be inferred from static shape inference.
"""
input_size = input_feat.get_shape().with_rank(2)[-1].value
if input_size is None:
raise ValueError("Cannot infer input_size from static shape inference.")
if slice_offset > 0:
# Padding to the end
inputs = array_ops.pad(
input_feat, array_ops.constant([0, 0, 0, slice_offset], shape=[2, 2],
dtype=dtypes.int32),
"CONSTANT")
elif slice_offset < 0:
# Padding to the front
inputs = array_ops.pad(
input_feat, array_ops.constant([0, 0, -slice_offset, 0], shape=[2, 2],
dtype=dtypes.int32),
"CONSTANT")
slice_offset = 0
else:
inputs = input_feat
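# A positive slice_offset (used by BidirectionalGridLSTMCell's backward pass)
# pads the feature dimension at the end and starts slicing slice_offset bins
# later, so the forward and backward passes look at shifted frequency windows.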
freq_inputs = []
if not self._start_freqindex_list:
if len(self._num_frequency_blocks) != 1:
raise ValueError("Length of num_frequency_blocks"
" is not 1, but instead is %d",
len(self._num_frequency_blocks))
num_feats = int((input_size - self._feature_size) / (
self._frequency_skip)) + 1
if num_feats != self._num_frequency_blocks[0]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." % (
self._num_frequency_blocks[0], num_feats))
block_inputs = []
for f in range(num_feats):
cur_input = array_ops.slice(
inputs, [0, slice_offset + f * self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
else:
if len(self._start_freqindex_list) != len(self._end_freqindex_list):
raise ValueError("Length of start and end freqindex_list"
" does not match %d %d",
len(self._start_freqindex_list),
len(self._end_freqindex_list))
if len(self._num_frequency_blocks) != len(self._start_freqindex_list):
raise ValueError("Length of num_frequency_blocks"
" is not equal to start_freqindex_list %d %d",
len(self._num_frequency_blocks),
len(self._start_freqindex_list))
for b in range(len(self._start_freqindex_list)):
start_index = self._start_freqindex_list[b]
end_index = self._end_freqindex_list[b]
cur_size = end_index - start_index
block_feats = int((cur_size - self._feature_size) / (
self._frequency_skip)) + 1
if block_feats != self._num_frequency_blocks[b]:
raise ValueError(
"Invalid num_frequency_blocks, requires %d but gets %d, please"
" check the input size and filter config are correct." % (
self._num_frequency_blocks[b], block_feats))
block_inputs = []
for f in range(block_feats):
cur_input = array_ops.slice(
inputs, [0, start_index + slice_offset + f *
self._frequency_skip],
[-1, self._feature_size])
block_inputs.append(cur_input)
freq_inputs.append(block_inputs)
return freq_inputs
class BidirectionalGridLSTMCell(GridLSTMCell):
"""Bidirectional GridLstm cell.
The bidirection connection is only used in the frequency direction, which
hence doesn't affect the time direction's real-time processing that is
required for online recognition systems.
The current implementation uses different weights for the two directions.
"""
def __init__(self, num_units, use_peepholes=False,
share_time_frequency_weights=False,
cell_clip=None, initializer=None,
num_unit_shards=1, forget_bias=1.0,
feature_size=None, frequency_skip=None,
num_frequency_blocks=None,
start_freqindex_list=None,
end_freqindex_list=None,
couple_input_forget_gates=False,
backward_slice_offset=0,
reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: (optional) bool, default False. Set True to enable
diagonal/peephole connections.
share_time_frequency_weights: (optional) bool, default False. Set True to
enable shared cell weights between time and frequency LSTMs.
cell_clip: (optional) A float value, default None, if provided the cell
state is clipped by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices, default None.
num_unit_shards: (optional) int, default 1, How to split the weight
matrix. If > 1, the weight matrix is stored across num_unit_shards.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
feature_size: (optional) int, default None, The size of the input feature
the LSTM spans over.
frequency_skip: (optional) int, default None, The amount the LSTM filter
is shifted by in frequency.
num_frequency_blocks: [required] A list of frequency blocks needed to
cover the whole input feature splitting defined by start_freqindex_list
and end_freqindex_list.
start_freqindex_list: [optional], list of ints, default None, The
starting frequency index for each frequency block.
end_freqindex_list: [optional], list of ints, default None. The ending
frequency index for each frequency block.
couple_input_forget_gates: (optional) bool, default False, Whether to
couple the input and forget gates, i.e. f_gate = 1.0 - i_gate, to reduce
model parameters and computation cost.
backward_slice_offset: (optional) int32, default 0, the starting offset to
slice the feature for backward processing.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(BidirectionalGridLSTMCell, self).__init__(
num_units, use_peepholes, share_time_frequency_weights, cell_clip,
initializer, num_unit_shards, forget_bias, feature_size, frequency_skip,
num_frequency_blocks, start_freqindex_list, end_freqindex_list,
couple_input_forget_gates, True, reuse)
self._backward_slice_offset = int(backward_slice_offset)
state_names = ""
for direction in ["fwd", "bwd"]:
for block_index in range(len(self._num_frequency_blocks)):
for freq_index in range(self._num_frequency_blocks[block_index]):
name_prefix = "%s_state_f%02d_b%02d" % (direction, freq_index,
block_index)
state_names += ("%s_c, %s_m," % (name_prefix, name_prefix))
self._state_tuple_type = collections.namedtuple(
"BidirectionalGridLSTMStateTuple", state_names.strip(","))
self._state_size = self._state_tuple_type(
*([num_units, num_units] * self._total_blocks * 2))
self._output_size = 2 * num_units * self._total_blocks * 2
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, [batch, num_units].
state: tuple of Tensors, 2D, [batch, state_size].
Returns:
A tuple containing:
- A 2D, [batch, output_dim], Tensor representing the output of the LSTM
after reading "inputs" when previous state was "state".
Here output_dim is num_units.
- A 2D, [batch, state_size], Tensor representing the new state of LSTM
after reading "inputs" when previous state was "state".
Raises:
ValueError: if an input_size was specified and the provided inputs have
a different dimension.
"""
batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
fwd_inputs = self._make_tf_features(inputs)
if self._backward_slice_offset:
bwd_inputs = self._make_tf_features(inputs, self._backward_slice_offset)
else:
bwd_inputs = fwd_inputs
# Forward processing
with vs.variable_scope("fwd"):
fwd_m_out_lst = []
fwd_state_out_lst = []
for block in range(len(fwd_inputs)):
fwd_m_out_lst_current, fwd_state_out_lst_current = self._compute(
fwd_inputs[block], block, state, batch_size,
state_prefix="fwd_state", state_is_tuple=True)
fwd_m_out_lst.extend(fwd_m_out_lst_current)
fwd_state_out_lst.extend(fwd_state_out_lst_current)
# Backward processing
bwd_m_out_lst = []
bwd_state_out_lst = []
with vs.variable_scope("bwd"):
for block in range(len(bwd_inputs)):
# Reverse the blocks
bwd_inputs_reverse = bwd_inputs[block][::-1]
bwd_m_out_lst_current, bwd_state_out_lst_current = self._compute(
bwd_inputs_reverse, block, state, batch_size,
state_prefix="bwd_state", state_is_tuple=True)
bwd_m_out_lst.extend(bwd_m_out_lst_current)
bwd_state_out_lst.extend(bwd_state_out_lst_current)
state_out = self._state_tuple_type(*(fwd_state_out_lst + bwd_state_out_lst))
# Outputs are always concatenated as they are never used separately.
m_out = array_ops.concat(fwd_m_out_lst + bwd_m_out_lst, 1)
return m_out, state_out
# pylint: disable=protected-access
_Linear = core_rnn_cell._Linear # pylint: disable=invalid-name
# pylint: enable=protected-access
class AttentionCellWrapper(rnn_cell_impl.RNNCell):
"""Basic attention cell wrapper.
Implementation based on https://arxiv.org/abs/1409.0473.
"""
def __init__(self, cell, attn_length, attn_size=None, attn_vec_size=None,
input_size=None, state_is_tuple=True, reuse=None):
"""Create a cell with attention.
Args:
cell: an RNNCell, an attention is added to it.
attn_length: integer, the size of an attention window.
attn_size: integer, the size of an attention vector. Equal to
cell.output_size by default.
attn_vec_size: integer, the number of convolutional features calculated
on the attention state and the size of the hidden layer built from the
base cell state. Equal to attn_size by default.
input_size: integer, the size of a hidden linear layer,
built from inputs and attention. Derived from the input tensor
by default.
state_is_tuple: If True (default), accepted and returned states are 3-tuples
  of the wrapped cell state, the attention vector and the attention history.
  If False, the states are all concatenated along the column axis.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
Raises:
TypeError: if cell is not an RNNCell.
ValueError: if cell returns a state tuple but the flag
`state_is_tuple` is `False` or if attn_length is zero or less.
"""
super(AttentionCellWrapper, self).__init__(_reuse=reuse)
if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access
raise TypeError("The parameter cell is not RNNCell.")
if nest.is_sequence(cell.state_size) and not state_is_tuple:
raise ValueError("Cell returns tuple of states, but the flag "
"state_is_tuple is not set. State size is: %s"
% str(cell.state_size))
if attn_length <= 0:
raise ValueError("attn_length should be greater than zero, got %s"
% str(attn_length))
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if attn_size is None:
attn_size = cell.output_size
if attn_vec_size is None:
attn_vec_size = attn_size
self._state_is_tuple = state_is_tuple
self._cell = cell
self._attn_vec_size = attn_vec_size
self._input_size = input_size
self._attn_size = attn_size
self._attn_length = attn_length
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
size = (self._cell.state_size, self._attn_size,
self._attn_size * self._attn_length)
if self._state_is_tuple:
return size
else:
return sum(list(size))
@property
def output_size(self):
return self._attn_size
def call(self, inputs, state):
"""Long short-term memory cell with attention (LSTMA)."""
if self._state_is_tuple:
state, attns, attn_states = state
else:
states = state
state = array_ops.slice(states, [0, 0], [-1, self._cell.state_size])
attns = array_ops.slice(
states, [0, self._cell.state_size], [-1, self._attn_size])
attn_states = array_ops.slice(
states, [0, self._cell.state_size + self._attn_size],
[-1, self._attn_size * self._attn_length])
attn_states = array_ops.reshape(attn_states,
[-1, self._attn_length, self._attn_size])
input_size = self._input_size
if input_size is None:
input_size = inputs.get_shape().as_list()[1]
if self._linear1 is None:
self._linear1 = _Linear([inputs, attns], input_size, True)
inputs = self._linear1([inputs, attns])
cell_output, new_state = self._cell(inputs, state)
if self._state_is_tuple:
new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
else:
new_state_cat = new_state
new_attns, new_attn_states = self._attention(new_state_cat, attn_states)
with vs.variable_scope("attn_output_projection"):
if self._linear2 is None:
self._linear2 = _Linear([cell_output, new_attns], self._attn_size, True)
output = self._linear2([cell_output, new_attns])
new_attn_states = array_ops.concat(
[new_attn_states, array_ops.expand_dims(output, 1)], 1)
new_attn_states = array_ops.reshape(
new_attn_states, [-1, self._attn_length * self._attn_size])
new_state = (new_state, new_attns, new_attn_states)
if not self._state_is_tuple:
new_state = array_ops.concat(list(new_state), 1)
return output, new_state
def _attention(self, query, attn_states):
conv2d = nn_ops.conv2d
reduce_sum = math_ops.reduce_sum
softmax = nn_ops.softmax
tanh = math_ops.tanh
with vs.variable_scope("attention"):
k = vs.get_variable(
"attn_w", [1, 1, self._attn_size, self._attn_vec_size])
v = vs.get_variable("attn_v", [self._attn_vec_size])
hidden = array_ops.reshape(attn_states,
[-1, self._attn_length, 1, self._attn_size])
hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
if self._linear3 is None:
self._linear3 = _Linear(query, self._attn_vec_size, True)
y = self._linear3(query)
y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
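# Additive (Bahdanau-style) scoring: hidden_features applies a weight matrix
# to each of the attn_length stored states via a 1x1 convolution, y is the
# projected query, and s below holds one scalar score per stored state.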
s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
a = softmax(s)
d = reduce_sum(
array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
new_attns = array_ops.reshape(d, [-1, self._attn_size])
new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
return new_attns, new_attn_states
class HighwayWrapper(rnn_cell_impl.RNNCell):
"""RNNCell wrapper that adds highway connection on cell input and output.
Based on:
R. K. Srivastava, K. Greff, and J. Schmidhuber, "Highway networks",
arXiv preprint arXiv:1505.00387, 2015.
https://arxiv.org/abs/1505.00387
"""
def __init__(self, cell,
couple_carry_transform_gates=True,
carry_bias_init=1.0):
"""Constructs a `HighwayWrapper` for `cell`.
Args:
cell: An instance of `RNNCell`.
couple_carry_transform_gates: boolean, should the Carry and Transform gate
be coupled.
carry_bias_init: float, carry gates bias initialization.
"""
self._cell = cell
self._couple_carry_transform_gates = couple_carry_transform_gates
self._carry_bias_init = carry_bias_init
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def _highway(self, inp, out):
input_size = inp.get_shape().with_rank(2)[1].value
carry_weight = vs.get_variable("carry_w", [input_size, input_size])
carry_bias = vs.get_variable(
"carry_b", [input_size],
initializer=init_ops.constant_initializer(
self._carry_bias_init))
carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
if self._couple_carry_transform_gates:
transform = 1 - carry
else:
transform_weight = vs.get_variable("transform_w",
[input_size, input_size])
transform_bias = vs.get_variable(
"transform_b", [input_size],
initializer=init_ops.constant_initializer(
-self._carry_bias_init))
transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
transform_weight,
transform_bias))
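# Highway combination: the carry gate passes the raw input through unchanged
# while the transform gate (1 - carry when the gates are coupled) scales the
# wrapped cell's output.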
return inp * carry + out * transform
def __call__(self, inputs, state, scope=None):
"""Run the cell and add its inputs to its outputs.
Args:
inputs: cell inputs.
state: cell state.
scope: optional cell scope.
Returns:
Tuple of cell outputs and new state.
Raises:
TypeError: If cell inputs and outputs have different structure (type).
ValueError: If cell inputs and outputs have different structure (value).
"""
outputs, new_state = self._cell(inputs, state, scope=scope)
nest.assert_same_structure(inputs, outputs)
# Ensure shapes match
def assert_shape_match(inp, out):
inp.get_shape().assert_is_compatible_with(out.get_shape())
nest.map_structure(assert_shape_match, inputs, outputs)
res_outputs = nest.map_structure(self._highway, inputs, outputs)
return (res_outputs, new_state)
class LayerNormBasicLSTMCell(rnn_cell_impl.RNNCell):
"""LSTM unit with layer normalization and recurrent dropout.
This class adds layer normalization and recurrent dropout to a
basic LSTM unit. Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
Recurrent dropout is based on:
https://arxiv.org/abs/1603.05118
"Recurrent Dropout without Memory Loss"
Stanislau Semeniuta, Aliaksei Severyn, Erhardt Barth.
"""
def __init__(self, num_units, forget_bias=1.0,
input_size=None, activation=math_ops.tanh,
layer_norm=True, norm_gain=1.0, norm_shift=0.0,
dropout_keep_prob=1.0, dropout_prob_seed=None,
reuse=None):
"""Initializes the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
activation: Activation function of the inner states.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
dropout_keep_prob: unit Tensor or float between 0 and 1 representing the
recurrent dropout probability value. If float and 1.0, no dropout will
be applied.
dropout_prob_seed: (optional) integer, the randomness seed.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(LayerNormBasicLSTMCell, self).__init__(_reuse=reuse)
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self._num_units = num_units
self._activation = activation
self._forget_bias = forget_bias
self._keep_prob = dropout_keep_prob
self._seed = dropout_prob_seed
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
self._reuse = reuse
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _norm(self, inp, scope, dtype=dtypes.float32):
shape = inp.get_shape()[-1:]
gamma_init = init_ops.constant_initializer(self._norm_gain)
beta_init = init_ops.constant_initializer(self._norm_shift)
with vs.variable_scope(scope):
# Initialize beta and gamma for use by layer_norm.
vs.get_variable("gamma", shape=shape, initializer=gamma_init, dtype=dtype)
vs.get_variable("beta", shape=shape, initializer=beta_init, dtype=dtype)
normalized = layers.layer_norm(inp, reuse=True, scope=scope)
return normalized
def _linear(self, args):
out_size = 4 * self._num_units
proj_size = args.get_shape()[-1]
dtype = args.dtype
weights = vs.get_variable("kernel", [proj_size, out_size], dtype=dtype)
out = math_ops.matmul(args, weights)
if not self._layer_norm:
bias = vs.get_variable("bias", [out_size], dtype=dtype)
out = nn_ops.bias_add(out, bias)
return out
def call(self, inputs, state):
"""LSTM cell with layer normalization and recurrent dropout."""
c, h = state
args = array_ops.concat([inputs, h], 1)
concat = self._linear(args)
dtype = args.dtype
i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = self._norm(i, "input", dtype=dtype)
j = self._norm(j, "transform", dtype=dtype)
f = self._norm(f, "forget", dtype=dtype)
o = self._norm(o, "output", dtype=dtype)
g = self._activation(j)
if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
new_c = (c * math_ops.sigmoid(f + self._forget_bias)
+ math_ops.sigmoid(i) * g)
if self._layer_norm:
new_c = self._norm(new_c, "state", dtype=dtype)
new_h = self._activation(new_c) * math_ops.sigmoid(o)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class NASCell(rnn_cell_impl.RNNCell):
"""Neural Architecture Search (NAS) recurrent network cell.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.01578
Barret Zoph and Quoc V. Le.
"Neural Architecture Search with Reinforcement Learning" Proc. ICLR 2017.
The class uses an optional projection layer.
"""
def __init__(self, num_units, num_proj=None,
use_biases=False, reuse=None):
"""Initialize the parameters for a NAS cell.
Args:
num_units: int, The number of units in the NAS cell
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
use_biases: (optional) bool, If True then use biases within the cell. This
is False by default.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(NASCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._num_proj = num_proj
self._use_biases = use_biases
self._reuse = reuse
if num_proj is not None:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def call(self, inputs, state):
"""Run one step of NAS Cell.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: This must be a tuple of state Tensors, both `2-D`, with column
sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
NAS Cell after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of NAS Cell after reading `inputs`
when the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
relu = nn_ops.relu
num_proj = self._num_units if self._num_proj is None else self._num_proj
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# Variables for the NAS cell. W_m is all matrices multiplying the
# hiddenstate and W_inputs is all matrices multiplying the inputs.
concat_w_m = vs.get_variable(
"recurrent_kernel", [num_proj, 8 * self._num_units],
dtype)
concat_w_inputs = vs.get_variable(
"kernel", [input_size.value, 8 * self._num_units],
dtype)
m_matrix = math_ops.matmul(m_prev, concat_w_m)
inputs_matrix = math_ops.matmul(inputs, concat_w_inputs)
if self._use_biases:
b = vs.get_variable(
"bias",
shape=[8 * self._num_units],
initializer=init_ops.zeros_initializer(),
dtype=dtype)
m_matrix = nn_ops.bias_add(m_matrix, b)
# The NAS cell branches into 8 different splits for both the hiddenstate
# and the input
m_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
value=m_matrix)
inputs_matrix_splits = array_ops.split(axis=1, num_or_size_splits=8,
value=inputs_matrix)
# First layer
layer1_0 = sigmoid(inputs_matrix_splits[0] + m_matrix_splits[0])
layer1_1 = relu(inputs_matrix_splits[1] + m_matrix_splits[1])
layer1_2 = sigmoid(inputs_matrix_splits[2] + m_matrix_splits[2])
layer1_3 = relu(inputs_matrix_splits[3] * m_matrix_splits[3])
layer1_4 = tanh(inputs_matrix_splits[4] + m_matrix_splits[4])
layer1_5 = sigmoid(inputs_matrix_splits[5] + m_matrix_splits[5])
layer1_6 = tanh(inputs_matrix_splits[6] + m_matrix_splits[6])
layer1_7 = sigmoid(inputs_matrix_splits[7] + m_matrix_splits[7])
# Second layer
l2_0 = tanh(layer1_0 * layer1_1)
l2_1 = tanh(layer1_2 + layer1_3)
l2_2 = tanh(layer1_4 * layer1_5)
l2_3 = sigmoid(layer1_6 + layer1_7)
# Inject the cell
l2_0 = tanh(l2_0 + c_prev)
# Third layer
l3_0_pre = l2_0 * l2_1
new_c = l3_0_pre # create new cell
l3_0 = l3_0_pre
l3_1 = tanh(l2_2 + l2_3)
# Final layer
new_m = tanh(l3_0 * l3_1)
# Projection layer if specified
if self._num_proj is not None:
concat_w_proj = vs.get_variable(
"projection_weights", [self._num_units, self._num_proj],
dtype)
new_m = math_ops.matmul(new_m, concat_w_proj)
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_m)
return new_m, new_state
class UGRNNCell(rnn_cell_impl.RNNCell):
"""Update Gate Recurrent Neural Network (UGRNN) cell.
Compromise between a LSTM/GRU and a vanilla RNN. There is only one
gate, and that is to determine whether the unit should be
integrating or computing instantaneously. This is the recurrent
idea of the feedforward Highway Network.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
"""
def __init__(self, num_units, initializer=None, forget_bias=1.0,
activation=math_ops.tanh, reuse=None):
"""Initialize the parameters for an UGRNN cell.
Args:
num_units: int, The number of units in the UGRNN cell
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gate, used to reduce the scale of forgetting at the beginning
of the training.
activation: (optional) Activation function of the inner states.
Default is `tf.tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(UGRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._activation = activation
self._reuse = reuse
self._linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of UGRNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_output: batch x num units, Tensor representing the output of the UGRNN
after reading `inputs` when previous state was `state`. Identical to
`new_state`.
new_state: batch x num units, Tensor representing the state of the UGRNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
sigmoid = math_ops.sigmoid
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(vs.get_variable_scope(),
initializer=self._initializer):
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear is None:
self._linear = _Linear(cell_inputs, 2 * self._num_units, True)
rnn_matrix = self._linear(cell_inputs)
[g_act, c_act] = array_ops.split(
axis=1, num_or_size_splits=2, value=rnn_matrix)
c = self._activation(c_act)
g = sigmoid(g_act + self._forget_bias)
new_state = g * state + (1.0 - g) * c
new_output = new_state
return new_output, new_state
class IntersectionRNNCell(rnn_cell_impl.RNNCell):
"""Intersection Recurrent Neural Network (+RNN) cell.
Architecture with coupled recurrent gate as well as coupled depth
gate, designed to improve information flow through stacked RNNs. As the
architecture uses depth gating, the dimensionality of the depth
output (y) also should not change through depth (input size == output size).
To achieve this, the first layer of a stacked Intersection RNN projects
the inputs to N (num units) dimensions. Therefore when initializing an
IntersectionRNNCell, one should set `num_in_proj = N` for the first layer
and use default settings for subsequent layers.
This implements the recurrent cell from the paper:
https://arxiv.org/abs/1611.09913
Jasmine Collins, Jascha Sohl-Dickstein, and David Sussillo.
"Capacity and Trainability in Recurrent Neural Networks" Proc. ICLR 2017.
The Intersection RNN is built for use in deeply stacked
RNNs so it may not achieve best performance with depth 1.
"""
def __init__(self, num_units, num_in_proj=None,
initializer=None, forget_bias=1.0,
y_activation=nn_ops.relu, reuse=None):
"""Initialize the parameters for an +RNN cell.
Args:
num_units: int, The number of units in the +RNN cell
num_in_proj: (optional) int, The input dimensionality for the RNN.
If creating the first layer of an +RNN, this should be set to
`num_units`. Otherwise, this should be set to `None` (default).
If `None`, dimensionality of `inputs` should be equal to `num_units`,
otherwise ValueError is thrown.
initializer: (optional) The initializer to use for the weight matrices.
forget_bias: (optional) float, default 1.0, The initial bias of the
forget gates, used to reduce the scale of forgetting at the beginning
of the training.
y_activation: (optional) Activation function of the states passed
through depth. Default is `tf.nn.relu`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(IntersectionRNNCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._forget_bias = forget_bias
self._num_input_proj = num_in_proj
self._y_activation = y_activation
self._reuse = reuse
self._linear1 = None
self._linear2 = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Run one step of the Intersection RNN.
Args:
inputs: input Tensor, 2D, batch x input size.
state: state Tensor, 2D, batch x num units.
Returns:
new_y: batch x num units, Tensor representing the output of the +RNN
after reading `inputs` when previous state was `state`.
new_state: batch x num units, Tensor representing the state of the +RNN
after reading `inputs` when previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from `inputs` via
static shape inference.
ValueError: If input size != output size (these must be equal when
using the Intersection RNN).
"""
sigmoid = math_ops.sigmoid
tanh = math_ops.tanh
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
with vs.variable_scope(vs.get_variable_scope(),
initializer=self._initializer):
# read-in projections (should be used for first layer in deep +RNN
# to transform size of inputs from I --> N)
if input_size.value != self._num_units:
if self._num_input_proj:
with vs.variable_scope("in_projection"):
if self._linear1 is None:
self._linear1 = _Linear(inputs, self._num_units, True)
inputs = self._linear1(inputs)
else:
raise ValueError("Must have input size == output size for "
"Intersection RNN. To fix, num_in_proj should "
"be set to num_units at cell init.")
n_dim = i_dim = self._num_units
cell_inputs = array_ops.concat([inputs, state], 1)
if self._linear2 is None:
self._linear2 = _Linear(cell_inputs, 2*n_dim + 2*i_dim, True)
rnn_matrix = self._linear2(cell_inputs)
gh_act = rnn_matrix[:, :n_dim] # b x n
h_act = rnn_matrix[:, n_dim:2*n_dim] # b x n
gy_act = rnn_matrix[:, 2*n_dim:2*n_dim+i_dim] # b x i
y_act = rnn_matrix[:, 2*n_dim+i_dim:2*n_dim+2*i_dim] # b x i
h = tanh(h_act)
y = self._y_activation(y_act)
gh = sigmoid(gh_act + self._forget_bias)
gy = sigmoid(gy_act + self._forget_bias)
new_state = gh * state + (1.0 - gh) * h # passed thru time
new_y = gy * inputs + (1.0 - gy) * y # passed thru depth
return new_y, new_state
_REGISTERED_OPS = None
class CompiledWrapper(rnn_cell_impl.RNNCell):
"""Wraps step execution in an XLA JIT scope."""
def __init__(self, cell, compile_stateful=False):
"""Create CompiledWrapper cell.
Args:
cell: Instance of `RNNCell`.
compile_stateful: Whether to compile stateful ops like initializers
and random number generators (default: False).
"""
self._cell = cell
self._compile_stateful = compile_stateful
@property
def state_size(self):
return self._cell.state_size
@property
def output_size(self):
return self._cell.output_size
def zero_state(self, batch_size, dtype):
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
return self._cell.zero_state(batch_size, dtype)
def __call__(self, inputs, state, scope=None):
if self._compile_stateful:
compile_ops = True
else:
def compile_ops(node_def):
global _REGISTERED_OPS
if _REGISTERED_OPS is None:
_REGISTERED_OPS = op_def_registry.get_registered_ops()
return not _REGISTERED_OPS[node_def.op].is_stateful
with jit.experimental_jit_scope(compile_ops=compile_ops):
return self._cell(inputs, state, scope)
def _random_exp_initializer(minval,
maxval,
seed=None,
dtype=dtypes.float32):
"""Returns an exponential distribution initializer.
Args:
minval: float or a scalar float Tensor. With value > 0. Lower bound of the
range of random values to generate.
maxval: float or a scalar float Tensor. With value > minval. Upper bound of
the range of random values to generate.
seed: An integer. Used to create random seeds.
dtype: The data type.
Returns:
An initializer that generates tensors with an exponential distribution.
"""
def _initializer(shape, dtype=dtype, partition_info=None):
del partition_info # Unused.
return math_ops.exp(
random_ops.random_uniform(
shape,
math_ops.log(minval),
math_ops.log(maxval),
dtype,
seed=seed))
return _initializer
class PhasedLSTMCell(rnn_cell_impl.RNNCell):
"""Phased LSTM recurrent network cell.
https://arxiv.org/pdf/1610.09513v1.pdf
"""
def __init__(self,
num_units,
use_peepholes=False,
leak=0.001,
ratio_on=0.1,
trainable_ratio_on=True,
period_init_min=1.0,
period_init_max=1000.0,
reuse=None):
"""Initialize the Phased LSTM cell.
Args:
num_units: int, The number of units in the Phased LSTM cell.
use_peepholes: bool, set True to enable peephole connections.
leak: float or scalar float Tensor with value in [0, 1]. Leak applied
during training.
ratio_on: float or scalar float Tensor with value in [0, 1]. Ratio of the
period during which the gates are open.
trainable_ratio_on: bool, whether ratio_on is trainable.
period_init_min: float or scalar float Tensor. With value > 0.
Minimum value of the initialized period.
The period values are initialized by drawing from the distribution:
e^U(log(period_init_min), log(period_init_max))
Where U(.,.) is the uniform distribution.
period_init_max: float or scalar float Tensor.
With value > period_init_min. Maximum value of the initialized period.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
"""
super(PhasedLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._leak = leak
self._ratio_on = ratio_on
self._trainable_ratio_on = trainable_ratio_on
self._period_init_min = period_init_min
self._period_init_max = period_init_max
self._reuse = reuse
self._linear1 = None
self._linear2 = None
self._linear3 = None
@property
def state_size(self):
return rnn_cell_impl.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def _mod(self, x, y):
"""Modulo function that propagates x gradients."""
return array_ops.stop_gradient(math_ops.mod(x, y) - x) + x
def _get_cycle_ratio(self, time, phase, period):
"""Compute the cycle ratio in the dtype of the time."""
phase_casted = math_ops.cast(phase, dtype=time.dtype)
period_casted = math_ops.cast(period, dtype=time.dtype)
shifted_time = time - phase_casted
cycle_ratio = self._mod(shifted_time, period_casted) / period_casted
return math_ops.cast(cycle_ratio, dtype=dtypes.float32)
def call(self, inputs, state):
"""Phased LSTM Cell.
Args:
inputs: A tuple of 2 Tensors.
The first Tensor has shape [batch, 1], and type float32 or float64.
It stores the time.
The second Tensor has shape [batch, features_size], and type float32.
It stores the features.
state: rnn_cell_impl.LSTMStateTuple, state from previous timestep.
Returns:
A tuple containing:
- A Tensor of float32, and shape [batch_size, num_units], representing the
output of the cell.
- A rnn_cell_impl.LSTMStateTuple, containing 2 Tensors of float32, shape
[batch_size, num_units], representing the new state and the output.
"""
(c_prev, h_prev) = state
(time, x) = inputs
in_mask_gates = [x, h_prev]
if self._use_peepholes:
in_mask_gates.append(c_prev)
with vs.variable_scope("mask_gates"):
if self._linear1 is None:
self._linear1 = _Linear(in_mask_gates, 2 * self._num_units, True)
mask_gates = math_ops.sigmoid(
self._linear1(in_mask_gates))
[input_gate, forget_gate] = array_ops.split(
axis=1, num_or_size_splits=2, value=mask_gates)
with vs.variable_scope("new_input"):
if self._linear2 is None:
self._linear2 = _Linear([x, h_prev], self._num_units, True)
new_input = math_ops.tanh(self._linear2([x, h_prev]))
new_c = (c_prev * forget_gate + input_gate * new_input)
in_out_gate = [x, h_prev]
if self._use_peepholes:
in_out_gate.append(new_c)
with vs.variable_scope("output_gate"):
if self._linear3 is None:
self._linear3 = _Linear(in_out_gate, self._num_units, True)
output_gate = math_ops.sigmoid(self._linear3(in_out_gate))
new_h = math_ops.tanh(new_c) * output_gate
period = vs.get_variable(
"period", [self._num_units],
initializer=_random_exp_initializer(
self._period_init_min, self._period_init_max))
phase = vs.get_variable(
"phase", [self._num_units],
initializer=init_ops.random_uniform_initializer(
0., period.initial_value))
ratio_on = vs.get_variable(
"ratio_on", [self._num_units],
initializer=init_ops.constant_initializer(self._ratio_on),
trainable=self._trainable_ratio_on)
cycle_ratio = self._get_cycle_ratio(time, phase, period)
k_up = 2 * cycle_ratio / ratio_on
k_down = 2 - k_up
k_closed = self._leak * cycle_ratio
k = array_ops.where(cycle_ratio < ratio_on, k_down, k_closed)
k = array_ops.where(cycle_ratio < 0.5 * ratio_on, k_up, k)
new_c = k * new_c + (1 - k) * c_prev
new_h = k * new_h + (1 - k) * h_prev
new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
return new_h, new_state
class ConvLSTMCell(rnn_cell_impl.RNNCell):
"""Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self,
conv_ndims,
input_shape,
output_channels,
kernel_shape,
use_bias=True,
skip_connection=False,
forget_bias=1.0,
initializers=None,
name="conv_lstm_cell"):
"""Construct ConvLSTMCell.
Args:
conv_ndims: Convolution dimensionality (1, 2 or 3).
input_shape: Shape of the input as int tuple, excluding the batch size.
output_channels: int, number of output channels of the conv LSTM.
kernel_shape: Shape of kernel as in tuple (of size 1,2 or 3).
use_bias: Use bias in convolutions.
skip_connection: If set to `True`, concatenate the input to the
output of the conv LSTM. Default: `False`.
forget_bias: Forget bias.
name: Name of the module.
Raises:
ValueError: If `skip_connection` is `True` and stride is different from 1
or if `input_shape` is incompatible with `conv_ndims`.
"""
super(ConvLSTMCell, self).__init__(name=name)
if conv_ndims != len(input_shape)-1:
raise ValueError("Invalid input_shape {} for conv_ndims={}.".format(
input_shape, conv_ndims))
self._conv_ndims = conv_ndims
self._input_shape = input_shape
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._use_bias = use_bias
self._forget_bias = forget_bias
self._skip_connection = skip_connection
self._total_output_channels = output_channels
if self._skip_connection:
self._total_output_channels += self._input_shape[-1]
state_size = tensor_shape.TensorShape(self._input_shape[:-1]
+ [self._output_channels])
self._state_size = rnn_cell_impl.LSTMStateTuple(state_size, state_size)
self._output_size = tensor_shape.TensorShape(self._input_shape[:-1]
+ [self._total_output_channels])
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def call(self, inputs, state, scope=None):
cell, hidden = state
new_hidden = _conv([inputs, hidden],
self._kernel_shape,
4*self._output_channels,
self._use_bias)
gates = array_ops.split(value=new_hidden,
num_or_size_splits=4,
axis=self._conv_ndims+1)
input_gate, new_input, forget_gate, output_gate = gates
new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
output = math_ops.tanh(new_cell) * math_ops.sigmoid(output_gate)
if self._skip_connection:
output = array_ops.concat([output, inputs], axis=-1)
new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
return output, new_state
class Conv1DLSTMCell(ConvLSTMCell):
"""1D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_1d_lstm_cell", **kwargs):
"""Construct Conv1DLSTM. See `ConvLSTMCell` for more details."""
super(Conv1DLSTMCell, self).__init__(conv_ndims=1, **kwargs)
class Conv2DLSTMCell(ConvLSTMCell):
"""2D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_2d_lstm_cell", **kwargs):
"""Construct Conv2DLSTM. See `ConvLSTMCell` for more details."""
super(Conv2DLSTMCell, self).__init__(conv_ndims=2, **kwargs)
class Conv3DLSTMCell(ConvLSTMCell):
"""3D Convolutional LSTM recurrent network cell.
https://arxiv.org/pdf/1506.04214v1.pdf
"""
def __init__(self, name="conv_3d_lstm_cell", **kwargs):
"""Construct Conv3DLSTM. See `ConvLSTMCell` for more details."""
super(Conv3DLSTMCell, self).__init__(conv_ndims=3, **kwargs)
def _conv(args,
filter_size,
num_features,
bias,
bias_start=0.0):
"""convolution:
Args:
args: a Tensor or a list of Tensors of dimension 3D, 4D or 5D,
batch x n, Tensors.
filter_size: int tuple of filter height and width.
num_features: int, number of features.
bias_start: starting value to initialize the bias; 0 by default.
Returns:
A 3D, 4D, or 5D Tensor with shape [batch ... num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
shape_length = len(shapes[0])
for shape in shapes:
if len(shape) not in [3,4,5]:
raise ValueError("Conv Linear expects 3D, 4D "
"or 5D arguments: %s" % str(shapes))
if len(shape) != len(shapes[0]):
raise ValueError("Conv Linear expects all args "
"to be of same Dimension: %s" % str(shapes))
else:
total_arg_size_depth += shape[-1]
dtype = [a.dtype for a in args][0]
# determine correct conv operation
if shape_length == 3:
conv_op = nn_ops.conv1d
strides = 1
elif shape_length == 4:
conv_op = nn_ops.conv2d
strides = shape_length*[1]
elif shape_length == 5:
conv_op = nn_ops.conv3d
strides = shape_length*[1]
# Now the computation.
kernel = vs.get_variable(
"kernel",
filter_size + [total_arg_size_depth, num_features],
dtype=dtype)
if len(args) == 1:
res = conv_op(args[0],
kernel,
strides,
padding='SAME')
else:
res = conv_op(array_ops.concat(axis=shape_length-1, values=args),
kernel,
strides,
padding='SAME')
if not bias:
return res
bias_term = vs.get_variable(
"biases", [num_features],
dtype=dtype,
initializer=init_ops.constant_initializer(
bias_start, dtype=dtype))
return res + bias_term
class GLSTMCell(rnn_cell_impl.RNNCell):
"""Group LSTM cell (G-LSTM).
The implementation is based on:
https://arxiv.org/abs/1703.10722
O. Kuchaiev and B. Ginsburg
"Factorization Tricks for LSTM Networks", ICLR 2017 workshop.
"""
def __init__(self, num_units, initializer=None, num_proj=None,
number_of_groups=1, forget_bias=1.0, activation=math_ops.tanh,
reuse=None):
"""Initialize the parameters of G-LSTM cell.
Args:
num_units: int, The number of units in the G-LSTM cell
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
number_of_groups: (optional) int, number of groups to use.
If `number_of_groups` is 1, then it should be equivalent to an LSTM cell.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training.
activation: Activation function of the inner states.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already
has the given variables, an error is raised.
Raises:
ValueError: If `num_units` or `num_proj` is not divisible by
`number_of_groups`.
"""
super(GLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._initializer = initializer
self._num_proj = num_proj
self._forget_bias = forget_bias
self._activation = activation
self._number_of_groups = number_of_groups
if self._num_units % self._number_of_groups != 0:
raise ValueError("num_units must be divisible by number_of_groups")
if self._num_proj:
if self._num_proj % self._number_of_groups != 0:
raise ValueError("num_proj must be divisible by number_of_groups")
self._group_shape = [int(self._num_proj / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
else:
self._group_shape = [int(self._num_units / self._number_of_groups),
int(self._num_units / self._number_of_groups)]
if num_proj:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_proj)
self._output_size = num_proj
else:
self._state_size = rnn_cell_impl.LSTMStateTuple(num_units, num_units)
self._output_size = num_units
self._linear1 = None
self._linear2 = None
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _get_input_for_group(self, inputs, group_id, group_size):
"""Slices inputs into groups to prepare for processing by cell's groups
Args:
inputs: cell input or it's previous state,
a Tensor, 2D, [batch x num_units]
group_id: group id, a Scalar, for which to prepare input
group_size: size of the group
Returns:
subset of inputs corresponding to group "group_id",
a Tensor, 2D, [batch x num_units/number_of_groups]
"""
return array_ops.slice(input_=inputs,
begin=[0, group_id * group_size],
size=[self._batch_size, group_size],
name=("GLSTM_group%d_input_generation" % group_id))
def call(self, inputs, state):
"""Run one step of G-LSTM.
Args:
inputs: input Tensor, 2D, [batch x num_units].
state: this must be a tuple of state Tensors, both `2-D`,
with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
G-LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- LSTMStateTuple representing the new state of G-LSTM cell
after reading `inputs` when the previous state was `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
(c_prev, m_prev) = state
self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
dtype = inputs.dtype
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer):
i_parts = []
j_parts = []
f_parts = []
o_parts = []
for group_id in range(self._number_of_groups):
with vs.variable_scope("group%d" % group_id):
x_g_id = array_ops.concat(
[self._get_input_for_group(inputs, group_id,
self._group_shape[0]),
self._get_input_for_group(m_prev, group_id,
self._group_shape[0])], axis=1)
if self._linear1 is None:
self._linear1 = _Linear(x_g_id, 4 * self._group_shape[1], False)
R_k = self._linear1(x_g_id) # pylint: disable=invalid-name
i_k, j_k, f_k, o_k = array_ops.split(R_k, 4, 1)
i_parts.append(i_k)
j_parts.append(j_k)
f_parts.append(f_k)
o_parts.append(o_k)
bi = vs.get_variable(name="bias_i",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bj = vs.get_variable(name="bias_j",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bf = vs.get_variable(name="bias_f",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
bo = vs.get_variable(name="bias_o",
shape=[self._num_units],
dtype=dtype,
initializer=
init_ops.constant_initializer(0.0, dtype=dtype))
i = nn_ops.bias_add(array_ops.concat(i_parts, axis=1), bi)
j = nn_ops.bias_add(array_ops.concat(j_parts, axis=1), bj)
f = nn_ops.bias_add(array_ops.concat(f_parts, axis=1), bf)
o = nn_ops.bias_add(array_ops.concat(o_parts, axis=1), bo)
c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
math_ops.sigmoid(i) * math_ops.tanh(j))
m = math_ops.sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection"):
if self._linear2 is None:
self._linear2 = _Linear(m, self._num_proj, False)
m = self._linear2(m)
new_state = rnn_cell_impl.LSTMStateTuple(c, m)
return m, new_state
class LayerNormLSTMCell(rnn_cell_impl.RNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
http://www.bioinf.jku.at/publications/older/2604.pdf
S. Hochreiter and J. Schmidhuber.
"Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
and is applied before the internal nonlinearities.
"""
def __init__(self, num_units,
use_peepholes=False, cell_clip=None,
initializer=None, num_proj=None, proj_clip=None,
forget_bias=1.0,
activation=None, layer_norm=False,
norm_gain=1.0, norm_shift=0.0, reuse=None):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
forget_bias: Biases of the forget gate are initialized by default to 1
in order to reduce the scale of forgetting at the beginning of
the training. Must set it manually to `0.0` when restoring from
CudnnLSTM trained checkpoints.
activation: Activation function of the inner states. Default: `tanh`.
layer_norm: If `True`, layer normalization will be applied.
norm_gain: float, The layer normalization gain initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
norm_shift: float, The layer normalization shift initial value. If
`layer_norm` has been set to `False`, this argument will be ignored.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
When restoring from CudnnLSTM-trained checkpoints, must use
CudnnCompatibleLSTMCell instead.
"""
super(LayerNormLSTMCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializer
self._num_proj = num_proj
self._proj_clip = proj_clip
self._forget_bias = forget_bias
self._activation = activation or math_ops.tanh
self._layer_norm = layer_norm
self._norm_gain = norm_gain
self._norm_shift = norm_shift
if num_proj:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj))
self._output_size = num_proj
else:
self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units))
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
def _linear(self,
args,
output_size,
bias,
bias_initializer=None,
kernel_initializer=None,
layer_norm=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a Variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
layer_norm: boolean, whether to apply layer normalization.
Returns:
A 2D Tensor with shape [batch x output_size] taking value
sum_i(args[i] * W[i]), where each W[i] is a newly created Variable.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
# Now the computation.
scope = vs.get_variable_scope()
with vs.variable_scope(scope) as outer_scope:
weights = vs.get_variable(
"kernel", [total_arg_size, output_size],
dtype=dtype,
initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, 1), weights)
if not bias:
return res
with vs.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
if bias_initializer is None:
bias_initializer = init_ops.constant_initializer(0.0, dtype=dtype)
biases = vs.get_variable(
"bias", [output_size],
dtype=dtype,
initializer=bias_initializer)
if not layer_norm:
res = nn_ops.bias_add(res, biases)
return res
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, 2D, batch x num_units.
state: this must be a tuple of state Tensors,
both `2-D`, with column sizes `c_state` and
`m_state`.
Returns:
A tuple containing:
- A `2-D, [batch x output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
(c_prev, m_prev) = state
dtype = inputs.dtype
input_size = inputs.get_shape().with_rank(2)[1]
if input_size.value is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
scope = vs.get_variable_scope()
with vs.variable_scope(scope, initializer=self._initializer) as unit_scope:
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = self._linear([inputs, m_prev], 4 * self._num_units, bias=True,
bias_initializer=None, layer_norm=self._layer_norm)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
if self._layer_norm:
i = _norm(self._norm_gain, self._norm_shift, i, "input")
j = _norm(self._norm_gain, self._norm_shift, j, "transform")
f = _norm(self._norm_gain, self._norm_shift, f, "forget")
o = _norm(self._norm_gain, self._norm_shift, o, "output")
# Diagonal connections
if self._use_peepholes:
with vs.variable_scope(unit_scope) as projection_scope:
w_f_diag = vs.get_variable(
"w_f_diag", shape=[self._num_units], dtype=dtype)
w_i_diag = vs.get_variable(
"w_i_diag", shape=[self._num_units], dtype=dtype)
w_o_diag = vs.get_variable(
"w_o_diag", shape=[self._num_units], dtype=dtype)
if self._use_peepholes:
c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev +
sigmoid(i + w_i_diag * c_prev) * self._activation(j))
else:
c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) *
self._activation(j))
if self._layer_norm:
c = _norm(self._norm_gain, self._norm_shift, c, "state")
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
with vs.variable_scope("projection") as proj_scope:
m = self._linear(m, self._num_proj, bias=False)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (rnn_cell_impl.LSTMStateTuple(c, m))
return m, new_state
|
{
"content_hash": "d5eb93f4f0446ba9c3ded337e9e8f4ef",
"timestamp": "",
"source": "github",
"line_count": 2605,
"max_line_length": 82,
"avg_line_length": 39.20921305182342,
"alnum_prop": 0.6250342666927746,
"repo_name": "horance-liu/tensorflow",
"id": "5e85c125df8ca0d632fa9b0db86d942bb354631e",
"size": "102830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/rnn/python/ops/rnn_cell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8572"
},
{
"name": "C",
"bytes": "314095"
},
{
"name": "C++",
"bytes": "34056582"
},
{
"name": "CMake",
"bytes": "212134"
},
{
"name": "Go",
"bytes": "1005949"
},
{
"name": "Java",
"bytes": "533059"
},
{
"name": "Jupyter Notebook",
"bytes": "1940739"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44794"
},
{
"name": "Objective-C",
"bytes": "8665"
},
{
"name": "Objective-C++",
"bytes": "75338"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "29848838"
},
{
"name": "Ruby",
"bytes": "435"
},
{
"name": "Shell",
"bytes": "401557"
}
],
"symlink_target": ""
}
|
'''
Name: Problem 36: Double-base palindromes (45171)
The decimal number, 585 = 1001001001 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
1. Find all palindromes less than a million
2. Convert all to binary and test if palindrome
'''
# Track whether the current number is palindromic in base 10 and in base 2.
pal10 = True
pal2 = True
base10Pals = []
for i in range(1, 1000000):
    # Compare the first half of the decimal digits with the mirrored second half.
    s = str(i)
    for x in range(0, len(s) // 2):
        if s[x] != s[len(s) - x - 1]:
            pal10 = False
    if pal10:
        # Only base-10 palindromes need the (leading-zero-free) base-2 check.
        b = "{0:b}".format(i)
        for x in range(0, len(b) // 2):
            if b[x] != b[len(b) - x - 1]:
                pal2 = False
        if pal2:
            base10Pals.append(i)
    # Reset the flags before testing the next candidate.
    pal2 = True
    pal10 = True
print(sum(base10Pals))
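# Equivalent, more direct sketch of the two-step plan in the docstring
# (added for illustration, not part of the original solution): a string is a
# palindrome iff it equals its reverse.
#
#   def is_pal(s):
#       return s == s[::-1]
#   print(sum(i for i in range(1, 1000000)
#             if is_pal(str(i)) and is_pal("{0:b}".format(i))))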
|
{
"content_hash": "84c608c9a1599de517e2eb9ed79554ff",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 96,
"avg_line_length": 28.46875,
"alnum_prop": 0.677277716794731,
"repo_name": "gregljohnson/ProjectEuler",
"id": "37f5e9b1fcaa886780c13d55fea64464ce219e1b",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problem36.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "68119"
}
],
"symlink_target": ""
}
|
import SimpleHTTPServer
import SocketServer
PORT = 8000
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print("serving at port", PORT)
httpd.serve_forever()
|
{
"content_hash": "26ecd0a0a89f0e9774e0934813cd0ac7",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 51,
"avg_line_length": 19.727272727272727,
"alnum_prop": 0.7880184331797235,
"repo_name": "shuangliu12/cocoon",
"id": "8591b6cef346e4692d2f7fbf67ebc6b68af6dfaa",
"size": "217",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30437"
},
{
"name": "HTML",
"bytes": "14948"
},
{
"name": "JavaScript",
"bytes": "8492"
},
{
"name": "Python",
"bytes": "217"
}
],
"symlink_target": ""
}
|
# missing parameter, ignored because a SQLAlchemy function is wrapped.
# it's a documented issue with that team.
# pylint: disable-msg=E1120
# Used * or ** magic, we're not getting rid of this, it's imperative to Trump.
# pylint: disable-msg=W0142
# Too many/few arguments, ignored, because its confusing and doesn't make
# sense to refactor templates.
#
# pylint: disable-msg=R0913
# pylint: disable-msg=R0903
"""
Trump's Object Relational Model is the glue to the framework, used to create
a Symbol's tags, alias, meta data, data feeds and their sources, munging,
error handling and validity instructions.
"""
# SQLAQ - running the uninstall script, then this script, in the same session
# causes an error:
#
# sqlalchemy.exc.InvalidRequestError: When initializing mapper
# Mapper|Feed|_feeds, expression 'FeedMeta' failed to locate a name
# ("name 'FeedMeta' is not defined"). If this is a class name, consider
# adding this relationship() to the <class 'trump.orm.Feed'> class
# after both dependent classes have been defined
#
# Why?
from pdb import set_trace as bp
import datetime as dt
import json
from dateutil.relativedelta import relativedelta as rd
import pandas as pd
from sqlalchemy import event, Table, Column, ForeignKey, ForeignKeyConstraint,\
String, Integer, Float, Boolean, DateTime, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship, aliased
from sqlalchemy.orm.session import object_session
from sqlalchemy.exc import ProgrammingError, NoSuchTableError
from sqlalchemy.sql import and_, or_
from sqlalchemy import create_engine, distinct
from indexing import indexingtypes
from validity import validitychecks
from datadef import datadefs
from extensions.loader import sources
from trump.tools import ReprMixin, ProxyDict, isinstanceofany, \
BitFlag, BitFlagType, ReprObjType, DuckTypeMixin, new_alchemy_encoder
from trump.aggregation.symbol_aggs import FeedAggregator, sorted_feed_cols
from trump.templating import bFeed, pab, pnab
from trump.options import read_config, read_settings
from trump.converting import FXConverter
from handling import Handler
from reporting.objects import TrumpReport, FeedReport, SymbolReport, \
ReportPoint
BitFlag.associate_with(BitFlagType)
try:
ENGINE_STR = read_config(sect='readwrite', sett='engine')
except:
print ("Problem reading trump.cfg. Continuing using an in-memory "
"SQLlite database. Trump was not designed to work in-memory, "
"because, What's the point of non-persistent persistent objects?")
ENGINE_STR = "sqlite://"
try:
MONICKER = read_config(sect='about', sett='monicker')
except:
print ("Problem reading trump.cfg. Continuing using the monicker"
"defined in orm.py")
MONICKER = "unknown"
rbd = read_config(sect='options', sett='raise_by_default')
if rbd.upper() == 'TRUE':
rbd = BitFlag(1)
else:
rbd = None
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base = declarative_base()
ADO = "all, delete-orphan"
CC = {'onupdate': "CASCADE", 'ondelete': "CASCADE"}
class SymbolManager(object):
"""
The SymbolManager maintains the SQLAlchemy database session, and
provides access to object creation, deletion, searching, and
overrides/failsafes.
"""
def __init__(self, engine_or_eng_str=None, loud=False, echo=False):
"""
Parameters
----------
engine_or_eng_str : str or None, optional
Pass a SQLAlchemy engine, or a string. Without one,
it will use the string provided in trump/options/trump.cfg.
If it fails to get a value there, an in-memory SQLite
session will be created.
loud : bool, optional
Print information such as engine string used, defaults to False
echo : bool, optional
If a new engine is created, it will pass this to its
constructor, enabling SQLAlchemy's echo mode.
Returns
-------
SymbolManager
"""
if engine_or_eng_str is None:
engine = create_engine(ENGINE_STR, echo=echo)
elif isinstance(engine_or_eng_str, (str, unicode)):
engine = create_engine(engine_or_eng_str, echo=echo)
else:
engine = engine_or_eng_str
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
self.loud = loud
if loud:
print "Using engine: {}".format(ENGINE_STR)
self.eng = engine
self.ses = DBSession()
def finish(self):
""" Closes the session with the database.
Call at the end of a trump session. It also
calls SessionManager.complete().
"""
self.complete()
self.ses.close()
def create(self, name, description=None, units=None,
agg_method="priority_fill", overwrite=False):
""" Create, or get if exists, a Symbol.
Parameters
----------
name : str
A symbol's name is a primary key, used across
the Trump ORM.
description : str, optional
An arbitrary string, used to store user information
related to the symbol.
units : str, optional
This is a string used to denote the units of the final
data Series.
agg_method : str, optional
The aggregation method, used to calculate
the final feed. Defaults to priority_fill.
overwrite : bool, optional
Set to True to force deletion of an existing symbol.
Defaults to False.
Returns
-------
Symbol
"""
sym = self.try_to_get(name)
if sym is not None:
if overwrite:
print "Deleting {}".format(sym.name)
self.ses.delete(sym)
self.ses.commit()
else:
msg = 'Symbol {} already exists.\n' + \
'Consider setting overwrite to True.'
msg = msg.format(name)
raise Exception(msg)
sym = Symbol(name, description, units, agg_method)
self.ses.add(sym)
print "Creating {}".format(sym.name)
sym.add_alias(name)
sym.handle = SymbolHandle(sym=sym)
self.ses.commit()
return sym
def delete(self, symbol):
"""
Deletes a Symbol.
Parameters
----------
symbol : str or Symbol
"""
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
elif isinstance(symbol, Symbol):
sym = symbol
else:
raise Exception("Invalid symbol {}".format((repr(symbol))))
# Has to handle the case where the table would exist already
# and where it wouldn't.
try:
sym.datatable = Table(sym.name, Base.metadata, autoload=True)
sym.datatable.drop(self.eng, checkfirst=True)
except NoSuchTableError:
print "No worries, {} never existed to begin with.".format(sym.name)
self.ses.delete(sym)
self.ses.commit()
def complete(self):
"""Commits any changes to the database.
In general, most of Trump API's auto-commits
or does so internally.
This is necessary when working directly with SQLAlchemy
exposed attributes.
"""
self.ses.commit()
def exists(self, symbol):
"""Checks to if a symbol exists, by name.
Parameters
----------
symbol : str or Symbol
Returns
-------
bool
"""
if isinstance(symbol, str):
sym = symbol
elif isinstance(symbol, Symbol):
sym = symbol.name
syms = self.ses.query(Symbol).filter(Symbol.name == sym).all()
if len(syms) == 0:
return False
else:
return True
def get(self, symbol):
""" Gets a Symbol based on name, which is expected to exist.
Parameters
----------
symbol : str or Symbol
Returns
-------
Symbol
Raises
------
Exception
If it does not exist. Use .try_to_get(),
if the symbol may or may not exist.
"""
syms = self.try_to_get(symbol)
if syms is None:
raise Exception("Symbol {} does not exist".format(symbol))
else:
return syms
def try_to_get(self, symbol):
""" Gets a Symbol based on name, which may or may not exist.
Parameters
----------
symbol : str
Returns
-------
Symbol or None.
Note
----
Use .get(), if the symbol should exist, and an exception
is needed if it doesn't.
"""
syms = self.ses.query(Symbol).filter(Symbol.name == symbol).all()
if len(syms) == 0:
return None
else:
return syms[0]
def existing_meta_attr(self):
qry = self.ses.query(SymbolMeta.attr).order_by(SymbolMeta.attr)
result = qry.distinct().all()
return [res[0] for res in result]
def search_meta(self, attr, value=None, stronly=False):
""" Get a list of Symbols by searching a specific meta
attribute, and optionally the value.
Parameters
----------
attr : str
The meta attribute to query.
value : None, str or list
The meta value to query for. If you pass a float, or an int,
it'll be converted to a string, prior to searching.
stronly : bool, optional, default False
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
Returns
-------
List of Symbols or empty list
"""
if stronly:
qry = self.ses.query(Symbol.name).join(SymbolMeta)
else:
qry = self.ses.query(Symbol).join(SymbolMeta)
crits = []
if value is None:
crits.append(SymbolMeta.attr == attr)
else:
if isinstance(value, str):
values = [value]
elif isinstance(value, (tuple, list)):
values = value
for v in values:
crits.append(and_(SymbolMeta.attr == attr, SymbolMeta.value.like(v)))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()]
def search(self, usrqry=None, name=False, desc=False, tags=False, meta=False, stronly=False, dolikelogic=True):
""" Get a list of Symbols by searching a combination of
a Symbol's name, description, tags or meta values.
Parameters
----------
usrqry : str
The string used to query. Appending '%' will use SQL's "LIKE"
functionality.
name : bool, optional, default False
Search by symbol name.
desc : bool, optional, default False
Search by symbol descriptions.
tags : bool, optional, default False
Search by symbol tags.
meta : bool, optional, default False
Search within a symbol's meta attribute's value.
stronly : bool, optional, default False
Return only a list of symbol names, as opposed
to the (entire) Symbol objects.
dolikelogic : bool, optional, default True
Append '%' to either side of the string, if the string
doesn't already have % specified.
Returns
-------
List of Symbols or empty list
"""
if stronly:
qry = self.ses.query(Symbol.name)
else:
qry = self.ses.query(Symbol)
if tags:
qry = qry.join(SymbolTag)
if meta:
qry = qry.join(SymbolMeta)
if dolikelogic:
if usrqry is not None:
if '%' not in usrqry:
usrqry = '%' + usrqry + '%'
crits = []
if name:
crits.append(Symbol.name.like(usrqry))
if tags:
crits.append(SymbolTag.tag.like(usrqry))
if desc:
crits.append(Symbol.description.like(usrqry))
if meta:
crits.append(SymbolMeta.value.like(usrqry))
if len(crits):
qry = qry.filter(or_(*crits))
qry = qry.order_by(Symbol.name)
if stronly:
return [sym[0] for sym in qry.distinct()]
else:
return [sym for sym in qry.distinct()]
def search_tag(self, tag, symbols=True, feeds=False):
""" Get a list of Symbols by searching a tag or partial tag.
Parameters
----------
tag : str
The tag to search. Appending '%' will use SQL's "LIKE"
functionality.
symbols : bool, optional
Search for Symbol's based on their tags.
feeds : bool, optional
Search for Symbol's based on their Feeds' tags.
Returns
-------
List of Symbols or empty list
"""
syms = []
if isinstance(tag, (str, unicode)):
tags = [tag]
else:
tags = tag
if symbols:
crits = []
for tag in tags:
if "%" in tag:
crit = SymbolTag.tag.like(tag)
else:
crit = SymbolTag.tag == tag
crits.append(crit)
qry = self.ses.query(SymbolTag)
qry = qry.filter(or_(*crits))
syms = qry.all()
syms = [tagged.symbol for tagged in syms]
if feeds:
crits = []
for tag in tags:
if "%" in tag:
crit = FeedTag.tag.like(tag)
else:
crit = FeedTag.tag == tag
crits.append(crit)
qry = self.ses.query(Symbol).select_from(FeedTag)
qry = qry.join(FeedTag.feed).join(Feed.symbol)
qry = qry.filter(or_(*crits))
fds = qry.distinct()
syms = syms + [sym for sym in fds]
return list(set(syms))
return syms
def search_meta_specific(self, **avargs):
"""Search list of Symbol objects by by querying specific
meta attributes and their respective values.
Parameters
----------
avargs
The attributes and values passed as key word arguments.
If more than one criteria is specified, AND logic is applied.
Appending '%' to values will use SQL's "LIKE" functionality.
Example
-------
>>> sm.search_meta_specific(geography='Canada', sector='Gov%')
Returns
-------
List of Symbols or empty list
"""
qry = self.ses.query(Symbol).join(SymbolMeta.symbol)
for attr, value in avargs.iteritems():
SMA = aliased(SymbolMeta)
if "%" in value:
acrit = SMA.value.like(value)
else:
acrit = SMA.value == value
crit = and_(acrit, SMA.attr == attr)
qry = qry.filter(crit).join(SMA, SMA.symname == SymbolMeta.symname)
qry = qry.order_by(Symbol.name)
return qry.all()
def tag_counts(self):
""" Get a list of tags and the number of each.
Returns
-------
List of tuples, in order (tag, # of Symbols w/Tag)
"""
qry = self.ses.query(SymbolTag.tag, func.count(SymbolTag.tag))
qry = qry.group_by(SymbolTag.tag)
qry = qry.order_by(SymbolTag.tag)
tags = list(qry.all())
return tags
def bulk_cache_of_tag(self, tag):
""" Caches all the symbols by a certain tag.
For now, there is no difference from
caching each symbol individually. In the future,
this functionality could have speed improvements.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Returns
-------
TrumpReport
"""
syms = self.search_tag(tag)
name = 'Bulk Cache of Symbols tagged {}'.format(tag)
tr = TrumpReport(name)
for sym in syms:
sr = sym.cache()
tr.add_symbolreport(sr)
return tr
def build_view_from_tag(self, tag):
"""
Build a view of group of Symbols based on their tag.
Parameters
----------
tag : str
Use '%' to enable SQL's "LIKE" functionality.
Note
----
This function is written without SQLAlchemy,
so it is only tested on Postgres.
"""
syms = self.search_tag(tag)
names = [sym.name for sym in syms]
subs = ["SELECT indx, '{}' AS symbol, final FROM {}".format(s, s) for s in names]
qry = " UNION ALL ".join(subs)
qry = "CREATE VIEW {} AS {};".format(tag, qry)
self.ses.execute("DROP VIEW IF EXISTS {};".format(tag))
self.ses.commit()
self.ses.execute(qry)
self.ses.commit()
def add_existing_ind_orfs(self, which, symbol, orfs_n, value, comment=None, user=None):
ind = self.ses.query(symbol.datatable.c.indx).order_by(symbol.datatable.c.indx).offset(orfs_n).limit(1).one()
ind = ind[0]
tmp = {'or' : 'override', 'fs' : 'failsafe'}
self._add_orfs(tmp[which], symbol, ind, value, user=user, comment=comment)
def _add_orfs(self, which, symbol, ind, val, dt_log=None, user=None, comment=None):
"""
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
See add_override and add_fail_safe.
Parameters
----------
which : str
Fail Safe or Override?
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe.
"""
if not isinstance(symbol, (str, unicode)):
symbol = symbol.name
if not dt_log:
dt_log = dt.datetime.now()
if which.lower() == 'override':
qry = self.ses.query(func.max(Override.ornum).label('max_ornum'))
override = True
elif which.lower() == 'failsafe':
qry = self.ses.query(func.max(FailSafe.fsnum).label('max_fsnum'))
override = False
qry = qry.filter_by(symname = symbol)
cur_num = qry.one()
if cur_num[0] is None:
next_num = 0
else:
next_num = cur_num[0] + 1
if override:
tmp = Override(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
ornum=next_num)
else:
tmp = FailSafe(symname=symbol,
ind=ind,
val=val,
dt_log=dt_log,
user=user,
comment=comment,
fsnum=next_num)
self.ses.add(tmp)
self.ses.commit()
def add_override(self, symbol, ind, val, dt_log=None, user=None, comment=None):
"""
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
With default settings, Overrides get applied with the highest priority.
Parameters
----------
symbol : Symbol or str
The Symbol to override
ind : obj
The index value where the override should be applied
val : obj
The data value which will be used in the override
dt_log : datetime
A log entry, for saving when this override was created.
user : str
A string representing which user made the override
comment : str
A string to store any notes related to this override.
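Example
-------
An illustrative sketch; assumes ``sm`` is a SymbolManager and 'oil' is
a datetime-indexed, float-valued Symbol:
>>> sm.add_override('oil', dt.datetime(2015, 12, 31), 48.5,
...                 user='analyst', comment='bad print from source')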
"""
self._add_orfs('override', symbol, ind, val, dt_log, user, comment)
def add_fail_safe(self, symbol, ind, val,
dt_log=None, user=None, comment=None):
"""
Appends a single indexed-value pair, to a symbol object, to be
used during the final steps of the aggregation of the datatable.
With default settings, FailSafes get applied with the lowest priority.
Parameters
----------
symbol : Symbol or str
The Symbol to apply the fail safe
ind : obj
The index value where the fail safe should be applied
val : obj
The data value which will be used in the fail safe
dt_log : datetime
A log entry, for saving when this fail safe was created.
user : str
A string representing which user made the fail safe
comment : str
A string to store any notes related to this fail safe.
"""
self._add_orfs('failsafe', symbol, ind, val, dt_log, user, comment)
def delete_orfs(self, sym, which, orfs_num):
if which.lower() in ('or', 'override'):
crit = and_(Override.ornum == orfs_num, Override.symname == sym)
qry = self.ses.query(Override)
elif which.lower() in ('fs', 'failsafe'):
crit = and_(FailSafe.fsnum == orfs_num, FailSafe.symname == sym)
qry = self.ses.query(FailSafe)
else:
raise Exception("{} is not or/override or fs/failsafe".format(which))
qry.filter(crit).delete(synchronize_session=False)
self.ses.commit()
class ConversionManager(SymbolManager):
"""
A ConversionManager handles the conversion of previously instantiated
symbols, based on the object's units and the conversion manager
setup. The conversion is performed ad hoc, in Python only.
That is, nothing about the conversion persists
in the Trump framework. Only the final series is converted.
"""
def __init__(self, engine_or_eng_str=None, system='FX', tag=None):
"""
Parameters
----------
engine_or_eng_str : str or None
Pass a SQLAlchemy engine, or an engine string. Without one,
it will use the default provided in trump/options/trump.cfg.
If it fails to get a value there, an in-memory SQLite
session will be created.
system : str, optional
Uses the FX conversion system logic by default.
Currently, no other systems are implemented. Eg. metric-only,
imperial-metric, etc.
Other systems can be added after instantiation of the
ConversionManager, but the one specified at instantiation
will be used as default.
tag : str, optional
Tag for the set of feeds to use for conversion. Only necessary
if the conversion system relies on it. For FX, it's needed to
specify the set of feeds to use.
Other tags can be added after instantiation of the
ConversionManager, but the one specified at instantiation
will be used as default.
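Example
-------
An illustrative sketch; assumes the FX conversion feeds are tagged
'fxrates' in the same database:
>>> cm = ConversionManager(None, system='FX', tag='fxrates')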
"""
super(ConversionManager, self).__init__(engine_or_eng_str)
self.default_system = system
self.default_tag = tag
self.converters = {}
self.add_converter(system, tag)
def add_converter(self, system, tag):
if system not in self.converters:
self.converters[system] = {}
if tag not in self.converters[system]:
if system == 'FX':
if tag is None:
raise Exception("Must specify a tag for FX Conversion")
conversion_syms = self.search_tag(tag)
conv = FXConverter()
conv.use_trump_data(conversion_syms)
self.converters[system][tag] = conv
def get_converted(self, symbol, units='CAD', system=None, tag=None):
"""
Uses a Symbol's Dataframe, to build a new Dataframe,
with the data converted to the new units
Parameters
----------
symbol : str or tuple of the form (Dataframe, str)
String representing a symbol's name, or a dataframe
with the data required to be converted. If supplying a
dataframe, the tuple's second element must be its current units.
units : str, optional
Specify the units to convert the symbol to, defaults to CAD
system : str, optional
If None, the default system specified at instantiation
is used. System defines which conversion approach to take.
tag : str, optional
Tags define which set of conversion data is used. If None, the
default tag specified at instantiation is used.
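Example
-------
Illustrative only; assumes ``cm`` is a ConversionManager and 'oil_usd'
is a cached Symbol whose units are set to 'USD':
>>> cad_df = cm.get_converted('oil_usd', units='CAD')
>>> cad_df = cm.get_converted((some_df, 'USD'), units='CAD')  # some_df is any single-column DataFrame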
"""
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
df = sym.df
curu = sym.units
requ = units
elif isinstance(symbol, tuple):
df = symbol[0]
curu = symbol[1]
requ = units
else:
raise TypeError("Expected str or (DataFrame, str), found {}".format(type(symbol)))
system = system or self.default_system
tag = tag or self.default_tag
conv = self.converters[system][tag]
newdf = conv.convert(df, curu, requ)
newdf = pd.merge(df, newdf, left_index=True, right_index=True)
newdf = newdf[df.columns[0] + "_y"].to_frame()
newdf.columns = df.columns
return newdf
class Symbol(Base, ReprMixin):
__tablename__ = '_symbols'
name = Column('name', String, primary_key=True)
description = Column('description', String)
units = Column('units', String)
agg_method = Column('agg_method', String)
#minutes since last cache
freshthresh = Column('freshthresh', Integer, default=0)
log = relationship('SymbolLogEvent', lazy="dynamic", backref='_symbols', cascade=ADO)
index = relationship('Index', uselist=False, backref='_symbols',
cascade=ADO)
dtype = relationship('SymbolDataDef', uselist=False, backref='_symbols',
cascade=ADO)
handle = relationship("SymbolHandle", uselist=False, backref='_symbols',
cascade=ADO)
tags = relationship("SymbolTag", cascade=ADO)
aliases = relationship("SymbolAlias", cascade=ADO)
validity = relationship("SymbolValidity", cascade=ADO)
feeds = relationship("Feed", cascade=ADO)
meta = relationship("SymbolMeta", lazy='dynamic', cascade=ADO)
def __init__(self, name, description=None, units=None,
agg_method="PRIORITY_FILL",
indexname="UNNAMED", indeximp="DatetimeIndexImp",
freshthresh=0):
"""A Trump Symbol persistently objectifies indexed data
Use the SymbolManager class to create or retrieve existing symbols.
Parameters
----------
name : str
The name of the symbol to be added to the database, serves
as a primary key across the trump installation.
description : str, optional
a description of the symbol, just for notes.
units : str, optional
a string representing the units for the data.
agg_method : str, default PRIORITY_FILL
the method used for aggregating feeds, see
trump.aggregation.symbol_aggs.py for the list of available options.
indexname : str
a proprietary name assigned to the index.
indeximp : str
a string representing an index implementer (one of the classes in indexing.py)
freshthresh : int, default 0
number of minutes before the feed is considered stale.
"""
self.name = name
self.description = description
self.units = units
self.freshthresh = freshthresh
self.index = Index(indexname, indeximp, sym=name)
self.dtype = SymbolDataDef("SkipDataDef", sym=name)
self.agg_method = agg_method
self.datatable = None
self.datatable_exists = False
def to_json(self):
"""Returns the json representation of a Symbol object's tags, description, and meta data"""
return json.dumps(self, cls=new_alchemy_encoder(), check_circular=False)
def last_cache(self,result='COMPLETE'):
"""
The date and time of the previous cache.
Parameters
----------
result : string, default 'COMPLETE'
A string choosing which point in the log
should be returned.
- COMPLETE - the last time a cache was completed
- STARTED - the last time a cache was started
Returns
-------
datetime.datetime
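Example
-------
A minimal sketch; assumes ``sym`` is a Symbol that has been cached:
>>> sym.last_cache()           # last completed cache
>>> sym.last_cache('STARTED')  # last started cache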
"""
crit = and_(SymbolLogEvent.event == 'CACHE',
SymbolLogEvent.evresult == result)
qry = self.log.filter(crit)
qry = qry.order_by(SymbolLogEvent.evtime.desc())
t = qry.first()
if t:
return t.evtime
else:
return None
def set_indexing(self, index_template):
"""
Update a symbol's indexing strategy
Parameters
----------
index_template : bIndex or bIndex-like
An index template used to overwrite all
details about the symbol's current index.
"""
objs = object_session(self)
if self.index.indimp != index_template.imp_name:
self._refresh_datatable_schema()
self.index.name = index_template.name
self.index.indimp = index_template.imp_name
self.index.case = index_template.case
self.index.setkwargs(**index_template.kwargs)
objs.commit()
def add_meta(self, **metadict):
"""Add meta information to a Symbol.
Parameters
----------
metadict
Attributes are passed as keywords, with their
associated values as strings. For meta attributes with spaces,
use an unpacked dict.
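Example
-------
A minimal sketch; assumes ``sym`` is an existing Symbol:
>>> sym.add_meta(geography='Canada', sector='Energy')
>>> sym.add_meta(**{'head office': 'Calgary'})  # attribute with a space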
"""
objs = object_session(self)
for attr,val in metadict.iteritems():
newmeta = SymbolMeta(self, attr, val)
self.meta.append(newmeta)
objs.commit()
def add_validator(self, val_template):
"""
Creates and adds a SymbolValidity object to the Symbol.
Parameters
----------
validity_template : bValidity or bValidity-like
a validity template.
"""
validator = val_template.validator
args = []
for arg in SymbolValidity.argnames:
if arg in val_template.__dict__.keys():
args.append(getattr(val_template, arg))
objs = object_session(self)
qry = objs.query(func.max(SymbolValidity.vid).label('max_vid'))
qry = qry.filter_by(symname = self.name)
cur_vid = qry.one()[0]
if cur_vid is None:
next_vid = 0
else:
next_vid = cur_vid + 1
self.validity.append(SymbolValidity(self, next_vid, validator, *args))
objs.commit()
def update_handle(self, chkpnt_settings):
"""
Update a symbol's handle checkpoint settings
Parameters
----------
chkpnt_settings : dict
a dictionary where the keys are strings representing
individual handle checkpoint names, for a Symbol
(eg. caching_of_feeds, feed_aggregation_problem, ...)
See SymbolHandle.__table__.columns for the
current list.
The values can be either integer or BitFlags.
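Example
-------
A minimal sketch; assumes ``sym`` is an existing Symbol and that
``BitFlag`` is in scope:
>>> sym.update_handle({'aggregation': BitFlag(['raise', 'report'])})
>>> sym.update_handle({'validity_check': 36})  # integers work as well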
"""
# Note, for now, this function is nearly identical
# to the Feed version. Careful when augmenting,
# to get the right one.
objs = object_session(self)
# override with anything passed in
for checkpoint in chkpnt_settings:
if checkpoint in SymbolHandle.__table__.columns:
settings = chkpnt_settings[checkpoint]
setattr(self.handle, checkpoint, settings)
objs.commit()
def cache(self, checkvalidity=True, staleonly=False, allowraise=True):
""" Re-caches the Symbol's datatable by querying each Feed.
Parameters
----------
checkvalidity : bool, optional
Optionally, check validity post-cache. Improve speed by
setting to False.
staleonly : bool, default False
Set to True to speed things up, by only re-caching when
the symbol's data has gone stale.
allowraise : bool, default True
ANDed with the Symbol.handle and Feed.handle 'raise' flags;
set to False when caching a list of symbols. Note, this
won't silence bugs in Trump, eg. unhandled edge cases.
So, those still need to be handled by the application.
Returns
-------
SymbolReport
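Example
-------
A minimal sketch; assumes ``sym`` is a Symbol with at least one Feed:
>>> report = sym.cache()
>>> report = sym.cache(staleonly=True, allowraise=False)  # eg. when looping over many symbols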
"""
note = "staleonly = {}".format(staleonly)
self._log_an_event('CACHE','START',note)
docache = True
if staleonly:
lc = self.last_cache()
if lc:
freshthresh = self.freshthresh
nw = dt.datetime.now()
freshness = (nw - lc).total_seconds() / 60.0
if freshness <= freshthresh:
docache = False
smrp = SymbolReport(self.name)
if docache:
data = []
cols = ['final', 'override_feed000', 'failsafe_feed999']
if len(self.feeds) == 0:
err_msg = "Symbol has no Feeds. Can't cache a feed-less Symbol."
raise Exception(err_msg)
try:
datt = datadefs[self.dtype.datadef]
indtt = indexingtypes[self.index.indimp]
indkwargs = self.index.getkwargs()
indt = indtt(self.index.case, **indkwargs)
rp = ReportPoint('datadef', 'class', datt)
smrp.add_reportpoint(rp)
for afeed in self.feeds:
fdrp = afeed.cache(allowraise)
smrp.add_feedreport(fdrp)
tmp = datt(afeed.data).converted
tmp = indt.process_post_feed_cache(tmp)
data.append(tmp)
cols.append(afeed.data.name)
except:
point = "caching"
smrp = self._generic_exception(point, smrp, allowraise)
try:
data = pd.concat(data, axis=1)
except:
point = "concatenation"
smrp = self._generic_exception(point, smrp, allowraise)
# We shouldn't need to do anything here, as the concatenation
# should be smooth...
# preindlen = len(data)
#
#
# if preindlen > 0 :
# #indt = indtt(data, self.index.case, indkwargs)
# #data = indt.final_dataframe()
# data = indt.process_post_concat(data)
#
# postindlen = len(data)
# if postindlen == 0 and preindlen > 0:
# raise Exception("Indexing Implementer likely poorly designed")
# else:
# postindlen = 0
def build_hi_df(which, colname):
objs = object_session(self)
qry = objs.query(which.ind,
func.max(which.dt_log).label('max_dt_log'))
qry = qry.filter_by(symname = self.name)
grb = qry.group_by(which.ind).subquery()
qry = objs.query(which)
ords = qry.join((grb, and_(which.ind == grb.c.ind,
which.dt_log == grb.c.max_dt_log))).all()
if len(ords):
orind = [row.ind for row in ords]
orval = [row.val for row in ords]
ordf = indt.build_ordf(orind, orval, colname)
else:
ordf = pd.DataFrame(columns=[colname])
return ordf
ordf = build_hi_df(Override, 'override_feed000')
fsdf = build_hi_df(FailSafe, 'failsafe_feed999')
orfsdf = pd.merge(ordf, fsdf, how='outer', left_index=True, right_index=True)
data = pd.merge(orfsdf, data, how='outer', left_index=True, right_index=True)
data = indt.process_post_orfs(data)
try:
data = data.fillna(value=pd.np.nan)
data = data[sorted_feed_cols(data)]
data['final'] = FeedAggregator(self.agg_method).aggregate(data)
except:
point = "aggregation"
smrp = self._generic_exception(point, smrp, allowraise)
# SQLAQ There are several states to deal with at this point
# A) the datatable exists but a feed has been added
# B) the datatable doesn't exist and needs to be created
# C) the datatable needs to be updated for more or less feeds
# D) the datatable_exists flag is incorrect because all edge cases
# haven't been handled yet.
#
# My logic is that once Trump is more functional, I'll be able to
# eliminate this hacky solution. But, SQLAlchemy might have
# a more elegant answer. A check, of some kind, prior to deletion?
# if not self.datatable_exists:
# self._init_datatable() #older version of _init_datatable
# delete(self.datatable).execute()
# self._init_datatable() #older version of _init_datatable
# Is this the best way to check?
# if engine.dialect.has_table(session.connection(), self.name):
# delete(self.datatable).execute()
self._refresh_datatable_schema()
if len(data) > 0:
data.index.name = 'indx'
data = data.reset_index()
datarecords = data.to_dict(orient='records')
objs = object_session(self)
objs.execute(self.datatable.insert(), datarecords)
objs.commit()
if checkvalidity:
try:
isvalid, reports = self.check_validity(report=True)
for rep in reports:
smrp.add_reportpoint(rep)
if not isvalid:
raise Exception('{} is not valid'.format(self.name))
except:
point = "validity_check"
smrp = self._generic_exception(point, smrp, allowraise)
self._log_an_event('CACHE','COMPLETE', "Fresh!")
else:
self._log_an_event('CACHE','FRESH', "Was still fresh")
return smrp
def check_validity(self, checks=None, report=True):
""" Runs a Symbol's validity checks.
Parameters
----------
checks : str, [str,], optional
Only run certain checks.
report : bool, optional
If set to False, the method will return only the result of the
checks (True/False). Set to True, to have the individual
ReportPoints returned as well.
Returns
-------
Bool, or a Tuple of the form (Bool, [ReportPoint,])
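Example
-------
A minimal sketch; assumes ``sym`` is a cached Symbol with one or more
validity checks attached:
>>> isvalid, points = sym.check_validity()
>>> ok = sym.check_validity(report=False)  # bool only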
"""
if report:
reportpoints = []
allchecks = []
checks_specified=False
if isinstance(checks, (str, unicode)):
checks = [checks]
checks_specified = True
elif isinstance(checks, (list, tuple)):
checks_specified = True
else:
checks = []
for val in self.validity:
if (val.validator in checks) or (not checks_specified):
ValCheck = validitychecks[val.validator]
anum = ValCheck.__init__.func_code.co_argcount - 2
args = []
for arg in SymbolValidity.argnames:
args.append(getattr(val, arg))
valid = ValCheck(self.datatable_df, *args[:anum])
res = valid.result
allchecks.append(res)
rp = ReportPoint('validation', val.validator, res, str(args[:anum]))
reportpoints.append(rp)
if report:
return all(allchecks), reportpoints
else:
return all(allchecks)
@property
def isvalid(self):
"""Quick access to the results of a a check_validity report
Returns
-------
Bool
"""
return self.check_validity(report=False)
@property
def describe(self):
""" describes a Symbol, returns a string """
lines = []
lines.append("Symbol = {}".format(self.name))
if len(self.tags):
tgs = ", ".join(x.tag for x in self.tags)
lines.append(" tagged = {}".format(tgs))
if len(self.aliases):
als = ", ".join(x.alias for x in self.aliases)
lines.append(" aliased = {}".format(als))
if len(self.feeds):
lines.append(" feeds:")
for fed in self.feeds:
lines.append(" {}. {}".format(fed.fnum,
fed.ftype))
return "\n".join(lines)
def del_tags(self, tags):
""" remove a tag or tags from a symbol
Parameters
----------
tags : str or [str,]
Tags to be removed
"""
# SQLA Adding a SymbolTag object feels awkward/unnecessary.
# Should I be implementing this functionality a different way?
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
docommit = False
for symboltag in self.tags:
if symboltag.tag in tags:
objs.delete(symboltag)
docommit = True
if docommit:
objs.commit()
def add_tags(self, tags):
""" add a tag or tags to a symbol
Parameters
----------
tags : str or [str,]
Tags to be added
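Example
-------
A minimal sketch; assumes ``sym`` is an existing Symbol:
>>> sym.add_tags('energy')
>>> sym.add_tags(['energy', 'canada'])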
"""
# SQLA Adding a SymbolTag object feels awkward/unnecessary.
# Should I be implementing this functionality a different way?
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
tmps = [SymbolTag(tag=t, sym=self) for t in tags]
objs.add_all(tmps)
objs.commit()
def _log_an_event(self, event, evresult='No Result', note='No Note'):
""" log an event
Parameters
----------
event : string
evresult : string
note : string
"""
objs = object_session(self)
evnt = SymbolLogEvent(event, evresult, note, sym=self.name)
objs.add(evnt)
objs.commit()
@property
def n_tags(self):
""" returns the number of tags """
return len(self.tags)
def add_feed(self, feedlike, **kwargs):
""" Add a feed to the Symbol
Parameters
----------
feedlike : Feed or bFeed-like
The feed template, or Feed object to be added.
kwargs
Munging instructions
"""
if 'fnum' in kwargs:
fnum = kwargs['fnum']
del kwargs['fnum']
else:
fnum = None
if isinstance(feedlike, bFeed):
munging = feedlike.munging
if 'munging' in kwargs:
explicit_munging = kwargs['munging'].as_odict
for key in explicit_munging:
munging[key] = explicit_munging[key]
fed = Feed(self, feedlike.ftype,
feedlike.sourcing,
munging,
feedlike.meta,
fnum)
elif isinstance(feedlike, Feed):
fed = feedlike
else:
raise Exception("Invalid Feed {}".format(repr(feedlike)))
self.feeds.append(fed)
objs = object_session(self)
objs.add(fed)
objs.commit()
def add_alias(self, alias):
""" Add an alias to a Symbol
Parameters
----------
alias : str
The alias
"""
objs = object_session(self)
if isinstance(alias, list):
raise NotImplementedError
elif isinstanceofany(alias, (str, unicode)):
a = SymbolAlias(self, alias)
self.aliases.append(a)
objs.add(a)
def _final_data(self):
"""
Returns
-------
A list of tuples representing rows from the datatable's index
and final column, sorted accordingly.
"""
dtbl = self.datatable
objs = object_session(self)
if isinstance(dtbl, Table):
return objs.query(dtbl.c.indx, dtbl.c.final).all()
else:
raise Exception("Symbol has no datatable, likely need to cache first.")
def _max_min(self):
"""
Returns
-------
A tuple consisting of (max, min) of the index.
"""
dtbl = self.datatable
objs = object_session(self)
if isinstance(dtbl, Table):
return objs.query(func.max(dtbl.c.indx).label("max_indx"),
func.min(dtbl.c.indx).label("min_indx")).one()
else:
raise Exception("Symbol has no datatable")
def _all_datatable_data(self):
"""
Returns
-------
A list of tuples representing rows from all columns of the datatable,
sorted accordingly.
"""
dtbl = self.datatable
objs = object_session(self)
imcols = [dtbl.c.indx, dtbl.c.final, dtbl.c.override_feed000, dtbl.c.failsafe_feed999]
cols = imcols[:3] + [c for c in dtbl.c if c not in (imcols)] + [imcols[3]]
if isinstance(dtbl, Table):
return objs.query(*cols).order_by(dtbl.c.indx).all()
else:
raise Exception("Symbol has no datatable")
@property
def df(self):
"""
Note: this accessor is read-only. It should be copied if accessed
more than once in an application.
Returns
-------
Dataframe of the symbol's final data.
"""
data = self._final_data()
if len(data) == 0:
adf = pd.DataFrame(columns = [self.index.name, self.name])
return adf.set_index(self.index.name)
adf = pd.DataFrame(data)
if len(adf.columns) != 2:
msg = "Symbol ({}) needs to be cached prior to building a Dataframe"
msg = msg.format(self.name)
raise Exception(msg)
adf.columns = [self.index.name, self.name]
return self._finish_df(adf, 'FINAL')
@property
def datatable_df(self):
""" returns the dataframe representation of the symbol's final data """
data = self._all_datatable_data()
adf = pd.DataFrame(data)
adf.columns = self.dt_all_cols
return self._finish_df(adf, 'ALL')
def _finish_df(self, adf, mode):
datt = datadefs[self.dtype.datadef]
if mode == 'ALL':
for col in adf.columns:
adf[col] = datt(adf[col]).converted
adf = adf.set_index('indx')
elif mode == 'FINAL':
adf[self.name] = datt(adf[self.name]).converted
adf = adf.set_index(self.index.name)
indtt = indexingtypes[self.index.indimp]
indt = indtt(self.index.case, **self.index.getkwargs())
adf = indt.process_post_db(adf)
if adf.index.name == "UNNAMED":
adf.index.name = None
else:
adf.index.name = self.index.name
return adf
def del_feed(self):
""" remove a feed """
raise NotImplementedError("Feed deletion has not be created yet")
@property
def n_feeds(self):
""" returns the number of feeds """
return len(self.feeds)
def set_description(self, description):
""" change the description of the symbol """
self.description = description
def set_units(self, units):
""" change the symbol's units """
self.units = units
def _init_datatable(self):
"""
Instantiates the .datatable attribute, pointing to a table in the
database that stores all the cached data
"""
try:
self.datatable = Table(self.name, Base.metadata, autoload=True)
except NoSuchTableError:
print "Creating datatable, cause it doesn't exist"
self.datatable = self._datatable_factory()
self.datatable.create()
self.datatable_exists = True
def _refresh_datatable_schema(self):
objs = object_session(self)
if objs.connection().engine.dialect.name == 'postgresql':
objs.execute("DROP TABLE IF EXISTS {} CASCADE;".format(self.name))
objs.commit()
# We still
self.datatable = self._datatable_factory()
self.datatable.drop(checkfirst=True)
self.datatable.create(checkfirst=True)
self.datatable_exists = True
objs.commit()
def _datatable_factory(self):
"""
creates a SQLAlchemy Table object with the appropriate number of
columns given the number of feeds
"""
feed_cols = ['feed{0:03d}'.format(i + 1) for i in range(self.n_feeds)]
feed_cols = ['override_feed000'] + feed_cols + ['failsafe_feed999']
ind_sqlatyp = indexingtypes[self.index.indimp].sqlatyp
dat_sqlatyp = datadefs[self.dtype.datadef].sqlatyp
atbl = Table(self.name, Base.metadata,
Column('indx', ind_sqlatyp, primary_key=True),
Column('final', dat_sqlatyp),
*(Column(fed_col, dat_sqlatyp) for fed_col in feed_cols),
extend_existing=True)
self.dt_feed_cols = feed_cols[:]
self.dt_all_cols = ['indx', 'final'] + feed_cols[:]
return atbl
def _generic_exception(self, point, reporter, allowraise=True):
logic = getattr(self.handle, point).asdict()
logic['raise'] = logic['raise'] and allowraise
logic = BitFlag(logic)
msg = "Exception at the point of {} for {}"
msg = msg.format(point, self.name)
hdlrp = Handler(logic, point, msg).process()
if hdlrp:
reporter.add_handlepoint(hdlrp)
return reporter
@property
def meta_map(self):
return ProxyDict(self, 'meta', SymbolMeta, 'attr')
def existing_orfs(self):
objs = object_session(self)
#.order_by(Override.ornum.desc())
ors = objs.query(Override).filter(Override.symname == self.name).order_by(Override.ornum.asc()).all()
#.order_by(FailSafe.fsnum.desc())
fss = objs.query(FailSafe).filter(FailSafe.symname == self.name).order_by(FailSafe.fsnum.asc()).all()
return {'or' : ors, 'fs' : fss}
@event.listens_for(Symbol, 'load')
def __receive_load(target, context):
""" loads a symbols datatable upon being queried """
target._init_datatable()
def set_symbol_or_symname(self, sym):
if isinstance(sym, (str, unicode)):
setattr(self, "symname", sym)
else:
setattr(self, "symbol", sym)
class SymbolLogEvent(Base, ReprMixin):
__tablename__ = '_symbol_log_events'
evtime = Column('evtime', DateTime, primary_key=True)
symname = Column('symname', String, ForeignKey('_symbols.name', **CC),
primary_key=True)
event = Column('event', String)
evresult = Column('evresult', String)
monicker = Column('monicker', String)
note = Column('note', String)
def __init__(self, event, evresult='No Result', note='No Note', sym=None):
set_symbol_or_symname(self, sym)
self.event = event
self.evresult = evresult
self.monicker = MONICKER
self.note = note
self.evtime = dt.datetime.now()
class SymbolTag(Base, ReprMixin):
__tablename__ = '_symbol_tags'
symname = Column('symname', String, ForeignKey('_symbols.name', **CC),
primary_key=True)
tag = Column('tag', String, primary_key=True)
symbol = relationship("Symbol")
def __init__(self, tag, sym=None):
set_symbol_or_symname(self, sym)
self.tag = tag
class SymbolMeta(Base, ReprMixin):
__tablename__ = "_symbol_meta"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
attr = Column('attr', String, primary_key=True)
value = Column('value', String)
symbol = relationship("Symbol")
def __init__(self, symbol, attr, value):
self.symbol = symbol
self.attr = attr
self.value = value
class SymbolDataDef(Base, ReprMixin):
__tablename__ = "_symbol_datadef"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
datadef = Column("datadef", String, nullable=False)
"""string representing a :py:class:`~trump.datadef.DataDefiner`."""
def __init__(self, datadef, sym=None):
set_symbol_or_symname(self, sym)
self.datadef = datadef
class SymbolAlias(Base, ReprMixin):
__tablename__ = '_symbol_aliases'
symname = Column('symname', String, ForeignKey('_symbols.name', **CC),
primary_key=True)
alias = Column('alias', String, primary_key=True)
symbol = relationship("Symbol")
def __init__(self, symbol, alias):
self.symbol = symbol
self.alias = alias
class SymbolValidity(Base, ReprMixin):
__tablename__ = "_symbol_validity"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
vid = Column('vid', Integer, primary_key=True, nullable=False)
validator = Column('validator', String, nullable=False)
argnames = ['arg' + a for a in list('abcde')]
arga = Column('arga', ReprObjType)
argb = Column('argb', ReprObjType)
argc = Column('argc', ReprObjType)
argd = Column('argd', ReprObjType)
arge = Column('arge', ReprObjType)
symbol = relationship("Symbol")
def __init__(self, symbol, vid, validator, *args):
set_symbol_or_symname(self, symbol)
self.vid = vid
self.validator = validator
pads = [None] * (len(self.argnames) - len(args))
argvals = list(args) + pads
for i, arg in enumerate(self.argnames):
setattr(self, arg, argvals[i])
class SymbolHandle(Base, ReprMixin):
"""
Stores instructions about how to handle exceptions thrown
during specific points of Symbol caching:
.. code-block:: python
sh = SymbolHandle({'aggregation' : BitFlag(36)}, aSymbol)
>>> sh.aggregation['email']
True
"""
__tablename__ = "_symbol_handle"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
caching = Column('caching', BitFlagType)
concatenation = Column('concatenation', BitFlagType)
aggregation = Column('aggregation', BitFlagType)
validity_check = Column('validity_check', BitFlagType)
symbol = relationship("Symbol")
def __init__(self, chkpnt_settings=None, sym=None):
"""
Parameters
----------
chkpnt_settings : dict
A dictionary with keys matching names of the handle points
and the values either integers or BitFlags
sym : str or Symbol
The Symbol that this SymbolHandle is associated with.
"""
set_symbol_or_symname(self, sym)
self.caching = rbd or BitFlag(0)
self.concatenation = rbd or BitFlag(['raise'])
self.aggregation = rbd or BitFlag(['stdout'])
self.validity_check = rbd or BitFlag(['report'])
chkpnt_settings = chkpnt_settings or {}
# override with anything passed in settings
for checkpoint in chkpnt_settings:
if checkpoint in SymbolHandle.__table__.columns:
settings = chkpnt_settings[checkpoint]
setattr(self, checkpoint, settings)
def setting(self, handlepoint):
return getattr(self, handlepoint)
@property
def points(self):
pnts = [str(p).split(".")[1] for p in SymbolHandle.__table__.columns if 'symname' not in str(p)]
return [(pnt, getattr(self, pnt)) for pnt in pnts]
class Index(Base, ReprMixin):
__tablename__ = "_indicies"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
name = Column("name", String, nullable=False)
"""string to name the index, only used when serving."""
indimp = Column("indimp", String, nullable=False)
"""string representing a :py:class:`~trump.indexing.IndexImplementer`."""
case = Column("case", String)
"""string used in a :class:`~.indexing.IndexImplementer` switch statement."""
kwargs = relationship("IndexKwarg", lazy="dynamic", cascade=ADO)
def __init__(self, name, indimp, case=None, kwargs=None, sym=None):
set_symbol_or_symname(self, sym)
self.name = name
self.indimp = indimp
self.case = case or "asis"
kwargs = kwargs or {}
self.setkwargs(**kwargs)
def setkwargs(self, **kwargs):
self.kwargs = []
if kwargs is not None:
list_of_kwargs = []
for kword, val in kwargs.iteritems():
list_of_kwargs.append(IndexKwarg(kword, val))
self.kwargs = list_of_kwargs
else:
self.kwargs = []
def getkwargs(self):
kwargs = {}
for indkw in self.kwargs:
kwargs[indkw.kword] = indkw.val
return kwargs
class IndexKwarg(Base, ReprMixin, DuckTypeMixin):
__tablename__ = "_index_kwargs"
symname = Column('symname', String, ForeignKey('_indicies.symname', **CC),
primary_key=True)
kword = Column('kword', String, primary_key=True)
_colswitch = Column('colswitch', Integer)
boolcol = Column('boolcol', Boolean)
strcol = Column('strcol', String)
intcol = Column('intcol', Integer)
floatcol = Column('floatcol', Float)
reprcol = Column('reprcol', ReprObjType)
def __init__(self, kword, val):
self.kword = kword
self.setval(val)
class Feed(Base, ReprMixin):
"""
The Feed object stores parameters associated with sourcing and munging
a single series.
"""
__tablename__ = "_feeds"
symname = Column('symname', String, ForeignKey("_symbols.name", **CC),
primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
state = Column('state', String, nullable=False)
ftype = Column('ftype', String, nullable=False)
handle = relationship("FeedHandle", uselist=False, backref='_feeds',
cascade=ADO)
tags = relationship("FeedTag", cascade=ADO)
sourcing = relationship("FeedSource", lazy="dynamic", cascade=ADO)
meta = relationship("FeedMeta", lazy="dynamic", cascade=ADO)
munging = relationship("FeedMunge", lazy="dynamic", cascade=ADO)
symbol = relationship("Symbol")
def __init__(self, symbol, ftype, sourcing,
munging=None, meta=None, fnum=None):
self.ftype = ftype
self.state = "ON"
self.symbol = symbol
self.data = None
self.ses = object_session(symbol)
if fnum is None:
qry = self.ses.query(Feed.fnum)
existing_fnums = qry.filter(Feed.symname == symbol.name).all()
existing_fnums = [n[0] for n in existing_fnums]
if len(existing_fnums) == 0:
self.fnum = 0
else:
self.fnum = max(existing_fnums) + 1
else:
self.fnum = fnum
if meta:
for key in meta:
tmp = FeedMeta(attr=key, value=meta[key], feed=self)
self.ses.add(tmp)
self.meta_map[key] = tmp
self.ses.commit()
if sourcing:
sk = None
if 'sourcing_key' in meta:
sk = meta['sourcing_key']
fsrc = FeedSource(meta['stype'], sk, self)
for key in sourcing:
if key not in ('stype', 'sourcing_key'):
fsrckw = FeedSourceKwarg(key, sourcing[key], fsrc)
fsrc.sourcekwargs.append(fsrckw)
self.sourcing.append(fsrc)
self.ses.commit()
if munging:
for i, meth in enumerate(munging.keys()):
fmg = FeedMunge(order=i, mtype=munging[meth]['mtype'],
method=meth, feed=self)
for arg, value in munging[meth]['kwargs'].iteritems():
if not isinstance(value, (int, float)):
val = str(value)
else:
val = value
fmg.mungeargs.append(FeedMungeKwarg(arg, val, feedmunge=fmg))
self.munging.append(fmg)
self.ses.commit()
self.handle = FeedHandle(feed=self)
self.ses.commit()
def update_handle(self, chkpnt_settings):
"""
Update a feeds's handle checkpoint settings
:param chkpnt_settings, dict:
a dictionary where the keys are strings representing
individual handle checkpoint names, for a Feed
(eg. api_failure, feed_type, monounique...)
See FeedHandle.__table__.columns for the
current list.
The values can be either integer or BitFlags.
:return: None
"""
# Note, for now, this function is nearly identical
# to the Symbol version. Careful when augmenting,
# to get the right one.
objs = object_session(self)
# override with anything passed in
for checkpoint in chkpnt_settings:
if checkpoint in FeedHandle.__table__.columns:
settings = chkpnt_settings[checkpoint]
setattr(self.handle, checkpoint, settings)
objs.commit()
def add_tags(self, tags):
""" add a tag or tags to a Feed """
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
tmps = [FeedTag(tag=t, feed=self) for t in tags]
objs.add_all(tmps)
objs.commit()
def cache(self, allowraise):
fdrp = FeedReport(self.fnum)
src = self.sourcing.one()
srckeys = src.sourcing_map.keys()
kwargs = {k: src.sourcing_map[k].val for k in srckeys}
sourcing_key = src.sourcing_key
stype = src.stype
# If there is a sourcing key defined, use it to override any database
# defined parameters
if sourcing_key:
sourcing_overrides = read_settings()[stype][sourcing_key]
for key in sourcing_overrides:
kwargs[key] = sourcing_overrides[key]
rp = ReportPoint('readmeta', 'sourcing', stype, str(kwargs))
fdrp.add_reportpoint(rp)
try:
if stype in sources:
self.data = sources[stype](self.ses, **kwargs)
else:
msg = "Unknown Source Type : {} in {} Feed #{}"
raise Exception(msg.format(stype, self.symname, self.fnum))
except:
point = "api_failure"
fdrp = self._generic_exception(point, fdrp, allowraise)
self.data = pd.Series()
try:
if self.data is None:
raise Exception('{} Feed #{} is None'.format(self.symname, self.fnum))
if not isinstance(self.data, pd.Series):
raise Exception('{} Feed #{} did not return a Series ({})'.format(self.symname, self.fnum, type(self.data)))
except:
point = "feed_type"
fdrp = self._generic_exception(point, fdrp, allowraise)
self.data = pd.Series()
try:
if not self.data.index.is_unique:
dtstr, indstr = str(self.data), str(self.data.index)
msg = 'Feed #{} in {} index is not unique:' + dtstr + indstr
raise Exception(msg.format(self.fnum, self.symname))
if not self.data.index.is_monotonic:
dtstr, indstr = str(self.data), str(self.data.index)
msg = 'Feed #{} in {} index is not monotonic:' + dtstr + indstr
raise Exception(msg.format(self.fnum, self.symname))
except:
point = "monounique"
fdrp = self._generic_exception(point, fdrp, allowraise)
self.data = pd.Series()
# munge accordingly
print "Munging..."
print self.data.tail(5)
for mgn in self.munging:
#print mgn
#print mgn.munging_map.keys()
mmkeys = mgn.munging_map.keys()
kwargs = {k: mgn.munging_map[k].val for k in mmkeys}
if mgn.mtype == pab:
afunc = getattr(self.data, mgn.method)
self.data = afunc(**kwargs)
elif mgn.mtype == pnab:
lib = __import__('pandas', globals(), locals(), [], -1)
afunc = getattr(lib, mgn.method)
self.data = afunc(self.data, **kwargs)
# make sure it's named properly...
self.data.name = "feed" + str(self.fnum + 1).zfill(3)
rp = ReportPoint('finish', 'cache', True, self.data.tail(3))
fdrp.add_reportpoint(rp)
return fdrp
@property
def meta_map(self):
return ProxyDict(self, 'meta', FeedMeta, 'attr')
@property
def source(self):
return " ".join([p.key + " : " + p.value for p in self.sourcing])
def _generic_exception(self, point, reporter, allowraise=True):
logic = getattr(self.handle, point).asdict()
logic['raise'] = logic['raise'] and allowraise
logic = BitFlag(logic)
msg = "Exception for feed #{} for {} at the {} point."
msg = msg.format(self.fnum, self.symname, point)
hdlrp = Handler(logic, point, msg).process()
if hdlrp:
reporter.add_handlepoint(hdlrp)
return reporter
def _note_session(self):
self.ses = object_session(self)
@event.listens_for(Feed, 'load')
def __receive_load(target, context):
""" saves the session upon being queried """
target._note_session()
class FeedTag(Base, ReprMixin):
__tablename__ = '_feed_tags'
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
tag = Column('tag', String, primary_key=True)
feed = relationship("Feed")
fkey = ForeignKeyConstraint([symname, fnum],
[Feed.symname, Feed.fnum],
**CC)
__table_args__ = (fkey, {})
def __init__(self, tag, feed=None):
self.feed = feed
self.tag = tag
class FeedSource(Base, ReprMixin):
__tablename__ = "_feed_sourcing"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
stype = Column('stype', String)
sourcing_key = Column('sourcing_key', String)
feed = relationship("Feed")
sourcekwargs = relationship("FeedSourceKwarg", lazy="dynamic", cascade=ADO)
fkey = ForeignKeyConstraint([symname, fnum],
[Feed.symname, Feed.fnum],
**CC)
__table_args__ = (fkey, {})
def __init__(self, stype, sourcing_key, feed):
self.stype = stype
self.sourcing_key = sourcing_key
self.feed = feed
@property
def sourcing_map(self):
return ProxyDict(self, 'sourcekwargs', FeedSourceKwarg, 'kword')
class FeedSourceKwarg(Base, ReprMixin, DuckTypeMixin):
__tablename__ = "_feed_sourcing_kwargs"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
kword = Column('kword', String, primary_key=True)
_colswitch = Column('colswitch', Integer)
boolcol = Column('boolcol', Boolean)
strcol = Column('strcol', String)
intcol = Column('intcol', Integer)
floatcol = Column('floatcol', Float)
reprcol = Column('reprcol', ReprObjType)
feedsource = relationship("FeedSource")
fkey = ForeignKeyConstraint([symname, fnum],
[FeedSource.symname,
FeedSource.fnum])
__table_args__ = (fkey, {})
def __init__(self, kword, val, feedsource):
self.kword = kword
self.setval(val)
self.feedsource = feedsource
class FeedMeta(Base, ReprMixin):
__tablename__ = "_feed_meta"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
attr = Column('attr', String, primary_key=True)
feed = relationship("Feed")
value = Column('value', String)
fkey = ForeignKeyConstraint([symname, fnum],
[Feed.symname, Feed.fnum],
**CC)
__table_args__ = (fkey, {})
def __init__(self, feed, attr, value):
self.feed = feed
self.attr = attr
self.value = value
class FeedMunge(Base, ReprMixin):
__tablename__ = "_feed_munging"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
order = Column('order', Integer, primary_key=True)
mtype = Column('mtype', String)
method = Column('method', String)
feed = relationship("Feed")
mungeargs = relationship("FeedMungeKwarg", lazy="dynamic", cascade=ADO)
fkey = ForeignKeyConstraint([symname, fnum],
[Feed.symname, Feed.fnum])
__table_args__ = (fkey, {})
def __init__(self, order, mtype, method, feed):
self.order = order
self.method = method
self.mtype = mtype
self.feed = feed
@property
def munging_map(self):
return ProxyDict(self, 'mungeargs', FeedMungeKwarg, 'kword')
class FeedMungeKwarg(Base, ReprMixin, DuckTypeMixin):
__tablename__ = "_feed_munging_kwargs"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
order = Column('order', Integer, primary_key=True)
kword = Column('kword', String, primary_key=True)
_colswitch = Column('colswitch', Integer)
boolcol = Column('boolcol', Boolean)
strcol = Column('strcol', String)
intcol = Column('intcol', Integer)
floatcol = Column('floatcol', Float)
reprcol = Column('reprcol', ReprObjType)
feedmunge = relationship("FeedMunge")
fkey = ForeignKeyConstraint([symname, fnum, order],
[FeedMunge.symname,
FeedMunge.fnum,
FeedMunge.order])
__table_args__ = (fkey, {})
def __init__(self, kword, val, feedmunge):
self.kword = kword
self.setval(val)
self.feedmunge = feedmunge
class FeedHandle(Base, ReprMixin):
"""
Stores instructions about specific handle points during
Feed caching:
.. code-block:: python
fh = FeedHandle({'api_failure' : BitFlag(36)}, aSymbol.feeds[0])
>>> fh.api_failure['email']
True
"""
__tablename__ = "_feed_handle"
symname = Column('symname', String, primary_key=True)
fnum = Column('fnum', Integer, primary_key=True)
api_failure = Column('api_failure', BitFlagType)
feed_type = Column('feed_type', BitFlagType)
index_type_problem = Column('index_type_problem', BitFlagType)
index_property_problem = Column('index_property_problem', BitFlagType)
data_type_problem = Column('data_type_problem', BitFlagType)
monounique = Column('monounique', BitFlagType)
feed = relationship("Feed")
fkey = ForeignKeyConstraint([symname, fnum],
[Feed.symname, Feed.fnum])
__table_args__ = (fkey, {})
def __init__(self, chkpnt_settings=None, feed=None):
"""
:param chkpnt_settings: dict
A dictionary with keys matching names of the handle points
and the values either integers or BitFlags
:param feed: Feed
The feed that this FeedHandle is associated with.
"""
self.feed = feed
self.api_failure = rbd or BitFlag(['raise'])
self.feed_type = rbd or BitFlag(['stdout', 'report'])
self.index_type_problem = rbd or BitFlag(['stdout', 'report'])
self.index_property_problem = rbd or BitFlag(['stdout'])
self.data_type_problem = rbd or BitFlag(['stdout', 'report'])
self.monounique = rbd or BitFlag(['raise'])
chkpnt_settings = chkpnt_settings or {}
# override with anything passed in settings
for checkpoint in chkpnt_settings:
if checkpoint in FeedHandle.__table__.columns:
settings = chkpnt_settings[checkpoint]
setattr(self, checkpoint, settings)
def setting(self, handlepoint):
return getattr(self, handlepoint)
@property
def points(self):
exclude = ['symname', 'fnum']
pnts = [str(p).split(".")[1] for p in FeedHandle.__table__.columns if not any((ex in str(p) for ex in exclude))]
return [(pnt, getattr(self, pnt)) for pnt in pnts]
class Override(Base, ReprMixin):
"""
An Override represents a single datapoint with an associated
index value, applied to a Symbol's datatable after sourcing all the
data, and will be applied after any aggregation logic
"""
__tablename__ = '_overrides'
symname = Column('symname', String, primary_key=True)
""" symbol name, for the override"""
ornum = Column('ornum', Integer, primary_key=True)
""" Override number, uniquely assigned to every override"""
ind = Column('ind', ReprObjType, nullable=False)
""" the repr of the object used in the Symbol's index."""
val = Column('val', ReprObjType, nullable=False)
""" the repr of the object used as the Symbol's value."""
dt_log = Column('dt_log', DateTime, nullable=False)
""" datetime that the override was created"""
user = Column('user', String, nullable=True)
""" user name or process name that created the override"""
comment = Column('comment', String, nullable=True)
""" a user field to store an arbitrary string about the override"""
# make a constructor just so sphinx doesn't pick up the
# base's __init__'s doc string.
def __init__(self, *args, **kwargs):
super(Override, self).__init__(*args, **kwargs)
class FailSafe(Base, ReprMixin):
"""
A FailSafe represents a single datapoint with an associated
index value, applied to a Symbol's datatable after sourcing all the
data, and will be applied after any aggregation logic, only
where no other datapoint exists. It's a back-up datapoint,
used only by Trump, when an NA exists.
.. note::
only datetime based indices with float-based data currently work with
Overrides
"""
__tablename__ = '_failsafes'
symname = Column('symname', String, primary_key=True)
""" symbol name, for the override"""
fsnum = Column('fsnum', Integer, primary_key=True)
""" Failsafe number, uniquely assigned to every FailSafe"""
ind = Column('ind', ReprObjType, nullable=False)
""" the repr of the object used in the Symbol's index."""
val = Column('val', ReprObjType, nullable=False)
""" the repr of the object used as the Symbol's value."""
dt_log = Column('dt_log', DateTime, nullable=False)
""" datetime of the FailSafe creation."""
user = Column('user', String, nullable=True)
""" user name or process name that created the FailSafe"""
comment = Column('comment', String, nullable=True)
""" user field to store an arbitrary string about the FailSafe"""
# make a constructor just so sphinx doesn't pick up the
# base's __init__'s doc string.
def __init__(self, *args, **kwargs):
super(FailSafe, self).__init__(*args, **kwargs)
def SetupTrump(engine_string=None):
engine_str = engine_string or ENGINE_STR
try:
engine = create_engine(engine_str)
#Base.metadata.bind = engine
Base.metadata.create_all(engine)
print "Trump is installed @ " + engine_str
return engine
except ProgrammingError as pgerr:
print pgerr.statement
print pgerr.message
raise
|
{
"content_hash": "7af362a69f9af9bf9e23c44650da5930",
"timestamp": "",
"source": "github",
"line_count": 2368,
"max_line_length": 124,
"avg_line_length": 34.77787162162162,
"alnum_prop": 0.5366102435825825,
"repo_name": "jnmclarty/trump",
"id": "dbdb74aa65aea89dbcdaf26c0984bafda12f1246",
"size": "82666",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trump/orm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "248772"
}
],
"symlink_target": ""
}
|
from time import sleep
from unittest.mock import MagicMock
from wdom.document import get_document
from wdom.server import _tornado
from wdom.server.handler import event_handler
from wdom.window import customElements
from .base import TestCase
class TestWindow(TestCase):
def setUp(self):
super().setUp()
self.doc = get_document()
self.win = self.doc.defaultView
self.conn_mock = MagicMock()
_tornado.connections.append(self.conn_mock)
def tearDown(self):
_tornado.connections.remove(self.conn_mock)
def test_custom_elements_registory(self):
self.assertIs(self.win.customElements, customElements)
def test_document(self):
self.assertIs(self.win.document, self.doc)
self.assertIs(self.win, self.doc.defaultView)
def test_wdom_id(self):
self.assertEqual(self.win.wdom_id, 'window')
def test_add_eventlistener(self):
mock = MagicMock(_is_coroutine=False)
self.win.js_exec = MagicMock(_is_coroutine=False)
self.win.addEventListener('click', mock)
self.win.js_exec.assert_called_once_with('addEventListener', 'click')
msg = {
'type': 'click',
'currentTarget': {'id': 'window'},
'target': {'id': 'window'},
}
e = event_handler(msg)
mock.assert_called_once_with(e)
def test_add_event_handler_doc(self):
mock = MagicMock(_is_coroutine=False)
self.win.addEventListener('click', mock)
msg = {
'type': 'click',
'currentTarget': {'id': 'document'},
'target': {'id': 'document'},
}
event_handler(msg)
sleep(0.1)
mock.assert_not_called()
|
{
"content_hash": "869d17cd0ba07f3c52363e7f64ee51c1",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 77,
"avg_line_length": 30.946428571428573,
"alnum_prop": 0.6191575302942873,
"repo_name": "miyakogi/wdom",
"id": "aa2117d8d83cf44f6f2d889a1fd6277fb2d1a958",
"size": "1781",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "540"
},
{
"name": "HTML",
"bytes": "1316"
},
{
"name": "JavaScript",
"bytes": "16122"
},
{
"name": "Makefile",
"bytes": "2938"
},
{
"name": "Python",
"bytes": "512498"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "atmosphere.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "8585577f140404ee3a210642b957cc39",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 74,
"avg_line_length": 28.75,
"alnum_prop": 0.717391304347826,
"repo_name": "CCI-MOC/GUI-Backend",
"id": "9d87e2a1763bf04ee13f60d39ddd91cd784b86e3",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11571"
},
{
"name": "Python",
"bytes": "2565922"
},
{
"name": "Ruby",
"bytes": "1345"
},
{
"name": "Shell",
"bytes": "42018"
}
],
"symlink_target": ""
}
|
import unittest
import dedupe
from dedupe.variables.price import PriceType
class TestPrice(unittest.TestCase):
def test_comparator(self):
assert PriceType.comparator(1, 10) == 1
assert PriceType.comparator(10, 1) == 1
|
{
"content_hash": "02151bc90f3da95b49dad73c6b4fdba7",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 47,
"avg_line_length": 24.2,
"alnum_prop": 0.71900826446281,
"repo_name": "davidkunio/dedupe",
"id": "24e75369393519ddd6a5973e15d167d46cf1a7d4",
"size": "242",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/test_price.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "195449"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
}
|
'''
Created on Oct 20, 2015
@author: ahmadjaved.se@gmail.com
'''
class InvalidPage(Exception):
pass
class PageNotAnInteger(InvalidPage):
pass
class EmptyPage(InvalidPage):
pass
|
{
"content_hash": "8e203d3d22cfee20aafa084e4ee16f7e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 36,
"avg_line_length": 13.714285714285714,
"alnum_prop": 0.71875,
"repo_name": "prikevs/pagination-sqlalchemy",
"id": "123661519d28b5bb6e05aa73c9c155b89a46d5c2",
"size": "192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypaginator/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14833"
}
],
"symlink_target": ""
}
|